diff --git "a/2151.jsonl" "b/2151.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2151.jsonl"
@@ -0,0 +1,1283 @@
+{"seq_id":"74635487813","text":"from typing import TextIO, Dict\r\nimport xml.etree.ElementTree as ET\r\nfrom model import *\r\n\r\nargs_names = \"xyzabcdefgh\"\r\n\r\ndef set_actions_tension(actions: List[Action], tension: Dict) -> List[Action]:\r\n return [Action(a.name, a.params, a.preconditions, a.effects, tension[a.name]) for a in actions]\r\n\r\ndef parse_xml(filename):\r\n tree = ET.parse(filename)\r\n root = tree.getroot()\r\n\r\n objects = []\r\n facts = []\r\n actions = []\r\n predicates = []\r\n tension = {}\r\n\r\n for child in root:\r\n for elem in child:\r\n attrs = elem.attrib\r\n if child.tag == \"objects\":\r\n objects.append(Object(attrs['name'], attrs['type']))\r\n elif child.tag == \"relations\":\r\n name = attrs['name']\r\n object_names = []\r\n for param in elem:\r\n object_names.append(param.attrib['value'])\r\n facts.append(Fact(name, object_names))\r\n elif child.tag == \"operators\":\r\n action_name = attrs['name']\r\n params = []\r\n conditions = []\r\n effects = []\r\n for property in elem: \r\n if property.tag == \"parameters\":\r\n for parameter in property:\r\n params.append(Object(parameter.attrib[\"name\"], parameter.attrib[\"type\"]))\r\n elif property.tag == \"preconditions\":\r\n for precondition in property:\r\n name = precondition.attrib['predicate']\r\n negated = 'negation' in precondition.attrib\r\n object_names = []\r\n for param in precondition:\r\n object_names.append(param.attrib['name'])\r\n conditions.append(Fact(name, object_names, negated))\r\n elif property.tag == \"effects\":\r\n for effect in property:\r\n name = effect.attrib['predicate']\r\n negated = 'negation' in effect.attrib\r\n object_names = []\r\n for param in effect:\r\n object_names.append(param.attrib['name'])\r\n effects.append(Fact(name, object_names, negated))\r\n actions.append(Action(action_name, params, conditions, effects, 0))\r\n elif child.tag == \"eventeffects\":\r\n tension[attrs['name']] = TS_VALUES[attrs['tension']]\r\n elif child.tag == \"predicates\":\r\n name = attrs['name']\r\n initialstate = attrs['initialstate'] == \"true\"\r\n goalstate = attrs['goalstate'] == \"true\"\r\n oposite = attrs['oposite'] if 'oposite' in attrs else None\r\n params = []\r\n for param in elem:\r\n unique = 'unique' in param.attrib\r\n params.append(Parameter(param.attrib['type'], unique))\r\n pred = Predicate(name, initialstate, goalstate, params)\r\n if oposite:\r\n pred.oposite = oposite\r\n predicates.append(pred)\r\n \r\n actions = set_actions_tension(actions, tension)\r\n return World(objects, facts, actions, predicates)\r\n\r\ndef get_grouped_objects(objects: List[Object]):\r\n grouped = {}\r\n for o in objects:\r\n if o.type in grouped:\r\n grouped[o.type].append(o.name)\r\n else:\r\n grouped[o.type] = [o.name]\r\n return grouped\r\n\r\ndef write_facts(facts: List[Fact], f: TextIO):\r\n for i, fact in enumerate(facts):\r\n if fact.is_negated:\r\n f.write(\"(not \")\r\n f.write(f\"({fact.name}\")\r\n for arg in fact.arguments:\r\n f.write(f\" ?{arg}\")\r\n f.write(\")\")\r\n if fact.is_negated:\r\n f.write(\")\")\r\n if i != len(facts)-1:\r\n f.write(\"\\n\\t\\t\\t\\t\\t\")\r\n else:\r\n f.write(\")\")\r\n\r\ndef write_state_facts(facts: List[Fact], f: TextIO):\r\n for i, fact in enumerate(facts):\r\n if fact.is_negated:\r\n f.write(\"(not \")\r\n f.write(f\"({fact.name}\")\r\n for arg in fact.arguments:\r\n f.write(f\" {arg}\")\r\n f.write(\")\")\r\n if fact.is_negated:\r\n f.write(\")\")\r\n if i != len(facts)-1:\r\n f.write(\"\\n\\t\\t\")\r\n else:\r\n f.write(\")\")\r\n\r\ndef write_object_grouped(grouped_objects: Dict[str, List[str]], f: TextIO):\r\n for object_type in grouped_objects:\r\n for object in grouped_objects[object_type]:\r\n f.write(f\"({object_type} {object})\\n\\t\\t\")\r\n\r\ndef generate_domain(world: World, name: str = \"zombie\"):\r\n with open(f\"./{name}/{name}.pddl\", \"w\") as file:\r\n file.write(f\"(define (domain {name})\\n\\t(:requirements :strips)\\n\")\r\n file.write(f\"\\t(:predicates\\n\")\r\n\r\n types = set([o.type for o in world.objects])\r\n for type in types:\r\n file.write(f\"\\t\\t({type} ?x)\\n\")\r\n \r\n unique_fact_names = []\r\n predicates_arity = []\r\n for p in world.predicates:\r\n if p.name not in unique_fact_names:\r\n predicates_arity.append((p.name, len(p.parameters)))\r\n unique_fact_names.append(p.name)\r\n for f in world.facts:\r\n if f.name not in unique_fact_names:\r\n predicates_arity.append((f.name, len(f.arguments)))\r\n unique_fact_names.append(f.name)\r\n predicates_arity += [(\"starving\", 1), (\"knowneed\", 3)]\r\n print(predicates_arity)\r\n\r\n for pred, arity in predicates_arity:\r\n file.write(f\"\\n\\t\\t({pred}\")\r\n for i in range(arity):\r\n file.write(f\" ?{args_names[i]}\")\r\n file.write(\")\")\r\n file.write(\")\")\r\n\r\n for action in world.actions:\r\n file.write(f\"\\n\\t\\t(:action {action.name}\")\r\n file.write(f\"\\n\\t\\t:parameters (\")\r\n for param in action.params:\r\n file.write(f\" ?{param.name}\")\r\n file.write(\")\")\r\n file.write(f\"\\n\\t\\t:precondition (and\\t\")\r\n for param in action.params:\r\n file.write(f\"({param.type} ?{param.name})\\n\\t\\t\\t\\t\\t\")\r\n write_facts(action.preconditions, file)\r\n file.write(f\"\\n\\t\\t:effect (and\\t\\t\")\r\n write_facts(action.effects, file)\r\n file.write(\")\")\r\n file.write(\")\")\r\n\r\ndef generate_problem(individual: Individual, world: World, index: int, name: str = \"zombie\"):\r\n with open(f\"./{name}/problem{index}.pddl\", \"w\") as file:\r\n file.write(f\"(define (problem {name}_{index})\\n\")\r\n file.write(f\"\\t(:domain {name})\\n\")\r\n file.write(\"\\t(:objects\")\r\n grouped_objects = get_grouped_objects(world.objects)\r\n for object_type in grouped_objects:\r\n for object in grouped_objects[object_type]:\r\n file.write(f\" {object}\")\r\n file.write(\"\\n\\t\")\r\n file.write(\")\")\r\n file.write(f\"\\n\\t(:init\\t\")\r\n write_object_grouped(grouped_objects, file) \r\n write_state_facts(individual.initial_state, file)\r\n file.write(f\"\\n\\n\\t(:goal (and\\t\")\r\n write_state_facts(individual.goal_state, file)\r\n file.write(\"))\")","repo_name":"happyberry/strips-ea","sub_path":"xml_pddl_parser.py","file_name":"xml_pddl_parser.py","file_ext":"py","file_size_in_byte":7449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}
+{"seq_id":"12185999631","text":"arr=[]\nfor i in range(2,2278):\n flag=True\n\n n=i\n while i>0:\n rem=i%10\n if rem!=2 and rem!=3 and rem!=5 and rem!=7:\n flag=False\n i=i//10\n if flag:\n arr.append(n)\nt=int(input())\nfor i in range(t):\n n=int(input())\n res=arr[n-1]\n print(res)\n\n\n\n\n\n","repo_name":"gondsuryaprakash/Python-Tutorial","sub_path":"CP/Geeks For Geeks/Nth number made of prime digits.py","file_name":"Nth number 
made of prime digits.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}
+{"seq_id":"37340388772","text":"from typing import List\r\nclass Solution:\r\n def letterCombinations(self, digits: str) -> List[str]:\r\n dict = {\r\n '2': 'abc',\r\n '3': 'def',\r\n '4': 'ghi',\r\n '5': 'jkl',\r\n '6': 'mno',\r\n '7': 'pqrs',\r\n '8': 'tuv',\r\n '9': 'wxyz'\r\n }\r\n\r\n if digits == \"\":\r\n return []\r\n\r\n digits_list = []\r\n for i in range(len(digits)):\r\n digits_list.append(digits[i])\r\n\r\n output = []\r\n\r\n def combineNext(letter='', i=0):\r\n if len(digits_list) > i:\r\n digit = digits_list[i]\r\n for le in dict[digit]:\r\n k = letter + le\r\n combineNext(k, i + 1)\r\n\r\n else:\r\n output.append(letter)\r\n\r\n combineNext()\r\n return output\r\nrun = Solution()\r\nprint(run.letterCombinations(\"3745\"))","repo_name":"dh43a0198/HS_C-study","sub_path":"Leetcode_letter.py","file_name":"Leetcode_letter.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}
+{"seq_id":"4259866297","text":"\nfrom django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom user_app.serializers import RegistroSerializer\nfrom rest_framework import status\nfrom rest_framework.authtoken.models import Token\n\n\nfrom user_app import models\n\n# Create your views here.\n\n@api_view(['POST'])\ndef LogoutView(request):\n if request.method == 'POST':\n # the auth token lives on the authenticated user, not on the request body\n request.user.auth_token.delete()\n return Response(status=status.HTTP_200_OK)\n\n@api_view(['POST'])\ndef RegistroView(request):\n\n if request.method == 'POST':\n serializer = RegistroSerializer(data=request.data)\n\n data = {}\n\n if serializer.is_valid():\n account = serializer.save()\n data['response'] = ' El registro del usuario fue exitoso'\n data['username'] = account.username\n data['email'] = account.email\n token = Token.objects.get(user=account).key\n data['token'] = token\n else:\n data = serializer.errors\n\n return Response(data, status=status.HTTP_200_OK)\n","repo_name":"lucholal57/INCLUsoft","sub_path":"Django/Back-End_Django/inclusoft/user_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}
+{"seq_id":"21627541774","text":"from lxml import html\r\nfrom urllib.parse import urlparse\r\nimport requests\r\n\r\ndef get_urls_from_string(page_content, base_url):\r\n arr = []\r\n tree = html.fromstring(page_content)\r\n tree.make_links_absolute(base_url)\r\n for elem in tree.iter():\r\n if elem.tag == 'a':\r\n arr.append(elem.get('href'))\r\n return arr\r\n\r\ndef normalize_url(url):\r\n obj = urlparse(url)\r\n normalized_url = f\"{obj.netloc}{obj.path}\"\r\n lowercased = normalized_url.lower()\r\n if len(lowercased) < 1:\r\n return lowercased\r\n last_slash_removed = lowercased if lowercased[-1] != '/' else lowercased[:-1]\r\n return last_slash_removed\r\n\r\ndef crawl_page(base_url, current_url, pages):\r\n normalized_url = normalize_url(current_url)\r\n\r\n # already-seen pages (a count, or None for external/failed pages) are not fetched again,\r\n # otherwise cyclic links would recurse forever\r\n if normalized_url in pages:\r\n if pages[normalized_url] is not None:\r\n pages[normalized_url] += 1\r\n return pages\r\n\r\n parsed_base = urlparse(base_url)\r\n parsed_curr = urlparse(current_url)\r\n\r\n if parsed_curr.netloc != parsed_base.netloc:\r\n pages[normalized_url] = None\r\n return pages\r\n\r\n o = requests.get(current_url)\r\n\r\n try:\r\n validate_response(o)\r\n except Exception as e:\r\n print(e)\r\n pages[normalized_url] = None\r\n return pages\r\n\r\n pages[normalized_url] = 1\r\n\r\n url_arr = get_urls_from_string(o.content, base_url)\r\n for url in url_arr:\r\n crawl_page(base_url, url, pages)\r\n return pages\r\n\r\ndef validate_response(resp):\r\n if resp.status_code != 200:\r\n raise Exception('The response object is not called successfully')\r\n\r\n if \"text/html\" not in resp.headers[\"content-type\"].lower():\r\n raise Exception('The object called is not of the type html')\r\n","repo_name":"khushpatil/Python","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}
+{"seq_id":"19099697665","text":"# Exercise (translated from Polish):\r\n# Given a weighted graph G with positive weights, a list E' of edges that are not in the graph but connect vertices of G, and two vertices s and t.\r\n# Give an algorithm that determines which single edge from E' should be inserted into G to reduce the distance between s and t as much as possible.\r\n# If no edge improves the distance between s and t, the algorithm should report that.\r\n\r\nfrom queue import PriorityQueue\r\n\r\n# complexity:\r\n# -time O(ElogV)\r\n# -memory O(V)\r\n\r\n\r\ndef Dijkstra_distances(graph,begin):\r\n n = len(graph)\r\n distances = [float(\"inf\")] * n\r\n p_queue = PriorityQueue()\r\n distances[begin] = 0\r\n for e,w in graph[begin]:\r\n p_queue.put((begin,e,w)) # begin, end, weight of edge\r\n\r\n while not p_queue.empty():\r\n b,e,w = p_queue.get()\r\n if distances[e] > distances[b] + w:\r\n distances[e] = distances[b] + w\r\n for children,weight in graph[e]:\r\n p_queue.put((e,children,weight))\r\n\r\n return distances\r\n\r\n\r\ndef best_edge(graph,extra_edges,s,t):\r\n s_distances = Dijkstra_distances(graph,s) # running Dijkstra twice to find the lowest distance to every vertex from s and from t\r\n t_distances = Dijkstra_distances(graph,t)\r\n best_edge = False\r\n lowest_distance = s_distances[t] + 0 # + 0 copies the value\r\n\r\n for b,e,w in extra_edges:\r\n if s_distances[b] + t_distances[e] + w < lowest_distance:\r\n lowest_distance = s_distances[b] + t_distances[e] + w\r\n best_edge = b,e,w\r\n return best_edge\r\n\r\n\r\n# undirected graph as an adjacency list\r\ngraph = [\r\n [(1,4),(3,3)],\r\n [(0,4),(4,10),(5,2)],\r\n [(3,9),(4,5),(5,6)],\r\n [(0,3),(2,9),(4,9)],\r\n [(1,10),(2,5),(3,9),(5,7)],\r\n [(1,2),(2,6),(4,7)]\r\n]\r\n\r\nextra_edges = [(0,4,15),(4,0,15),(1,0,3),(2,5,7),(5,2,7),(1,4,3),(4,1,3),(2,4,1),(4,2,1)]\r\n\r\nprint(best_edge(graph,extra_edges,0,4))","repo_name":"HITOfficial/College","sub_path":"ASD/Lekcja 15 12.06.2021/distance_reducing.py","file_name":"distance_reducing.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"}
+{"seq_id":"34501123277","text":"import dataset \nimport sys\n\ndb1 = dataset.connect('postgresql://postgres:postgres@localhost:5432/ips')\ndb2 = dataset.connect('postgresql://postgres:postgres@localhost:5432/bautista')\ndb3 = dataset.connect('postgresql://postgres:postgres@localhost:5432/migone')\ndb4 = dataset.connect('postgresql://postgres:postgres@localhost:5432/hospitales')\n\ndb4.create_table('tabla_paciente', primary_id='ci', 
primary_type='Integer')\n\n\n\n\n#Crear tabla pacientes e insertar datos en la tabla del hospital_1\n\ndb1.create_table('paciente', primary_id='ci', primary_type='Integer')\ntable1 = db1['paciente']\ndata = dict(ci=1234567, sexo='Masculino', nombre='Marcos Aseretto', edad=45, lugar_fecha_nac='Caaguazu', ocupacion='Docente', religion='Catolico', raza='Latino', domicilio='San Lorenzo', telefono=987456321 )\ntable1.insert(data)\ndata = dict(ci=6541234, sexo='Femenino', nombre='Julia Gonzalez', edad=16, lugar_fecha_nac='Asuncion', ocupacion='Estudiante', religion='Judio', raza='Asiatico', domicilio='San Antonio', telefono=985123963 )\ntable1.insert(data)\n\nres = db1.query('CREATE TABLE historial (ci_paciente integer REFERENCES paciente (ci))')\ntable1 = db1['historial']\ndata = dict(ci_paciente=1234567, fecha_hist=1461969753, sintomas='Dolor de huesos', diagnostico='Dengue', enfermedad='Dengue', hospital='ips', responsable='Francisco Garay')\ntable1.insert(data)\ndata = dict(ci_paciente=1234567, fecha_hist=1261949753, sintomas='Estornudos', diagnostico='Gripe', enfermedad='Resfriado', hospital='ips', responsable='Ruben Acosta')\ntable1.insert(data)\ndata = dict(ci_paciente=1234567, fecha_hist=1461169733, sintomas='Fatiga', diagnostico='Gripe', enfermedad='Resfriado', hospital='ips', responsable='Federico Aseretto')\ntable1.insert(data)\ndata = dict(ci_paciente=6541234, fecha_hist=1154103753, sintomas='Fiebre', diagnostico='Zika', enfermedad='Dengue', hospital='ips', responsable='Sebastian Garay')\ntable1.insert(data)\ndata = dict(ci_paciente=6541234, fecha_hist=1354103753, sintomas='Dolores musculares', diagnostico='Gripe', enfermedad='Dengue', hospital='ips', responsable='Jose Gonzalez')\ntable1.insert(data)\ndata = dict(ci_paciente=6541234, fecha_hist=1356103753, sintomas='Lagrimeo en los ojos', diagnostico='Alergia', enfermedad='Resfriado', hospital='ips', responsable='Sebastian Garay')\ntable1.insert(data)\n\n#Crear tabla pacientes e insertar datos en la tabla del hospital_2\ndb2.create_table('paciente', primary_id='ci', primary_type='Integer')\ntable2 = db2['paciente']\ndata = dict(ci=7412589, sexo='Masculino', nombre='Pedro Dominguez', edad=11, lugar_fecha_nac='Encarnacion', ocupacion='Estudiante', religion='Protestante', raza='Latino', domicilio='Encarnacion', telefono=983124697 )\ntable2.insert(data)\ndata = dict(ci=3357159, sexo='Masculino', nombre='Jose Pereira', edad=29, lugar_fecha_nac='Asuncion', ocupacion='Electricista', religion='Catolico', raza='Latino', domicilio='San Lorenzo', telefono=981357159 )\ntable2.insert(data)\n\nres = db2.query('CREATE TABLE historial (ci_paciente integer REFERENCES paciente (ci))')\ntable2 = db2['historial']\ndata = dict(ci_paciente=7412589, fecha_hist=1458142153, sintomas='Fiebre', diagnostico='Zika', enfermedad='Dengue', hospital='bautista', responsable='Jose Garay')\ntable2.insert(data)\ndata = dict(ci_paciente=7412589, fecha_hist=1448142153, sintomas='Nauseas', diagnostico='Dengue', enfermedad='H1N1', hospital='bautista', responsable='Federico Garay')\ntable2.insert(data)\ndata = dict(ci_paciente=7412589, fecha_hist=1438142153, sintomas='Fatiga', diagnostico='Zika', enfermedad='H1N1', hospital='bautista', responsable='Federico Gonzalez')\ntable2.insert(data)\ndata = dict(ci_paciente=3357159, fecha_hist=1453402553, sintomas='Zarpullido', diagnostico='Chikunguya', enfermedad='Zika', hospital='bautista', responsable='Ruben Garay')\ntable2.insert(data)\ndata = dict(ci_paciente=3357159, fecha_hist=1453412553, sintomas='Dolor de cabeza', 
diagnostico='H1N1', enfermedad='Gripe', hospital='bautista', responsable='Francisco Figueredo')\ntable2.insert(data)\ndata = dict(ci_paciente=3357159, fecha_hist=1453432553, sintomas='Escalofrios y sudoracion', diagnostico='H1N1', enfermedad='H1N1', hospital='bautista', responsable='Sebastian Figueredo')\ntable2.insert(data)\n\n#Crear tabla pacientes e insertar datos en la tabla del hospital_3\ndb3.create_table('paciente', primary_id='ci', primary_type='Integer')\ntable3 = db3['paciente']\ndata = dict(ci=3124598, sexo='Femenino', nombre='Francisca Garay', edad=26, lugar_fecha_nac='Asuncion', ocupacion='Futbolista', religion='Catolico', raza='Asiatico', domicilio='Fernando de la Mora', telefono=985534912 )\ntable3.insert(data)\ndata = dict(ci=999536, sexo='Femenino', nombre='Rene Peralta', edad=80, lugar_fecha_nac='Coronel Oviedo', ocupacion='Artesano', religion='Catolico', raza='Latino', domicilio='Coronel Oviedo', telefono=961479312 )\ntable3.insert(data)\n\nres = db3.query('CREATE TABLE historial (ci_paciente integer REFERENCES paciente (ci))')\ntable3 = db3['historial']\ndata = dict(ci_paciente=3124598, fecha_hist=1290375753, sintomas='Presion en el pecho', diagnostico='Anemia', enfermedad='Anemia', hospital='migone', responsable='Federico Aseretto')\ntable3.insert(data)\ndata = dict(ci_paciente=3124598, fecha_hist=1390375753, sintomas='Mareos', diagnostico='Anemia', enfermedad='H1N1', hospital='migone', responsable='Federico Figueredo')\ntable3.insert(data)\ndata = dict(ci_paciente=3124598, fecha_hist=1490375753, sintomas='Ampollas', diagnostico='Chikunguya', enfermedad='Chikunguya', hospital='migone', responsable='Federico Garay')\ntable3.insert(data)\ndata = dict(ci_paciente=999536, fecha_hist=1074842953, sintomas='Palidez', diagnostico='Falta de alimentacion', enfermedad='Anemia', hospital='migone', responsable='Ruben Villamayor')\ntable3.insert(data)\ndata = dict(ci_paciente=999536, fecha_hist=1174842953, sintomas='Dolor de articulaciones', diagnostico='Dengue', enfermedad='Chikunguya', hospital='migone', responsable='Sebastian Villamayor')\ntable3.insert(data)\t\ndata = dict(ci_paciente=999536, fecha_hist=974842953, sintomas='Conjuntivitis', diagnostico='Conjuntivitis', enfermedad='Chikunguya', hospital='migone', responsable='Ruben Villamayor')\ntable3.insert(data)\n","repo_name":"fedecar94/webServiceTP2","sub_path":"poblar_bd.py","file_name":"poblar_bd.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72121726853","text":"# mqtt_connect.py\n\nimport os\nimport paho.mqtt.client as mqtt\nfrom flask import Blueprint, jsonify\nfrom models import Device, FeedTime\nfrom datetime import datetime\n\n# Blueprint for adding and editing user devices\nmqtt_connect_bp = Blueprint('mqtt_connect', __name__)\n\nMQTT_BROKER = 'mqtt_broker'\nMQTT_PORT = 1883\n\nclass MQTTClient:\n def __init__(self):\n self.client = mqtt.Client()\n self.client.on_message = self.on_message\n self.client.connect(MQTT_BROKER, MQTT_PORT, 60)\n self.client.subscribe(\"sensor_data/food_level\")\n self.client.loop_start()\n self.mqtt_data = {}\n\n def on_message(self, client, userdata, message):\n payload = message.payload.decode()\n try:\n nickname, sensor_data = payload.split(';')\n nickname = nickname.split(': ')[1].strip()\n food_level = sensor_data.split(': ')[1].strip()\n self.mqtt_data[nickname] = food_level\n except ValueError:\n print(\"Invalid MQTT message format\")\n\n def publish_feedtimes(self, 
device_id):\n device = Device.query.get(device_id)\n if not device:\n return jsonify({\"error\": \"Device not found!\"}), 404\n\n feedtimes = FeedTime.query.filter_by(device_id=device_id).all()\n time_portion_pairs = [f\"{datetime.strptime(feedtime.time, '%H:%M:%S').strftime('%H:%M')}:{feedtime.portions}\" for feedtime in feedtimes]\n\n topic = \"feedtimes/\" + device.nickname\n payload = \",\".join(time_portion_pairs)\n\n self.client.publish(topic, payload, retain=True, qos=1)\n print(\"Message published\")\n\n return jsonify({\"message\": \"Feed times and portions published successfully!\"}), 200\n\nmqtt_client = MQTTClient()\n\n@mqtt_connect_bp.route('/publish_feedtimes/<int:device_id>', methods=['POST'])\ndef publish_feedtimes(device_id):\n return mqtt_client.publish_feedtimes(device_id)\n\n@mqtt_connect_bp.route('/latest_mqtt_message')\ndef get_latest_mqtt_message():\n return jsonify({'message': mqtt_client.mqtt_data})\n","repo_name":"thebreckoning/LaikaTek","sub_path":"mqtt_connect.py","file_name":"mqtt_connect.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}
+{"seq_id":"14852870919","text":"import cv2\r\nimport numpy as np\r\nimport time\r\n\r\n# Timing with cv2.getTickCount()\r\nimg1 = cv2.imread('logo.jpg')\r\nt1 = cv2.getTickCount()\r\nfor i in range(5, 49, 2):\r\n img1 = cv2.medianBlur(img1, i)\r\nt2 = cv2.getTickCount()\r\nt = (t2 - t1)/cv2.getTickFrequency()\r\nprint('Result I got is ', t, ' seconds')\r\n\r\n# Timing with time.perf_counter() (time.clock() was removed in Python 3.8)\r\nimg2 = cv2.imread('logo.jpg')\r\nstart = time.perf_counter()\r\nfor i in range(5, 49, 2):\r\n img2 = cv2.medianBlur(img2, i)\r\nelapsed = (time.perf_counter() - start)\r\nprint('Result I got is ', elapsed, ' seconds')\r\n","repo_name":"crazybonmb/opencv_ex","sub_path":"DIP/11_1.py","file_name":"11_1.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"}
+{"seq_id":"17529157389","text":"import os\nimport json\n\n\ndef loader(observer):\n pwd = os.environ[\"ANALYSIS_BOAT_HOME\"]\n for file in os.listdir(pwd+\"/dataes\"):\n file_path = pwd + \"/dataes/\" + file\n print(file_path)\n\n # Open the JSON file\n f = open(file_path)\n # Build Python data from the file object that holds the JSON data\n observer.on_next(json.load(f))\n f.close()\n observer.on_completed()\n","repo_name":"satellitex/boatrace-analysis","sub_path":"utils/json_load.py","file_name":"json_load.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"}
+{"seq_id":"16169981002","text":"\nimport numpy as np\n\"\"\"\nExercise 4 (translated from Spanish):\n Ask the user for two numbers, perform all the basic calculator\n operations and print the results on screen.\n\"\"\"\n\n# print(\"###CALCULADORA###\")\n# num1 = int(input(\"Ingrese el primer número: \"))\n# num2 = int(input(\"Ingrese el segundo número: \"))\n\n# # suma = num1 + num2\n# # resta = num1 - num2\n# # multiplicacion = num1 * num2\n# # division = num1 / num2\n\n# print(\"La suma es: {}\".format(num1+num2))\n# print(\"La resta es: {}\".format(num1-num2))\n# print(\"La multiplicación es: {}\".format(num1*num2))\n# print(\"La división es: {}\".format(num1/num2))\n\na = np.array([(1, 2, 3), (4, 5, 6)])\na = a / 2\nprint(a)","repo_name":"julian20568/Master-Python","sub_path":"07-ejercicios/Ejrcicio4.py","file_name":"Ejrcicio4.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}
+{"seq_id":"35880326716","text":"import gdcm\r\nimport sys\r\n\r\ndef decompress2raw(input_file, output_file):\r\n file1 = input_file\r\n file2 = output_file\r\n reader = gdcm.ImageReader()\r\n reader.SetFileName( file1 )\r\n \r\n if not reader.Read():\r\n sys.exit(1)\r\n \r\n change = gdcm.ImageChangeTransferSyntax()\r\n change.SetTransferSyntax( gdcm.TransferSyntax(gdcm.TransferSyntax.ImplicitVRLittleEndian) )\r\n change.SetInput( reader.GetImage() )\r\n if not change.Change():\r\n sys.exit(1)\r\n \r\n writer = gdcm.ImageWriter()\r\n writer.SetFileName( file2 )\r\n writer.SetFile( reader.GetFile() )\r\n writer.SetImage( change.GetOutput() )\r\n \r\n if not writer.Write():\r\n sys.exit(1)\r\n \r\n# if __name__ == \"__main__\":\r\n# file1 = sys.argv[1] # input filename\r\n# file2 = sys.argv[2] # output filename\r\n","repo_name":"zdadadaz/echocv","sub_path":"decompress.py","file_name":"decompress.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"}
+{"seq_id":"294908513","text":"\"\"\"3.31 (translated from Portuguese) Mr. Manoel Joaquim expanded his business beyond the 1.99 shops and now owns a convenience store.\nWrite a program that implements a rudimentary cash register. The program must receive an unknown number of\nvalues for the prices of the goods. A value of zero must be entered by the operator to indicate the end of the purchase.\nThe program must then show the purchase total and ask how much cash the customer handed over,\nthen calculate and show the change. After this operation, the program must return to the starting point\nto register the next purchase. The output must follow the example below:\nLojas Tabajara\nProduto 1: R$ 2.20\nProduto 2: R$ 5.80\nProduto 3: R$ 0\nTotal: R$ 9.00\nDinheiro: R$ 20.00\nTroco: R$ 11.00\"\"\"\n\nsoma = i = juros = 0\nr = 'S'\nlista = []\n\nprint('-'*15)\nprint('LOJAS TABAJARA')\nprint('-'*15)\n\nwhile r in 'Ss':\n i += 1\n preco = float(input(f'Preço do Produto {i}: R$'))\n\n while preco < 0:\n preco = float(input(f'Insira um preço válido para o Produto {i}: R$'))\n\n soma += preco\n r = input('Há mais produtos na lista? [S/N] ').strip().upper()\n\n while r not in 'SN':\n r = input('Entrada inválida. Digite [S ou N]: ').strip().upper()\n\n tupla = (f'Produto {i}', f'{preco:.2f}')\n lista.append(tupla)\n\nprint('\\nLista de compras:')\nfor item in range(len(lista)):\n print(f'{lista[item][0]} - R${lista[item][1]}')\nprint('-'*20)\nprint(f'Total: R${soma:.2f}')\n\npagamento = float(input('\\nValor a ser pago: R$'))\nif pagamento < soma:\n faltante = soma - pagamento\n opcao = int(input(f'Faltam R${faltante:.2f}, por favor selecione uma opção abaixo: \\n'\n f'[ 1 ] Pagar a quantia de R${faltante:.2f} agora\\n'\n f'[ 2 ] Informar data de pagamento (2% ao dia)\\n'))\n if opcao == 1:\n pagamento = float(input(f'Insira a quantia R${faltante:.2f} faltante: R$'))\n while pagamento < faltante:\n faltante -= pagamento\n pagamento = float(input(f'Insira a quantia R${faltante:.2f} faltante: R$'))\n print(f'Pagamento de R${pagamento:.2f} confirmado.')\n if pagamento == faltante:\n print('Não deverá receber troco.')\n print('Fim.')\n else:\n troco = pagamento - faltante\n print(f'Deverá receber troco de R${troco:.2f}')\n print('Fim.')\n elif opcao == 2:\n data = int(input('Daqui quantos dias irá realizar o pagamento? 
'))\n for i in range(data):\n juros += (2/100)*faltante\n print(f'Valor a ser pago na data: R${faltante+juros:.2f}')\nelif pagamento > soma:\n troco = pagamento - soma\n print(f'Troco: R${troco:.2f}')\n print('Fim.')\n","repo_name":"juacturus/programming-lessons","sub_path":"python/python-brasil/03-estruturas-repeticao/ex31_loja_manoel.py","file_name":"ex31_loja_manoel.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"10334117462","text":"import os\nimport requests\nimport subprocess\nimport nbformat\nimport tempfile\nimport streamlit as st\n\n\n## Function to return all repositories of a profile\ndef repoaccess(user):\n username = user.split(\"/\")[-1]\n url = \"https://api.github.com/users/{}/repos\".format(username)\n response = requests.get(url)\n repo=[]\n if response.status_code == 200:\n repositories=response.json()\n for repository in repositories:\n repo.append(repository['html_url'])\n return repo\n else:\n print(\"Enter valid github username\")\n\n## function to extract code cells for jupyter notebook\ndef extract_code_cells(notebook_path):\n nb = nbformat.read(notebook_path, nbformat.NO_CONVERT)\n code_cells = []\n\n for cell in nb.cells:\n if cell.cell_type == 'code':\n source = cell.source.strip()\n code_cells.append(source)\n\n return code_cells\n\n## function to format code cells\ndef format_code_cells(code_cells):\n formatted_code = []\n\n for code_cell in code_cells:\n formatted_code.append(f\"{code_cell}\")\n\n return '\\n'.join(formatted_code)\n\n## function to check if value is float or not\ndef is_float(string):\n try:\n float(string)\n return True\n except ValueError:\n return False\n\n## function to delete unwanted files\ndef delete_files_except_extensions(directory, extensions):\n for filename in os.listdir(directory):\n file_path = os.path.join(directory, filename)\n if os.path.isfile(file_path):\n file_extension = os.path.splitext(filename)[1]\n if file_extension not in extensions:\n os.remove(file_path) ","repo_name":"lonewolf235/Technical-Repo-Analyzer","sub_path":"fileprocessing.py","file_name":"fileprocessing.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"7728957374","text":"import pytest\n\nfrom .plistlib import dumps, UID\n\n\ndef test_UID():\n instance = UID((1 << 32) - 1)\n assert instance.data == 4294967295\n\n with pytest.raises(ValueError):\n UID((1 << 32) + 8)\n\n with pytest.raises(ValueError):\n UID(1 << 64)\n\ndef test_plist_writer():\n\n # Ensure the `plistlib._PlistWriter` bytes method is patched\n assert dumps({ 'foo': b'\\01\\00' * 28 }).split(b'\\n') == \\\n [\n b'',\n b'',\n b'',\n b'',\n b'\\tfoo',\n b'\\tAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQA=',\n b'',\n b'',\n b''\n ]\n\n assert dumps({ 'foo': b'\\01\\00' * 29 }).split(b'\\n') == \\\n [\n b'',\n b'',\n b'',\n b'',\n b'\\tfoo',\n b'\\t',\n b'\\tAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB',\n b'\\tAAEAAQABAA==',\n b'\\t',\n b'',\n b'',\n b''\n ]\n","repo_name":"Qonfused/OCE-Build","sub_path":"third_party/cpython/plistlib_test.py","file_name":"plistlib_test.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"44"} +{"seq_id":"17164837149","text":"import os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n# Constants to access spot 
robot\nSPOT_USERNAME = os.environ.get(\"SPOT_USERNAME\", \"admin\")\nSPOT_PASSWORD = os.environ.get(\"SPOT_PASSWORD\", \"2zqa8dgw7lor\")\nSPOT_IP = os.environ.get(\"SPOT_IP\", \"192.168.50.3\")\n\nINTERACTION_MODE = os.environ.get(\"INTERACTION_MODE\", \"movement\")\n\n# Videoserver url\nVIDEOSERVER_URL = os.environ[\"VIDEOSERVER_URL\"]\n\n# Security token to execute video server commands\nVIDEOSERVER_TOKEN = os.environ.get(\"VIDEOSERVER_TOKEN\", \"\")\n\nUSE_ROBONOMICS = os.environ.get(\"USE_ROBONOMICS\", 1)\nROBONOMICS_LISTEN_ROBOT_ACCOUNT = os.environ.get(\"ROBONOMICS_LISTEN_ROBOT_ACCOUNT\",\n \"4FNQo2tK6PLeEhNEUuPePs8B8xKNwx15fX7tC2XnYpkC8W1j\")\nPINATA_API_KEY = os.environ[\"PINATA_API_KEY\"]\nPINATA_SECRET_API_KEY = os.environ[\"PINATA_SECRET_API_KEY\"]\n\nESTUARY_URL = os.environ[\"ESTUARY_URL\"]\nESTUARY_TOKEN = os.environ[\"ESTUARY_TOKEN\"]\n\nMOVEMENT_SESSION_DURATION_TIME = 120\n\nDEMO_API_URL = os.environ.get('DEMO_API_URL', 'https://api.merklebot.com/spot-demo')\n\nADMIN_ACCOUNTS = os.environ.get('ADMIN_ACCOUNTS',\n '4HVVtYPQ8hu7XGKQPmwjhTTHK5crSsiitJpLsA4B4PQV1PNr,4G1SKuxjYkm7AtbMzjpZZnXdt3sShj7nrvEB9dxLcVYJe87P,4HY2Mb4fpsyz6vyWHd3xGPgnHC983junioxhT2Cnfa5Kok5b').split(\n ',')\nAUTH_SECRET = os.getenv('AUTH_SECRET', 'abcdefg')\n\nMNEMONIC = os.environ[\"MNEMONIC\"]\n\nTRACES_DIR = os.environ[\"TRACES_DIR\"]\nCALIBRATIONS_DIR = os.environ[\"CALIBRATIONS_DIR\"]\n\nWEB3_STORAGE_API_KEY = os.environ[\"WEB3_STORAGE_API_KEY\"]\n\nTELEGRAM_BOT_TOKEN = os.environ[\"TELEGRAM_BOT_TOKEN\"]\nTELEGRAM_BOT_USER_ID = os.environ[\"TELEGRAM_BOT_USER_ID\"]\n\nIPFS_COMMAND_GATEWAY = os.getenv('IPFS_COMMAND_GATEWAY', 'https://merklebot.mypinata.cloud/ipfs')\n","repo_name":"merklebot/spot.merklebot.com","sub_path":"spot/settings/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21226877035","text":"# -*- This python file uses the following encoding : utf-8 -*-\n\nimport sys\nimport os\nimport time\nimport threading\nfrom functools import partial\n\nfrom PySide6.QtWidgets import QApplication, QMainWindow, QFrame, QLabel, QPushButton, QWidget, QSlider, QMessageBox, QSpacerItem, QSizePolicy\nfrom PySide6.QtCore import QObject, QTimer, Slot, Qt, Signal, QThreadPool\nfrom PySide6.QtMultimedia import QMediaRecorder, QMediaPlayer\n\nfrom ui.chat_window import Ui_ChatWindow\nfrom styles import Clients, SendButton, Player as PlayerStyle\nfrom server import Server\nfrom client import Client\n\nfrom user import User\nfrom message import Message\nfrom recorder import Recorder\nfrom player import Player\nfrom netscanner import NetScanner\nfrom notification import NotificationWidget\nimport utils\n\n# Global variables for recorder time counter\nseconds = minutes = 0\n\n\nclass ChatWindow(QWidget):\n \"\"\"\n Initialize chat window to show conversations and start chatting\n \"\"\"\n def __init__(self):\n QWidget.__init__(self)\n self.ui = Ui_ChatWindow()\n self.ui.setupUi(self)\n\n # SHOW USERS / CONVERSATION LIST\n users = User.where(\"id\", \">\", 1)\n for user in users:\n self.ui.show_user_widget(user)\n\n # CONNECT USER'S CONVERSATION BUTTONS\n self.ui.conversationButtonPressed.connect(self.show_conversations)\n\n # CONNECT SEND BUTTON\n self.ui.entry_field.textEdited.connect(self.change_send_style)\n self.ui.entry_field.returnPressed.connect(self.send_text_or_record)\n self.ui.send_button.clicked.connect(self.send_text_or_record)\n\n # START SERVER\n self.server = 
Server()\n self.server.start()\n\n # LISTEN FOR MESSAGE SIGNALS\n self.server.messageReceived.connect(self.show_incoming_message)\n\n # SCAN NETWORK TO FIND CONNECTED DEVICES\n self.server_hosts = {}\n\n # Scan on startup\n QTimer().singleShot(60_000, self.scan_network)\n\n # Scan network to refresh active servers\n self.net_scanner = QTimer()\n self.net_scanner.timeout.connect(self.scan_network)\n self.net_scanner.start(300_000)\n\n # CREATE RECORDER INSTANCE AND ASSOCIATED TIME COUNTER\n self.recorder = Recorder()\n self.record_timer = QTimer()\n self.record_timer.timeout.connect(self.time_counter)\n\n self.recorder.recorderStateChanged.connect(self.recorder_state_changed)\n self.recorder.recordConfirmed.connect(self.send_media)\n\n # PLAYER SETUP\n self.player = Player()\n self.player.errorOccurred.connect(lambda error: print(error))\n\n self.ui.playButtonPressed.connect(self.play)\n\n # MESSAGES AND CONVERSATIONS -------------------------------------------------------\n\n @Slot(str)\n def show_conversations(self, button_object_name: str):\n \"\"\"\n Shows conversation bubbles with a specified user\n \"\"\"\n # TRY TO STOP MEDIA PLAYER\n self.player.stop()\n\n # GET USER UUID\n user_uuid = button_object_name\n\n user = User.first_where(\"uuid\", \"=\", user_uuid)\n user_name = user.get_user_name()\n\n # SET NAME TO THE ACTIVE CLIENT LABEL\n self.ui.active_client.setText(user_name)\n self.ui.active_client.setObjectName(user_uuid)\n self.ui.active_client.show()\n self.ui.delete_button.show()\n\n # REMOVE ACTUAL VISIBLE CHAT BUBBLES\n try:\n for index in reversed(range(1, self.ui.layout_bubble.count())):\n self.ui.layout_bubble.itemAt(index).widget().deleteLater()\n # The widget at index 0 is a layout spacer, we don't need to delete it\n # That's why we end with index 1\n except Exception as e: # If chat field was not visible or is empty\n print(e)\n\n # SHOW OLDER MESSAGES WITH THE ACTIVE USER IN NEW BUBBLES\n messages = user.messages()\n for message in messages:\n sender_id = message.get_sender_id()\n\n # Knowing that the user with id == 1 is the owner,\n # messages sent from user_id 1 will be shown in the right bubble\n if sender_id == 1:\n self.ui.create_right_bubble(message)\n if self.ui.message_status.objectName().startswith(\"error\"):\n self.ui.message_status.clicked.connect(self.resend_message)\n else:\n self.ui.create_left_bubble(message)\n\n # CLEAR MESSAGE COUNTER AND SHOW ONLINE TOAST IF SELECTED USER IS ONLINE\n message_counter = self.ui.left_scroll.findChild(QLabel, f\"{user_uuid}_counter\")\n message_counter.setText(\"0\")\n message_counter.hide()\n\n # Reset to normal style sheet (important in case of unread messages)\n message_counter.parent().setStyleSheet(Clients.frame_normal)\n\n # Connect delete messages button\n self.ui.delete_button.clicked.connect(self.delete_messages)\n\n @Slot(int)\n def show_incoming_message(self, id: int):\n \"\"\"\n Shows incoming message bubble or increase new message counter\n \"\"\"\n message = Message.find(id)\n user = User.find(message.get_sender_id())\n\n if self.ui.active_client.objectName() and self.ui.active_client.objectName() == user.get_uuid():\n # Show message in the bubble\n self.ui.create_left_bubble(message)\n else:\n # Show notification widget\n self.notification_widget = NotificationWidget(user.get_user_name())\n self.notification_widget.show()\n\n # Increase the unread message counter badge\n message_counter = self.ui.left_scroll.findChild(QLabel, f\"{user.get_uuid()}_counter\")\n unread_msg = int(message_counter.text())\n 
unread_msg += 1\n message_counter.setText(f\"{unread_msg}\")\n\n try:\n message_counter.show()\n except Exception as e:\n print(f\"Error while trying to show counter widget {e}\")\n\n message_counter.parent().setStyleSheet(Clients.frame_unread_msg)\n\n @Slot()\n def change_send_style(self):\n \"\"\"\n Changes send button style, and disable media button so that a user can not send media message and text message\n at a time.\n \"\"\"\n if self.ui.entry_field.text():\n # Change send button style\n self.ui.send_button.setStyleSheet(SendButton.style_send)\n # Disable media button\n self.ui.media_button.setEnabled(False)\n\n else:\n self.ui.send_button.setStyleSheet(SendButton.style_record)\n self.ui.media_button.setEnabled(True)\n\n @Slot()\n def send_text_or_record(self):\n \"\"\"\n According to the send button style, send text message or record a voice\n \"\"\"\n if not self.ui.active_client.text():\n QMessageBox.warning(self, \"Destinataire non défini\",\n \"Veuillez sélectionner d'abord votre destinataire !\",\n QMessageBox.StandardButton.Ok)\n\n # SEND TEXT MESSAGE\n elif self.ui.entry_field.text():\n receiver = User.first_where(\"uuid\", \"=\", self.ui.active_client.objectName())\n receiver_id = receiver.get_id()\n\n text_message = self.ui.entry_field.text()\n\n message = Message()\n message.set_sender_id(1)\n message.set_receiver_id(receiver_id)\n message.set_kind(\"text\")\n message.set_body(text_message)\n\n # Send message and get it back with the status report modified\n client = Client(receiver.get_host_address())\n message = client.send_message(message)\n\n # Save text message in database\n message.save()\n\n # Show bubble\n self.ui.create_right_bubble(message)\n\n # Reset some ui states\n self.ui.entry_field.setText(None)\n self.ui.send_button.setStyleSheet(SendButton.style_record)\n self.ui.media_button.setEnabled(True)\n\n # RECORD VOICE MESSAGE\n elif not self.ui.entry_field.text():\n self.ui.media_button.setEnabled(False)\n self.ui.send_button.setEnabled(False)\n self.record_voice()\n\n @Slot(str, str)\n def send_media(self, kind: str, path: str):\n \"\"\"\n Sends the media message and shows bubble\n \"\"\"\n receiver = User.first_where(\"uuid\", \"=\", self.ui.active_client.objectName())\n receiver_id = receiver.get_id()\n\n message = Message()\n message.set_sender_id(1)\n message.set_receiver_id(receiver_id)\n message.set_kind(kind)\n message.set_body(path)\n\n # Send message and get it back with the status report modified\n client = Client(receiver.get_host_address())\n message = client.send_message(message)\n\n # Save media message in database\n message.save()\n\n # Show bubble\n self.ui.create_right_bubble(message)\n\n @Slot()\n def resend_message(self):\n \"\"\"\n Resend a message that failed\n \"\"\"\n clicked_button = self.sender()\n\n # Find message and user by id from the object name of clicked button\n message_id = clicked_button.objectName().split(\"_\")[1]\n message = Message.find(int(message_id))\n receiver = User.find(message.get_receiver_id())\n\n # Send message\n client = Client(receiver.get_host_address())\n message = client.send_message(message)\n\n # Update in database\n message.update()\n\n # Delete old bubble and create a new one\n clicked_button.parent().deleteLater()\n self.ui.create_right_bubble(message)\n\n @Slot()\n def delete_messages(self):\n user = User.first_where(\"uuid\", \"=\", self.ui.active_client.objectName())\n messages = user.messages()\n\n print(\"Deleting conversation of you with\", user.get_user_name())\n for index in reversed(range(1, 
self.ui.layout_bubble.count())):\n self.ui.layout_bubble.itemAt(index).widget().deleteLater()\n\n for message in messages:\n if message.get_kind() != \"text\":\n try:\n os.remove(message.get_body())\n except Exception as e:\n print(\"Error while trying to delete file: \", e)\n\n message.delete()\n\n # MEDIA RECORDER -----------------------------------------------------------------\n\n @Slot()\n def record_voice(self):\n \"\"\"\n Starts recording voice message\n \"\"\"\n # SHOW RECORD WIDGET INDICATOR AND CONNECT ACTION BUTTONS\n self.ui.show_record_widget()\n self.ui.end_record.clicked.connect(self.recorder._stop)\n self.ui.cancel_record.clicked.connect(self.recorder.cancel)\n\n self.recorder._record()\n self.record_timer.start(1000)\n\n def time_counter(self):\n \"\"\"\n Show recording time\n \"\"\"\n global seconds, minutes\n\n seconds += 1\n if seconds == 60:\n minutes += 1\n seconds = 0\n\n time_counter = \"%02d:%02d\" % (minutes, seconds)\n self.ui.record_time.setText(time_counter)\n\n def recorder_state_changed(self):\n \"\"\"\n Perform some actions according to the recording state\n \"\"\"\n global seconds, minutes\n\n if self.recorder.recorderState() == QMediaRecorder.StoppedState:\n self.record_timer.stop()\n self.ui.record_tip.deleteLater()\n seconds = minutes = 0\n self.ui.media_button.setEnabled(True)\n self.ui.send_button.setEnabled(True)\n\n # MEDIA PLAYER ----------------------------------------------------------------------\n\n @Slot(QPushButton)\n def play(self, play_button: QPushButton):\n \"\"\"\n Play/Pause Media\n \"\"\"\n # IF PLAYER IS IN PLAYING STATE, THE PAUSE\n if play_button.objectName() == \"playing\":\n self.player._pause()\n\n else:\n # Try to stop an eventual playing player\n self.player.stop()\n\n # GET PATH, ELAPSED AND TOTAL TIME LABELS\n parent = play_button.parent()\n title_label, elapsed_time, total_time = parent.findChildren(QLabel)\n slider = parent.findChild(QSlider)\n\n path = title_label.objectName().split(\"|\")[1]\n self.player = Player()\n self.player._play(path)\n\n # Show GUI indications of playing state\n play_button.setStyleSheet(PlayerStyle.pause)\n play_button.setObjectName(\"playing\")\n slider.setMaximum(self.player.duration())\n total_time.setText(ChatWindow.hhmmss(self.player.duration()))\n\n # Connect signals\n slider.valueChanged.connect(self.player.setPosition)\n self.player.durationChanged.connect(partial(self.update_duration, slider, total_time))\n self.player.positionChanged.connect(partial(self.update_position, slider, elapsed_time))\n self.player.playbackStateChanged.connect(partial(self.player_state_changed, play_button))\n\n @staticmethod\n def hhmmss(milliseconds: int):\n \"\"\"\n Converts millisecond time in hour, minute and seconds\n \"\"\"\n h, r = divmod(milliseconds, 3_600_000)\n m, r = divmod(r, 60_000)\n s, _ = divmod(r, 1000)\n return (\"%02d:%02d:%02d\" % (h, m, s)) if h else (\"%02d:%02d\" % (m, s))\n\n @staticmethod\n def update_duration(slider: object, total_time: object, duration: int):\n \"\"\"\n Update player duration on GUI\n \"\"\"\n # Update slider maximum value\n slider.setMaximum(duration)\n\n # Show total time on label\n total_time.setText(ChatWindow.hhmmss(duration))\n\n @staticmethod\n def update_position(slider: object, elapsed_time: object, position: int):\n \"\"\"\n Update player position on GUI\n \"\"\"\n # Update time on GUI label\n elapsed_time.setText(ChatWindow.hhmmss(position))\n\n # Disable slider signals to prevent updating triggering a\n # setPosition signal (can cause stuttering).\n 
slider.blockSignals(True)\n slider.setValue(position)\n slider.blockSignals(False)\n\n def player_state_changed(self, play_button: QPushButton, state: object):\n \"\"\"\n Perform some actions according to the playing state\n \"\"\"\n if state == QMediaPlayer.PlaybackState.PlayingState:\n play_button.setStyleSheet(PlayerStyle.pause)\n\n if state == QMediaPlayer.PlaybackState.PausedState:\n play_button.setStyleSheet(PlayerStyle.play)\n\n elif state == QMediaPlayer.PlaybackState.StoppedState:\n self.player.setPosition(0)\n play_button.setStyleSheet(PlayerStyle.play)\n play_button.setObjectName(None)\n\n def player_error(self, error):\n if error == QMediaPlayer.Error.ResourceError:\n print(\"Player resource error\")\n self.player.stop()\n\n # NETWORKING ----------------------------------------------------------------------\n def scan_network(self):\n \"\"\"\n Scan network to find connected devices and put them in server_host dictionary.\n \"\"\"\n my_ip = utils.get_private_ip()\n if my_ip.startswith(\"127.0\"):\n print(\"Aucune connexion détectée.\\nVeuillez vous connecter à un réseau !\")\n\n else:\n my_ip_bytes = my_ip.split(\".\")\n net_id = \".\".join(my_ip_bytes[:3])\n\n # Create threads\n threads = []\n for host_id in range(1, 255):\n if host_id != int(my_ip_bytes[3]):\n address = f\"{net_id}.{host_id}\"\n scanner = NetScanner(address)\n scanner.signal.scanFinished.connect(self.check_online)\n\n threads.append(scanner)\n\n # Start threads\n for scanner in threads:\n scanner.start()\n\n def check_online(self, hosts: dict):\n \"\"\"\n Checks online devices and show or hide green online indicator widget.\n \"\"\"\n clients = []\n threads = []\n\n for host_address in hosts.keys():\n client = Client(host_address)\n clients.append(client)\n\n th = threading.Thread(target=client.connect_to_server)\n threads.append(th)\n\n for thread in threads:\n thread.start()\n\n for client in clients:\n user = User.first_where(\"host_name\", \"=\", hosts.get(client.server_host))\n\n # Show green online toast cause the client is online\n if user:\n user_uuid = user.get_uuid()\n online_toast = self.ui.left_scroll.findChild(QLabel, f\"{user_uuid}_toast\")\n\n if client.online:\n print(f\"[+] {client.server_host} online\")\n online_toast.show()\n\n # Send my ID to the connected client\n message = Message()\n message.set_kind(\"ID\")\n client.send_message(message)\n\n else:\n print(f\"[-] {client.server_host} offline\")\n online_toast.hide()\n else:\n if client.online:\n self.add_user(client.server_host, hosts.get(client.server_host))\n\n def add_user(self, host_address, host_name):\n \"\"\"\n Add new user in the database\n \"\"\"\n # Save user database\n if host_name is not None:\n host_name = host_name.capitalize()\n print(f\"Adding new user {host_name}\")\n user = User()\n user.set_user_name(host_name)\n user.set_host_address(host_address)\n user.set_host_name(host_name)\n user.set_image_path()\n user.save()\n\n self.ui.show_user_widget(user, online=True)\n\n\nif __name__ == \"__main__\":\n app = QApplication.instance()\n if not app:\n app = QApplication(sys.argv)\n chat_window = ChatWindow()\n chat_window.show()\n sys.exit(app.exec())\n","repo_name":"AntaresMugisho/AR_Intercom","sub_path":"chat_window.py","file_name":"chat_window.py","file_ext":"py","file_size_in_byte":18116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43088025522","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\n# Read the column names from the file\ncol_names = 
[]\nwith open(r\"C://Users/marti/PycharmProjects/Sujets_Speciaux/OpportunityUCIDataset/dataset/column_names.txt\") as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip()\n if line.split(':')[0] == 'Column':\n if int(line.split(' ')[1]) == 1 or int(line.split(' ')[1]) >= 244:\n col_names.append(line.split(' ')[2])\n else:\n name = line.split(' ', maxsplit=2)[-1].split(';')[0]\n if name in col_names:\n name = name.replace(\"X\", \"Y\")\n if name in col_names:\n name = name.replace(\"Y\", \"Z\")\n col_names.append(name)\n else:\n col_names.append(line.split(' ', maxsplit=2)[-1].split(';')[0])\n\n# Set the folder path\nfolder_path = r\"C://Users/marti/PycharmProjects/Sujets_Speciaux/OpportunityUCIDataset/dataset/\"\n\n# Read the data files for different subjects\ndf_s1 = pd.read_table(folder_path + 'S1-Drill.dat', sep=\"\\s+\", names=col_names)\ndf_s2 = pd.read_table(folder_path + 'S2-Drill.dat', sep=\"\\s+\", names=col_names)\ndf_s3 = pd.read_table(folder_path + 'S3-Drill.dat', sep=\"\\s+\", names=col_names)\ndf_s4 = pd.read_table(folder_path + 'S4-Drill.dat', sep=\"\\s+\", names=col_names)\n\n# Create a list of dataframes for each subject\ndfs = [df_s1, df_s2, df_s3, df_s4]\n\n# Define unused labels and the kept label\nunused_labels = ['Locomotion', 'HL_Activity', 'LL_Left_Arm', 'LL_Left_Arm_Object', 'LL_Right_Arm', 'LL_Right_Arm_Object']\nkept_label = ['ML_Both_Arms']\n\n# Preprocess the dataframes for each subject\nfor i, df in enumerate(dfs):\n for col in df.columns:\n if col.split(' ')[0] == 'REED' or col in unused_labels:\n dfs[i] = dfs[i].drop(col, axis=1)\n dfs[i] = dfs[i].fillna(method='ffill')\n dfs[i] = dfs[i].fillna(0)\n\nsubs = [\"S1\", \"S2\", \"S3\", \"S4\"]\ncolors = ['red', 'blue', 'green', 'orange']\n\n# Plot line plots and kernel density estimate plots for each column\nfor col in dfs[0].columns[1:]:\n fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(50, 15))\n\n for i, df in enumerate(dfs):\n # Rename the column as per the subject\n dfs[i][subs[i]] = dfs[i][col]\n\n # Plot line plot\n dfs[i].plot(x=\"MILLISEC\", y=[subs[i]], color=colors[i], ls=\"--\", ax=axs[0])\n axs[0].set_title('Line Plot')\n\n # Plot kernel density estimate plot\n dfs[i].plot(x=\"MILLISEC\", y=[subs[i]], color=colors[i], kind=\"kde\", ax=axs[1])\n axs[1].set_title('Kernel Density Estimate Plot')\n\n fig.suptitle(col + \" for the different subject drill runs\")\n plt.savefig('./exploration/Line_and_KDE_Drills/' + col + '.jpg')\n plt.close()\n\nprint(\"done\")\n","repo_name":"Martin-blckrt/OPPORTUNITY_Activity_Recognition","sub_path":"exploration/plot_drills_kde.py","file_name":"plot_drills_kde.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"45443206","text":"import random\n\nfrom django.contrib import messages\nfrom django.core.cache import cache\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.db import connection\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, get_object_or_404\n\nfrom geekshop import settings\nfrom .models import ProductCategory, Product\n\n\ndef get_products():\n if settings.LOW_CACHE:\n key = 'all_products'\n products = cache.get(key)\n if products is None:\n products = Product.get_items()\n cache.set(key, products)\n return products\n return Product.get_items()\n\n\ndef get_products_by_category(pk):\n if 
settings.LOW_CACHE:\n key = f'category_{pk}_products'\n products = cache.get(key)\n if products is None:\n products = Product.get_items().filter(category_id=pk)\n cache.set(key, products)\n return products\n return Product.get_items().filter(category_id=pk)\n\n\ndef main(request):\n\n context = {\n 'page_title': 'main',\n }\n\n return render(request, 'mainapp/index.html', context)\n\n\ndef products(request):\n hot_product = get_hot_product()\n context = {\n 'page_title': 'catalogue',\n 'hot_product': hot_product,\n 'same_products': same_products(hot_product),\n }\n return render(request, 'mainapp/products.html', context)\n\n\ndef same_products(hot_product):\n return Product.objects.filter(category=hot_product.category, is_active=True).exclude(pk=hot_product.pk)[:3]\n\n\ndef get_hot_product():\n product_ids = get_products().values_list('id', flat=True)\n random_id = random.choice(product_ids)\n return Product.objects.get(pk=random_id)\n\n\ndef product_page(request, pk):\n product = get_object_or_404(Product, pk=pk)\n context = {\n 'page_title': 'product page',\n 'product': product,\n }\n return render(request, 'mainapp/product_page.html', context)\n\n\ndef contact(request):\n\n locations = [\n {'city': 'Moscow',\n 'phone': '+7-111-111-1111',\n 'email': 'moscow@email.ru',\n 'address': 'center'},\n {'city': 'Tver',\n 'phone': '+7-222-222-2222',\n 'email': 'tver@email.ru',\n 'address': 'suburbs'},\n {'city': 'Kaluga',\n 'phone': '+7-333-333-333',\n 'email': 'kaluga@email.ru',\n 'address': 'business center'}\n ]\n\n context = {\n 'page_title': 'contacts',\n 'locations': locations,\n }\n return render(request, 'mainapp/contact.html', context)\n\n\ndef category(request, pk):\n page_num = request.GET.get('page', 1)\n if pk == 0:\n category = {'pk': 0, 'name': 'all'}\n products = get_products()\n else:\n category = get_object_or_404(ProductCategory, pk=pk)\n products = get_products_by_category(pk)\n\n products_paginator = Paginator(products, 2)\n try:\n products = products_paginator.page(page_num)\n except PageNotAnInteger:\n products = products_paginator.page(1)\n except EmptyPage:\n products = products_paginator.page(products_paginator.num_pages)\n\n context = {\n 'page_title': 'goods of the category',\n 'category': category,\n 'products': products,\n }\n return render(request, 'mainapp/category_products.html', context)\n\n\ndef get_product_price(request, pk):\n if request.is_ajax():\n product = Product.objects.filter(pk=pk).first()\n return JsonResponse({'price': product and product.price or 0})\n\n\ndef db_profile_by_type(sender, q_type, queries):\n print(f'db profile {q_type} for {sender}:')\n for query in filter(lambda x: q_type in x,\n map(lambda x: x['sql'],\n queries)):\n print(query)\n\n\n@receiver(pre_save, sender=ProductCategory)\ndef update_prod_cat_save(sender, instance, **kwargs):\n if instance.pk:\n if instance.is_active:\n instance.product_set.update(is_active=True)\n else:\n instance.product_set.update(is_active=False)\n\n db_profile_by_type(sender, 'UPDATE', connection.queries)\n\n\nproducts_list = Product.objects.all()\n\n","repo_name":"DmitryRybko/geekshop2","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33344953206","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as data\nfrom torch.distributions import *\nfrom torchdyn.datasets import *\nfrom torchdyn.models import *\n\n\ndef test_adjoint_autograd():\n \"\"\"Compare ODE Adjoint vs Autograd gradients, s := [0, 1], adaptive-step\"\"\"\n d = ToyDataset()\n X, yn = d.generate(n_samples=512, dataset_type='moons', noise=.4)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n X_train = torch.Tensor(X).to(device)\n y_train = torch.LongTensor(yn.long()).to(device)\n train = data.TensorDataset(X_train, y_train)\n trainloader = data.DataLoader(train, batch_size=len(X), shuffle=False)\n\n f = nn.Sequential(\n nn.Linear(2, 64),\n nn.Tanh(),\n nn.Linear(64, 2))\n\n model = NeuralDE(f, solver='dopri5', sensitivity='adjoint', atol=1e-6, rtol=1e-6).to(device)\n x, y = next(iter(trainloader))\n # adjoint gradients\n y_hat = model(x)\n loss = nn.CrossEntropyLoss()(y_hat, y)\n loss.backward()\n adj_grad = torch.cat([p.grad.flatten() for p in model.parameters()])\n # autograd gradients\n model.zero_grad()\n model.sensitivity= 'autograd'\n y_hat = model(x)\n loss = nn.CrossEntropyLoss()(y_hat, y)\n loss.backward()\n bp_grad = torch.cat([p.grad.flatten() for p in model.parameters()])\n assert (torch.abs(bp_grad - adj_grad) <= 1e-4).all()\n\nif __name__ == '__main__':\n print(f'Testing regular CNF with autograd trace...')\n test_adjoint_autograd()\n","repo_name":"ChrisDeGrendele/torchdyn","sub_path":"test/test_ode_adjoint.py","file_name":"test_ode_adjoint.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"43419043475","text":"# -*- coding: utf-8 -*-\r\n\"\"\"lits_out.ipynb\r\n\r\n\"\"\"\r\n\r\nfrom kivy.app import App\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.lang import Builder\r\nfrom tensorflow.keras.models import model_from_json\r\nimport os\r\nimport nibabel as nib\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\n\r\njson_file = open('model_json.json', 'r')\r\nloaded_model_json = json_file.read()\r\njson_file.close()\r\nloaded_model = model_from_json(loaded_model_json)\r\n# load weights into new model\r\nloaded_model.load_weights(\"model_weights.h5\")\r\nprint(\"Loaded model from disk\")\r\n\r\ninput_shape = [64, 64, 1]\r\n\r\ndef slice_to_patch(slice, patch_ratio):\r\n \r\n slice[slice == 1] = 0\r\n slice[slice == 2] = 1\r\n \r\n patch_list = []\r\n \r\n for x_bin in range(2, len(patch_ratio)):\r\n for y_bin in range(2, len(patch_ratio)):\r\n patch = slice[patch_ratio[x_bin-2] : patch_ratio[x_bin], patch_ratio[y_bin - 2] : patch_ratio[y_bin]]\r\n patch = patch.reshape(patch.shape + (1,))\r\n patch_list.append(patch)\r\n \r\n return np.array(patch_list)\r\n\r\ndef patch_to_slice(patch, patch_ratio, input_shape, conf_threshold):\r\n \r\n slice = np.zeros((512, 512, 1))\r\n row_idx = 0\r\n col_idx = 0\r\n \r\n for i in range(len(patch)):\r\n \r\n slice[patch_ratio[row_idx]:patch_ratio[row_idx + 2], patch_ratio[col_idx]:patch_ratio[col_idx + 2]][patch[i] > conf_threshold] = 1\r\n \r\n col_idx += 1\r\n \r\n if i != 0 and (i+1) % 
15 == 0:\r\n row_idx += 1\r\n col_idx = 0\r\n \r\n return slice\r\n\r\nBuilder.load_string(\"\"\"\r\n:\r\n id: my_widget\r\n FileChooserListView:\r\n id: filechooser\r\n on_selection: my_widget.selected(filechooser.selection) \r\n Button\r\n text: \"open\"\r\n on_release: my_widget.open(filechooser.path, filechooser.selection) \r\n\"\"\")\r\n\r\n\r\n\r\nclass MyWidget(BoxLayout):\r\n \r\n\r\n def open(self, path, filename): \r\n img_path=os.path.join(path, filename[0])\r\n img_ex = nib.load(img_path).get_data()\r\n r,c,ch=img_ex.shape \r\n # img_ex=img_ex1[:,:,100]\r\n #mask_ex = nib.load(mask_path[25]).get_data()\r\n \r\n output_img=np.zeros(img_ex.shape)\r\n patch_ratio = []\r\n for i in range(16 + 1):\r\n patch_ratio.append(32 * i)\r\n for i in range(0,img_ex.shape[2]): \r\n\r\n patch_ex = slice_to_patch(img_ex[:, :, i], patch_ratio)\r\n prediction = loaded_model.predict(patch_ex)\r\n prediction_mask = patch_to_slice(prediction, patch_ratio, input_shape, conf_threshold = 0.97)\r\n prediction_mask1=img_ex[:,:,0]\r\n # cv2.imwrite('output.jpg',prediction_mask1)\r\n \r\n # self.ids.image.source='output.jpg'\r\n \r\n fig, (ax1,ax3) = plt.subplots(1, 2, figsize = ((15, 15)))\r\n \r\n ax1.imshow(np.rot90(img_ex[:, :, i], 3), cmap = 'bone')\r\n ax1.set_title(\"Image\", fontsize = \"x-large\")\r\n ax1.grid(False)\r\n \r\n ax3.imshow(np.rot90(prediction_mask.reshape((512, 512)), 3), cmap = 'bone')\r\n ax3.set_title(\"Mask (Pred)\", fontsize = \"x-large\")\r\n ax3.grid(False)\r\n plt.show()\r\n # plt.close()\r\n print('Finished')\r\n \r\n \r\n \r\n def selected(self, filename):\r\n print (\"selected: %s\" % filename[0])\r\n \r\n\r\nclass MyApp(App):\r\n def build(self):\r\n return MyWidget()\r\n\r\nif __name__ == '__main__':\r\n MyApp().run()","repo_name":"Auggen21/LITS-Challenge","sub_path":"aap-lits.py","file_name":"aap-lits.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"44"} +{"seq_id":"15781173863","text":"from output_utils.headers.stats_sheet_headers import (\n batter_stats_headers, batter_stats_hidden_columns, batter_stats_freeze_col, ovr_batters_stats_headers,\n sp_stats_headers, sp_stats_hidden_columns, sp_stats_freeze_col, \n rp_stats_headers, rp_stats_hidden_columns, rp_stats_freeze_col\n)\nfrom output_utils.sheets.generate_worksheet import generate_worksheet\nfrom output_utils.progress.progress_bar import ProgressBar\nimport xlsxwriter\n\ndef generate_stats_workbook(ovr_data, vl_data, vr_data):\n batter_ovr_stats_cards = []\n sp_ovr_stats_cards = []\n rp_ovr_stats_cards = []\n\n progress_bar = ProgressBar(len(ovr_data.keys()) + len(vl_data.keys()) + len(vr_data.keys()), \"Sorting cards for sheet statistics\")\n\n for card in ovr_data.values():\n if card[\"pa\"] > 10:\n batter_ovr_stats_cards.append(card)\n if card[\"sp_ip\"] > 5:\n sp_ovr_stats_cards.append(card)\n if card[\"rp_ip\"] > 5:\n rp_ovr_stats_cards.append(card)\n progress_bar.increment()\n\n batter_vl_stats_cards = []\n sp_vl_stats_cards = []\n rp_vl_stats_cards = []\n\n for card in vl_data.values():\n if card[\"pa\"] > 10:\n batter_vl_stats_cards.append(card)\n if card[\"sp_ip\"] > 5:\n sp_vl_stats_cards.append(card)\n if card[\"rp_ip\"] > 5:\n rp_vl_stats_cards.append(card)\n progress_bar.increment()\n\n batter_vr_stats_cards = []\n sp_vr_stats_cards = []\n rp_vr_stats_cards = []\n\n for card in vr_data.values():\n if card[\"pa\"] > 10:\n batter_vr_stats_cards.append(card)\n if card[\"sp_ip\"] > 5:\n 
sp_vr_stats_cards.append(card)\n if card[\"rp_ip\"] > 5:\n rp_vr_stats_cards.append(card)\n progress_bar.increment()\n progress_bar.finish()\n\n sheet_pbar = ProgressBar(1, \"Creating stats sheet\")\n workbook = xlsxwriter.Workbook('output/PTStatsSheet.xlsx')\n batter_ovr_stats_sheet = workbook.add_worksheet(\"BAT-Ovr\")\n batter_vl_stats_sheet = workbook.add_worksheet(\"BAT-vL\")\n batter_vr_stats_sheet = workbook.add_worksheet(\"BAT-vR\")\n sp_ovr_stats_sheet = workbook.add_worksheet(\"SP-Ovr\")\n sp_vl_stats_sheet = workbook.add_worksheet(\"SP-vL\")\n sp_vr_stats_sheet = workbook.add_worksheet(\"SP-vR\")\n rp_ovr_stats_sheet = workbook.add_worksheet(\"RP-Ovr\")\n rp_vl_stats_sheet = workbook.add_worksheet(\"RP-vL\")\n rp_vr_stats_sheet = workbook.add_worksheet(\"RP-vR\")\n sheet_pbar.finish()\n\n # Sort cards for sheet\n sort_pbar = ProgressBar(9, \"Sorted all cards\")\n sort_pbar.update(\"Sorting ovr batter cards\")\n batter_ovr_stats_cards.sort(key=lambda pd: pd[\"war_600_pa_ft\"], reverse=True)\n sort_pbar.increment(\"Sorting vL batter cards\")\n batter_vl_stats_cards.sort(key=lambda pd: pd[\"war_600_pa\"], reverse=True)\n sort_pbar.increment(\"Sorting vR batter cards\")\n batter_vr_stats_cards.sort(key=lambda pd: pd[\"war_600_pa\"], reverse=True)\n sort_pbar.increment(\"Sorting ovr SP cards\")\n sp_ovr_stats_cards.sort(key=lambda pd: pd[\"sp_war_per_220_ip\"], reverse=True)\n sort_pbar.increment(\"Sorting vL SP cards\")\n sp_vl_stats_cards.sort(key=lambda pd: pd[\"sp_war_per_220_ip\"], reverse=True)\n sort_pbar.increment(\"Sorting vR SP cards\")\n sp_vr_stats_cards.sort(key=lambda pd: pd[\"sp_war_per_220_ip\"], reverse=True)\n sort_pbar.increment(\"Sorting ovr RP cards\")\n rp_ovr_stats_cards.sort(key=lambda pd: pd[\"rp_war_per_100_ip\"], reverse=True)\n sort_pbar.increment(\"Sorting vL RP cards\")\n rp_vl_stats_cards.sort(key=lambda pd: pd[\"rp_war_per_100_ip\"], reverse=True)\n sort_pbar.increment(\"Sorting vR RP cards\")\n rp_vr_stats_cards.sort(key=lambda pd: pd[\"rp_war_per_100_ip\"], reverse=True)\n sort_pbar.finish()\n\n generate_worksheet(batter_ovr_stats_cards, batter_ovr_stats_sheet, ovr_batters_stats_headers, batter_stats_freeze_col, batter_stats_hidden_columns, \"batter ovr stats\")\n generate_worksheet(batter_vl_stats_cards, batter_vl_stats_sheet, batter_stats_headers, batter_stats_freeze_col, batter_stats_hidden_columns, \"batter vL stats\")\n generate_worksheet(batter_vr_stats_cards, batter_vr_stats_sheet, batter_stats_headers, batter_stats_freeze_col, batter_stats_hidden_columns, \"batter vR stats\")\n generate_worksheet(sp_ovr_stats_cards, sp_ovr_stats_sheet, sp_stats_headers, sp_stats_freeze_col, sp_stats_hidden_columns, \"sp ovr stats\")\n generate_worksheet(sp_vl_stats_cards, sp_vl_stats_sheet, sp_stats_headers, sp_stats_freeze_col, sp_stats_hidden_columns, \"sp vL stats\")\n generate_worksheet(sp_vr_stats_cards, sp_vr_stats_sheet, sp_stats_headers, sp_stats_freeze_col, sp_stats_hidden_columns, \"sp vR stats\")\n generate_worksheet(rp_ovr_stats_cards, rp_ovr_stats_sheet, rp_stats_headers, rp_stats_freeze_col, rp_stats_hidden_columns, \"rp ovr stats\")\n generate_worksheet(rp_vl_stats_cards, rp_vl_stats_sheet, rp_stats_headers, rp_stats_freeze_col, rp_stats_hidden_columns, \"rp vL stats\")\n generate_worksheet(rp_vr_stats_cards, rp_vr_stats_sheet, rp_stats_headers, rp_stats_freeze_col, rp_stats_hidden_columns, \"rp vR stats\")\n\n close_pbar = ProgressBar(1, \"Closing stats sheet file\")\n workbook.close()\n close_pbar.finish()\n 
print()","repo_name":"paoloruiz/ootp-pt-analysis","sub_path":"output_utils/sheets/generate_stats_workbook.py","file_name":"generate_stats_workbook.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"35744653782","text":"def solution(n):\n b = format(n,\"b\")\n if \"1\"*len(b) == b or (b.count(\"0\") == 1 and b[-1] == \"0\"):\n next = sorted(list(b+\"0\"))\n next = [next[-1]] + next[:-1]\n return int(\"\".join(next),2)\n next = list(b)\n idx = 0\n for i in range(len(next)-1,-1,-1):\n if next[i] == \"1\" and next[i-1] == \"0\":\n next[i],next[i-1] = next[i-1],next[i]\n idx = i\n break\n next = next[:idx+1] + sorted(next[idx+1:])\n return int(\"\".join(next),2)\n ","repo_name":"koseyeon/ProblemSolving","sub_path":"프로그래머스/lv2/12911. 다음 큰 숫자/다음 큰 숫자.py","file_name":"다음 큰 숫자.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"42978284279","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 24 16:51:12 2019\n\n@author: Joshua Peeples\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport time\nimport copy\nimport pdb\nfrom barbar import Bar\nfrom Functions.Loss_functions import QMI, MCE\n\ndef train_SAE(model, dataloaders, optimizer, device='cpu',num_epochs=25):\n since = time.time()\n\n train_error_history = []\n val_error_history = []\n \n MSE_loss = nn.MSELoss()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_loss = np.inf\n \n model = model.to(device)\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch+1, num_epochs))\n print('-' * 10)\n\n \n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode \n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0 \n # Iterate over data.\n for idx, (inputs, labels) in enumerate(Bar(dataloaders[phase])):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n \n with torch.set_grad_enabled(phase=='train'):\n \n # forward pass\n outputs = model(inputs)\n loss = MSE_loss(outputs, torch.flatten(inputs,start_dim=1))\n \n # backward pass\n if phase == 'train':\n loss.backward()\n optimizer.step()\n \n # statistics\n running_loss += loss.item() * inputs.size(0)\n\n epoch_loss = running_loss / len(dataloaders[phase].sampler)\n \n if phase == 'train':\n train_error_history.append(epoch_loss)\n\n print()\n print('{} Loss: {:.4f}'.format(phase, epoch_loss))\n\n # deep copy the model\n if phase == 'val' and epoch_loss < best_loss:\n best_epoch = epoch+1\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n \n if phase == 'val':\n val_error_history.append(epoch_loss)\n print()\n\n \n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n print('Best validation loss: {:4f} at Epoch {:.0f}'.format(best_loss,best_epoch))\n\n # load best model weights\n #Fit model on hold out test set\n model.load_state_dict(best_model_wts)\n \n return (model, best_model_wts, train_error_history,val_error_history,best_loss,time_elapsed)\n \ndef train_classifier(model, dataloaders, optimizer,criterion = 'CE', device='cpu',num_epochs=25,bw=.1,ITL=False):\n since = time.time()\n\n 
val_acc_history = []\n train_acc_history = []\n train_error_history = []\n val_error_history = []\n CE = nn.CrossEntropyLoss()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n model = model.to(device)\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch+1, num_epochs))\n print('-' * 10)\n \n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode \n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n \n # Iterate over data.\n for idx, (inputs, labels) in enumerate(Bar(dataloaders[phase])):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n \n with torch.set_grad_enabled(phase=='train'):\n \n # forward pass\n outputs = model(inputs)\n if criterion == 'QMI': #Put MI for mutual information\n loss = QMI(outputs, labels,bw)\n elif criterion == 'MCE': #MCE\n loss = MCE(outputs,labels,bw)\n else:\n loss = CE(outputs,labels)\n \n _, preds = torch.max(outputs, 1)\n # backward pass\n if phase == 'train':\n loss.backward()\n optimizer.step()\n \n # statistics\n running_loss += loss.item() * inputs.size(0)\n #If ITL losses, one-hot encoded labels needed to be \n #converted to integers\n if(ITL):\n _, true_labels = torch.max(labels,1)\n running_corrects += torch.sum(preds == true_labels.data)\n else:\n running_corrects += torch.sum(preds == labels.data) \n\n epoch_loss = running_loss / len(dataloaders[phase].sampler)\n epoch_acc = running_corrects.double() / len(dataloaders[phase].sampler)\n \n if phase == 'train':\n train_error_history.append(epoch_loss)\n train_acc_history.append(epoch_acc)\n\n print()\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_epoch = epoch+1\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n \n if phase == 'val':\n val_error_history.append(epoch_loss)\n val_acc_history.append(epoch_acc)\n \n print()\n\n \n \n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n print('Best validation accuracy: {:4f} at Epoch {:.0f}'.format(best_acc,best_epoch))\n\n # load best model weights\n #Fit model on hold out test set\n model.load_state_dict(best_model_wts)\n \n #Get predictions for test set\n GT,predictions = predict(dataloaders['test'],model,device,ITL=ITL)\n \n return (model, best_model_wts, train_error_history,val_error_history,best_acc,\n time_elapsed,GT,predictions)\n\ndef predict(dataloader,model,device,ITL=False):\n #Initialize and accumalate ground truth and predictions\n GT = np.array(0)\n Predictions = np.array(0)\n running_corrects = 0\n model = model.to(device)\n model = nn.Sequential(model,nn.Softmax(dim=1))\n model.eval()\n # Iterate over data.\n with torch.no_grad():\n #for idx, (inputs, labels,index) in Bar(enumerate(dataloader)):\n for idx, (inputs, labels) in enumerate(Bar(dataloader)):\n inputs = inputs.to(device)\n labels = labels.to(device)\n \n # forward\n outputs = model(inputs)\n _, preds = torch.max(outputs,1)\n \n #If test, accumulate labels for confusion matrix\n if(ITL):\n _, labels = torch.max(labels,1)\n GT = np.concatenate((GT,labels.detach().cpu().numpy()),axis=None)\n else:\n GT = np.concatenate((GT,labels.detach().cpu().numpy()),axis=None)\n \n Predictions = 
np.concatenate((Predictions,preds.detach().cpu().numpy()),axis=None)\n            running_corrects += torch.sum(preds == labels.data)\n\n    test_acc = running_corrects.double() / len(dataloader.sampler)\n    print('Test Accuracy: {:4f}'.format(test_acc)) \n\n    return GT[1:],Predictions[1:]\n\n\n","repo_name":"jpeeples67/Fall_2019_EEL_6814_Deep_Learning","sub_path":"Project_2/Functions/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":7979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"16657931032","text":"#!/usr/bin/python3\n\nimport requests\nimport yaml\nfrom pprint import pprint\n\nimport swisssign_ra_api.v2\nfrom swisssign_ra_api.v2.api import api_registration_api\nfrom swisssign_ra_api.v2.model.client import Client\n\nclass RaApiSession():\n    def __init__(self, config_file='./account.stage.yml'):\n\n        with open(config_file,encoding='utf-8') as y:\n            config = yaml.load(y, Loader=yaml.FullLoader)\n\n        client=config['client']\n        serviceaccount=config['serviceaccount']\n        user_secret=config['secret']\n        baseurl = config['baseurl']\n\n        self.config = config\n\n        self.session = requests.Session()\n        if 'proxy' in config:\n            proxies = {\n                'https': config['proxy']\n            }\n            self.session.proxies.update(proxies)\n\n        configuration = swisssign_ra_api.v2.Configuration(\n            host = baseurl\n        )\n        configuration.api_key_prefix['ApiKeyAuth'] = 'Bearer'\n        configuration.username = serviceaccount\n        configuration.discard_unknown_keys = True\n\n        # Get JWT\n        self.api_client = swisssign_ra_api.v2.ApiClient(configuration)\n        self.api = api_registration_api.ApiRegistrationApi(self.api_client)\n        jwt = self.api.jwt(user_secret=user_secret, user_name=serviceaccount)\n        configuration.api_key['ApiKeyAuth'] = jwt\n\n        #pprint(configuration.auth_settings())\n\n        clients = self.api.search_clients(search=client)\n        if len(clients) > 1:\n            raise Exception('more than one client matches to %s' % client)\n\n        self.client:Client\n        self.client = clients[0]\n","repo_name":"SwissSign-AG/raapi-python-example-openapi","sub_path":"swisspki_session.py","file_name":"swisspki_session.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33230165613","text":"# !/usr/bin/env python\r\n# -*- coding = utf-8 -*-\r\n# @time:2022/3/6 14:21\r\n# Author:sssmmmx\r\n# @File:trading_volume_bot_2.py\r\n# @Software:PyCharm\r\n\r\n\"\"\"\r\nScheme 2:\r\nOne-sided volume farming, one-sided hedging.\r\nCode logic: for one perpetual contract symbol, open a long/short pair, then farm volume in one direction in equal price steps; once a set percentage is reached, close all positions, reopen a pair, and repeat.\r\nEstimated cost: 0.05% fees + 0.05% slippage, roughly 0.1% in total\r\nCycle time: fairly fast\r\n\"\"\"\r\n\r\nimport time\r\nfrom Futures import FuturesAPI\r\n\r\n\r\n# Open one long/short order pair\r\ndef make_init_com():\r\n    while True:\r\n        book_info = futures_api.order_book(symbol)\r\n        # Check that the spread between ask 1 and bid 1 exceeds 0.1%\r\n        if (float(book_info[\"asks\"][0][\"p\"]) - float(book_info[\"bids\"][0][\"p\"])) / float(book_info[\"bids\"][0][\"p\"]) > 0.001:\r\n            print(\"Placing orders\")\r\n            px = (float(book_info[\"asks\"][0][\"p\"]) + float(book_info[\"bids\"][0][\"p\"])) / 2\r\n            f = f\".{order_price_round.count('0')}f\"\r\n            px = f\"{format(px, f)}\"\r\n            buy = futures_api.create_orders(symbol, 1, px, \"gtc\")\r\n            sell = futures_api.create_orders(symbol, -1, px, \"gtc\")\r\n\r\n            while True:\r\n                # Check whether the trade is done: \"status\": \"finished\"\r\n                if futures_api.query_orders(buy['id'])[\"status\"] == \"finished\" and futures_api.query_orders(sell['id'])[\"status\"] == \"finished\":\r\n                    print(\"Completed one pair of trades\")\r\n                    break\r\n                else:\r\n                    time.sleep(1)\r\n\r\n            break\r\n        else:\r\n
            time.sleep(1)\r\n\r\n    return buy['id'], sell['id']\r\n\r\n\r\n# Close the long/short pair\r\ndef close_com():\r\n    while True:\r\n        time.sleep(1)\r\n        book_info = futures_api.order_book(symbol)\r\n        # Check that the spread between ask 1 and bid 1 exceeds 0.1%\r\n        if (float(book_info[\"asks\"][0][\"p\"]) - float(book_info[\"bids\"][0][\"p\"])) / float(\r\n                book_info[\"bids\"][0][\"p\"]) > 0.001:\r\n            print(\"Placing orders\")\r\n            px = (float(book_info[\"asks\"][0][\"p\"]) + float(book_info[\"bids\"][0][\"p\"])) / 2\r\n            f = f\".{order_price_round.count('0')}f\"\r\n            px = f\"{format(px, f)}\"\r\n\r\n            # Close positions with limit price px\r\n            m = futures_api.close_orders(symbol, px, \"close_long\", \"gtc\")\r\n            n = futures_api.close_orders(symbol, px, \"close_short\", \"gtc\")\r\n\r\n            while True:\r\n                time.sleep(1)\r\n                # Check whether the trade is done: \"status\": \"finished\"\r\n                if futures_api.query_orders(m['id'])[\"status\"] == \"finished\" and futures_api.query_orders(n['id'])[\r\n                    \"status\"] == \"finished\":\r\n                    print(\"Positions closed\")\r\n                    break\r\n            break # close-out done; exit the outer loop (it previously never terminated)\r\n\r\n\r\n# Pick a symbol from the perpetual contract market for one-sided volume farming with a one-sided hedge.\r\np = input(f\"Enter the symbol: \")\r\n# Convert to upper case\r\np = p.upper()\r\nsymbol = p + '_USDT'\r\n\r\nfutures_api = FuturesAPI() # instantiate futures_api\r\n\r\nfutures_api.set_dual_mode('usdt') # set position mode to dual (hedge) mode\r\n\r\none_info = futures_api.query_contract_info(symbol) # fetch contract info\r\n\r\norder_size_min = one_info['order_size_min'] # minimum order size\r\norder_price_round = one_info['order_price_round'] # price tick size\r\n\r\nprint(f\"Minimum order size: {order_size_min} Price tick size: {order_price_round}\")\r\nprint(f\"{symbol}: starting to farm trading volume\")\r\n\r\n# --------------------------------------------------------------------------\r\n# Open one long/short pair\r\na, b = make_init_com()\r\n\r\n# Start one-sided volume farming\r\ninterval = 0.001 # 0.1%\r\n\r\nt = 0\r\nwhile t < 5:\r\n    # Query the price\r\n    ppx = float(futures_api.query_orders(a)[\"price\"]) * (1 + interval)\r\n    f = f\".{order_price_round.count('0')}f\"\r\n    ppx = f\"{format(ppx, f)}\"\r\n\r\n    # Place a closing (take-profit) order\r\n    c = futures_api.close_orders(symbol, ppx, \"close_long\", \"gtc\")\r\n\r\n    while True:\r\n        # Check whether the trade is done: \"status\": \"finished\" ('a' always holds an order id; the original indexed it with ['id'], which fails on a string)\r\n        if futures_api.query_orders(a)[\"status\"] == \"finished\":\r\n            a = futures_api.create_orders(symbol, 1, ppx, \"gtc\")['id']\r\n            t = t + 1\r\n            break\r\n        else:\r\n            time.sleep(1)\r\nif t == 5:\r\n    close_com()\r\n\r\n# --------------------------------------------------------------------------\r\n","repo_name":"sssmmmx/Trading-Volume-Bot-Gate","sub_path":"trading_volume_bot_2.py","file_name":"trading_volume_bot_2.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"22966099216","text":"import click\n\nfrom ens.console import echo\nfrom ens.local import get_local_info\nfrom ens.remote import get_remote\nfrom ens.utils.click import arg_novel\n\n\n@click.command()\n@arg_novel\n@click.option('-l/-r', '--local/--remote', 'local',\n    is_flag = True,\n    default = True,\n    help = 'View local/remote info')\ndef info(novel, local):\n    if local:\n        info = get_local_info(novel)\n    else:\n        remote = get_remote(novel.remote)\n        info = remote.get_info(novel)\n\n    echo(info.verbose())","repo_name":"syrinka/Elegant-Novel-Spider","sub_path":"ens/commands/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13114611836","text":"import corner_detection as cd\nimport homography_operations as ho\nimport intrinsic_estimation as intr\nimport extrinsic_estimation as extr\nimport distortion_estimation as de\nimport parameter_refinement as pr\nimport visualize\nimport analysis\nimport 
logging\nimport util\nimport pathlib\n\n\ndef main():\n pathlib.Path('graphs/').mkdir(parents=True, exist_ok=True)\n obj_points, img_points, img_shapes, img_names = cd.find_corners()\n\n refined_homographies = []\n for index in range(len(img_points)):\n util.info(\"Image Count: \" + str(index + 1))\n h = ho.create_homography(img_points[index], obj_points[index])\n util.info(\"Homography:\\n\" + str(h) + \"\\n\")\n h = ho.refine_homography(h, img_points[index], obj_points[index])\n util.info(\"Refined Homography:\\n\" + str(h) + \"\\n\")\n refined_homographies.append(h)\n # analysis.plot_differences(img_points[index], obj_points[index], h, str(index + 1))\n \n A = intr.compute_intrinsics(refined_homographies)\n util.info(\"Camera Intrinsics:\\n\" + str(A) + \"\\n\")\n\n # Once A is known, the extrinsic parameters for each image is readily computed.\n extrinsics = []\n for h_index in range(len(refined_homographies)):\n E = extr.compute_extrinsics(A, refined_homographies[h_index])\n util.info(\"Camera Extrinsic Matrix For Image-\" + str(h_index + 1) + \":\\n\" + str(E) + \"\\n\")\n extrinsics.append(E)\n\n # As the radial distortion is expected to be small, one would expect to estimate the other five intrinsic parameters,\n # using the Maximum likelihood estimation, reasonable well by simply ignoring distortion. One strategy is then to estimate\n # k1 and k2 after having estimated the other parameters, which will give us the ideal pixel coordinates.\n k = de.estimate_radial_distortion(obj_points, img_points, A, extrinsics)\n util.info(\"Radial Distortion: \\n\" + str(k) + \"\\n\")\n\n K_opt, k_opt, extrinsics_opt = pr.refine(A, k, extrinsics, obj_points, img_points)\n util.info(\"Parameters:\")\n print('\\t Focal Length: [ {:.5f} {:.5f} ]'.format(K_opt[0,0], K_opt[1,1]))\n print('\\tPrincipal Point: [ {:.5f} {:.5f} ]'.format(K_opt[0,2], K_opt[1,2]))\n print('\\t Skew: [ {:.7f} ]'.format(K_opt[0,1]))\n print('\\t Distortion: [ {:.6f} {:.6f} ]'.format(k_opt[0], k_opt[1]))\n\n util.info(\"Projection Matrices for WebGL:\\n\")\n znear, zfar = .1, 1000.\n for idx, e in enumerate(extrinsics_opt):\n p = util.get_camera_matrix(K_opt, e)\n util.info(\"P matrix for image \" + str(idx + 1) + \":\\n\" + str(p))\n decomposed_p = util.decompose(p)\n webgl_p = util.to_opengl_projection(decomposed_p['intrinsic'], 0, 0, img_shapes[idx][0], img_shapes[idx][1], znear, zfar, direction=\"y down\")\n util.info(\"P matrix(WebGl) for image \" + str(idx + 1) + \":\\n\" + str(webgl_p))\n\n visualize.visualize_camera_frame(obj_points[0], extrinsics_opt[0], img_names[0])\n\nif __name__ == \"__main__\":\n logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)\n main()\n","repo_name":"nuwandda/Checkerboard-Calibration","sub_path":"checkerboard_calibration.py","file_name":"checkerboard_calibration.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"35631160000","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations 
under the License.\n\n\"\"\"OpenStackClient plugin for Ironic Inspector.\"\"\"\n\nfrom __future__ import print_function\n\nimport json\nimport sys\n\nfrom osc_lib.command import command\nfrom osc_lib import exceptions\nfrom osc_lib.i18n import _\nfrom osc_lib import utils\nimport yaml\n\nimport ironic_inspector_client\nfrom ironic_inspector_client import resource as res\n\n\nAPI_NAME = 'baremetal_introspection'\nAPI_VERSION_OPTION = 'inspector_api_version'\nDEFAULT_API_VERSION = '1'\nAPI_VERSIONS = {\n \"1\": \"ironic_inspector.shell\",\n}\n\nfor mversion in range(ironic_inspector_client.MAX_API_VERSION[1] + 1):\n API_VERSIONS[\"1.%d\" % mversion] = API_VERSIONS[\"1\"]\n\n\ndef make_client(instance):\n url = instance.get_configuration().get('inspector_url')\n if not url:\n url = instance.get_endpoint_for_service_type(\n 'baremetal-introspection', interface=instance.interface,\n region_name=instance._region_name\n )\n return ironic_inspector_client.ClientV1(\n inspector_url=url,\n session=instance.session,\n api_version=instance._api_version[API_NAME],\n interface=instance._interface,\n region_name=instance._region_name)\n\n\ndef build_option_parser(parser):\n # TODO(dtantsur): deprecate these options in favor of more generic OS_*\n parser.add_argument('--inspector-api-version',\n default=utils.env('INSPECTOR_VERSION',\n default=DEFAULT_API_VERSION),\n help='inspector API version, only 1 is supported now '\n '(env: INSPECTOR_VERSION).')\n parser.add_argument('--inspector-url',\n default=utils.env('INSPECTOR_URL', default=None),\n help='inspector URL, defaults to localhost '\n '(env: INSPECTOR_URL).')\n return parser\n\n\nclass StartCommand(command.Lister):\n \"\"\"Start the introspection.\"\"\"\n\n COLUMNS = ('UUID', 'Error')\n\n def get_parser(self, prog_name):\n parser = super(StartCommand, self).get_parser(prog_name)\n parser.add_argument('node', help='baremetal node UUID(s) or name(s)',\n nargs='+')\n parser.add_argument('--wait',\n action='store_true',\n help='wait for introspection to finish; the result'\n ' will be displayed in the end')\n parser.add_argument('--check-errors',\n action='store_true',\n help='check if errors occurred during the'\n ' introspection; if any error occurs only the'\n ' errors are displayed; can only be used with'\n ' --wait')\n return parser\n\n def take_action(self, parsed_args):\n if parsed_args.check_errors and not parsed_args.wait:\n raise exceptions.CommandError(\n _(\"--check-errors can only be used with --wait\"))\n\n client = self.app.client_manager.baremetal_introspection\n for uuid in parsed_args.node:\n client.introspect(uuid)\n\n if parsed_args.wait:\n print('Waiting for introspection to finish...', file=sys.stderr)\n result = client.wait_for_finish(parsed_args.node)\n result = [(uuid, s.get('error'))\n for uuid, s in result.items()]\n if parsed_args.check_errors:\n uuids_errors = \"\\n\".join(\"%s (%s)\" % node_info\n for node_info in result\n if node_info[1] is not None)\n if uuids_errors:\n raise Exception(\n _(\"Introspection failed for some nodes: %s\")\n % uuids_errors)\n else:\n result = []\n\n return self.COLUMNS, result\n\n\nclass ReprocessCommand(command.Command):\n \"\"\"Reprocess stored introspection data\"\"\"\n\n def get_parser(self, prog_name):\n parser = super(ReprocessCommand, self).get_parser(prog_name)\n parser.add_argument('node', help='baremetal node UUID or name')\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.baremetal_introspection\n client.reprocess(parsed_args.node)\n\n\nclass 
StatusCommand(command.ShowOne):\n \"\"\"Get introspection status.\"\"\"\n hidden_status_items = {'links'}\n\n @classmethod\n def status_attributes(cls, client_item):\n \"\"\"Get status attributes from an API client dict.\n\n Filters the status fields according to the cls.hidden_status_items\n :param client_item: an item returned from either the get_status or the\n list_statuses client method\n :return: introspection status as a list of name, value pairs\n \"\"\"\n return [item for item in client_item.items()\n if item[0] not in cls.hidden_status_items]\n\n def get_parser(self, prog_name):\n parser = super(StatusCommand, self).get_parser(prog_name)\n parser.add_argument('node', help='baremetal node UUID or name')\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.baremetal_introspection\n status = client.get_status(parsed_args.node)\n return zip(*sorted(self.status_attributes(status)))\n\n\nclass StatusListCommand(command.Lister):\n \"\"\"List introspection statuses\"\"\"\n\n COLUMNS = ('UUID', 'Started at', 'Finished at', 'Error')\n\n @classmethod\n def status_row(cls, client_item):\n \"\"\"Get a row from a client_item.\n\n The row columns are filtered&sorted according to cls.COLUMNS.\n\n :param client_item: an item returned from either the get_status or the\n list_statuses client method.\n :return: a list of client_item attributes as the row\n \"\"\"\n status = dict(StatusCommand.status_attributes(client_item))\n return utils.get_dict_properties(status, cls.COLUMNS)\n\n def get_parser(self, prog_name):\n parser = super(StatusListCommand, self).get_parser(prog_name)\n parser.add_argument('--marker', help='UUID of the last item on the '\n 'previous page', default=None)\n parser.add_argument('--limit', help='the amount of items to return',\n type=int, default=None)\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.baremetal_introspection\n statuses = client.list_statuses(marker=parsed_args.marker,\n limit=parsed_args.limit)\n rows = [self.status_row(status) for status in statuses]\n return self.COLUMNS, rows\n\n\nclass AbortCommand(command.Command):\n \"\"\"Abort running introspection for node.\"\"\"\n\n def get_parser(self, prog_name):\n parser = super(AbortCommand, self).get_parser(prog_name)\n parser.add_argument('node', help='baremetal node UUID or name')\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.baremetal_introspection\n client.abort(parsed_args.node)\n\n\nclass RuleImportCommand(command.Lister):\n \"\"\"Import one or several introspection rules from a JSON/YAML file.\"\"\"\n\n COLUMNS = (\"UUID\", \"Description\")\n\n def get_parser(self, prog_name):\n parser = super(RuleImportCommand, self).get_parser(prog_name)\n parser.add_argument('file', help='JSON or YAML file to import, may '\n 'contain one or several rules')\n return parser\n\n def take_action(self, parsed_args):\n with open(parsed_args.file, 'r') as fp:\n rules = yaml.safe_load(fp)\n if not isinstance(rules, list):\n rules = [rules]\n client = self.app.client_manager.baremetal_introspection\n result = []\n for rule in rules:\n result.append(client.rules.from_json(rule))\n result = [tuple(rule.get(col.lower()) for col in self.COLUMNS)\n for rule in result]\n return self.COLUMNS, result\n\n\nclass RuleListCommand(command.Lister):\n \"\"\"List all introspection rules.\"\"\"\n\n COLUMNS = (\"UUID\", \"Description\")\n\n def take_action(self, parsed_args):\n client = 
self.app.client_manager.baremetal_introspection\n rules = client.rules.get_all()\n rules = [tuple(rule.get(col.lower()) for col in self.COLUMNS)\n for rule in rules]\n return self.COLUMNS, rules\n\n\nclass RuleShowCommand(command.ShowOne):\n \"\"\"Show an introspection rule.\"\"\"\n\n def get_parser(self, prog_name):\n parser = super(RuleShowCommand, self).get_parser(prog_name)\n parser.add_argument('uuid', help='rule UUID')\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.baremetal_introspection\n rule = client.rules.get(parsed_args.uuid)\n del rule['links']\n return self.dict2columns(rule)\n\n\nclass RuleDeleteCommand(command.Command):\n \"\"\"Delete an introspection rule.\"\"\"\n\n def get_parser(self, prog_name):\n parser = super(RuleDeleteCommand, self).get_parser(prog_name)\n parser.add_argument('uuid', help='rule UUID')\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.baremetal_introspection\n client.rules.delete(parsed_args.uuid)\n\n\nclass RulePurgeCommand(command.Command):\n \"\"\"Drop all introspection rules.\"\"\"\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.baremetal_introspection\n client.rules.delete_all()\n\n\nclass DataSaveCommand(command.Command):\n \"\"\"Save or display raw introspection data.\"\"\"\n\n def get_parser(self, prog_name):\n parser = super(DataSaveCommand, self).get_parser(prog_name)\n parser.add_argument(\"--file\", metavar=\"\",\n help=\"downloaded introspection data filename \"\n \"(default: stdout)\")\n parser.add_argument('node', help='baremetal node UUID or name')\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.baremetal_introspection\n data = client.get_data(parsed_args.node,\n raw=bool(parsed_args.file))\n if parsed_args.file:\n with open(parsed_args.file, 'wb') as fp:\n fp.write(data)\n else:\n json.dump(data, sys.stdout)\n\n\nclass InterfaceListCommand(command.Lister):\n \"\"\"List interface data including attached switch port information.\"\"\"\n\n def get_parser(self, prog_name):\n parser = super(InterfaceListCommand, self).get_parser(prog_name)\n parser.add_argument('node_ident', help='baremetal node UUID or name')\n parser.add_argument(\"--vlan\",\n action='append',\n default=[], type=int,\n help=\"List only interfaces configured \"\n \"for this vlan id, can be repeated\")\n display_group = parser.add_mutually_exclusive_group()\n display_group.add_argument(\n '--long', dest='detail',\n action='store_true', default=False,\n help=\"Show detailed information about interfaces.\")\n display_group.add_argument(\n '--fields', nargs='+', dest='fields',\n metavar='',\n choices=sorted(res.InterfaceResource(detailed=True).fields),\n help=\"Display one or more fields. 
\"\n \"Can not be used when '--long' is specified\")\n\n return parser\n\n def take_action(self, parsed_args):\n\n client = self.app.client_manager.baremetal_introspection\n\n # If --long defined, use all fields\n interface_res = res.InterfaceResource(parsed_args.fields,\n parsed_args.detail)\n\n rows = client.get_all_interface_data(parsed_args.node_ident,\n interface_res.fields,\n vlan=parsed_args.vlan)\n\n return interface_res.labels, rows\n\n\nclass InterfaceShowCommand(command.ShowOne):\n \"\"\"Show interface data including attached switch port information.\"\"\"\n\n COLUMNS = (\"Field\", \"Value\")\n\n def get_parser(self, prog_name):\n parser = super(InterfaceShowCommand, self).get_parser(prog_name)\n parser.add_argument('node_ident', help='baremetal node UUID or name')\n parser.add_argument('interface', help='interface name')\n parser.add_argument(\n '--fields', nargs='+', dest='fields',\n metavar='',\n choices=sorted(res.InterfaceResource(detailed=True).fields),\n help=\"Display one or more fields.\")\n\n return parser\n\n def take_action(self, parsed_args):\n\n client = self.app.client_manager.baremetal_introspection\n\n if parsed_args.fields:\n interface_res = res.InterfaceResource(parsed_args.fields)\n else:\n # Show all fields in detailed resource\n interface_res = res.InterfaceResource(detailed=True)\n\n iface_dict = client.get_interface_data(parsed_args.node_ident,\n parsed_args.interface,\n interface_res.fields)\n\n return tuple(zip(*(iface_dict.items())))\n","repo_name":"numvc/LuxoftBot","sub_path":"venv/Lib/site-packages/ironic_inspector_client/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":14006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"18375735266","text":"import models\nimport processors\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n\n# Seed random number generator\nseed = 20180517\nrng = np.random.RandomState(seed)\n\n# Define wind model parameters\nwind_model_params = {\n 'sim_region': models.Rectangle(0., -50., 100., 50.),\n 'nx': 21,\n 'ny': 21,\n 'u_av': 1.,\n 'v_av': 0.,\n 'Kx': 2.,\n 'Ky': 2.,\n 'noise_gain': 20.,\n 'noise_damp': 0.1,\n 'noise_bandwidth': 0.2,\n}\n\n# Create wind model object\nwind_model = models.WindModel(noise_rand=rng, **wind_model_params)\n\n# Define plume simulation region\n# This is a subset of the wind simulation region to minimise boundary effects\nsim_region = models.Rectangle(0., -12.5, 50., 12.5)\n\n# Define plume model parameters\nplume_model_params = {\n 'source_pos': (5., 0., 0.),\n 'centre_rel_diff_scale': 2.,\n 'puff_release_rate': 10,\n 'puff_init_rad': 0.001**0.5,\n 'puff_spread_rate': 0.001,\n 'init_num_puffs': 10,\n 'max_num_puffs': 1000,\n 'model_z_disp': True,\n}\n\n# Create plume model object\nplume_model = models.PlumeModel(\n prng=rng, sim_region=sim_region, wind_model=wind_model, \n **plume_model_params)\n\n# Define concentration array (image) generator parameters\narray_gen_params = {\n 'array_z': 0.,\n 'nx': 500,\n 'ny': 250,\n 'puff_mol_amount': 8.3e8\n}\n\n# Create concentration array generator object\narray_gen = processors.ConcentrationArrayGenerator(\n array_xy_region=sim_region, **array_gen_params)\n \n# Set up figure\nfig = plt.figure(figsize=(5, 2.5))\nax = fig.add_axes([0., 0., 1., 1.])\nax.axis('off')\n\n# Display initial concentration field as image\nconc_array = array_gen.generate_single_array(plume_model.puff_array)\nim_extents = (sim_region.x_min, 
sim_region.x_max,\n              sim_region.y_min, sim_region.y_max)\nconc_im = ax.imshow(\n    conc_array.T, extent=im_extents, vmin=0., vmax=1e10, cmap='Reds')\n\n# Simulation timestep\ndt = 0.01\n\n# Define animation update function\ndef update(i):\n    # Do 10 time steps per frame update\n    for k in range(10):\n        wind_model.update(dt)\n        plume_model.update(dt)\n    conc_array = array_gen.generate_single_array(plume_model.puff_array)\n    conc_im.set_data(conc_array.T)\n    return [conc_im]\n\n# Animate plume concentration and save as MP4\nanim = FuncAnimation(fig, update, frames=400, repeat=False)\n#anim.save('plume.mp4', dpi=100, fps=20, extra_args=['-vcodec', 'libx264'])\n","repo_name":"alexliberzonlab/mothpy","sub_path":"mothpy/pompy_comparison.py","file_name":"pompy_comparison.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"5523299938","text":"\"\"\"\nDictionary Comprehension\n\nSyntax = {key: value for value in iterable}\n\"\"\"\n\n\"\"\"\n# Examples\n\nnumeros = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}\n\nquadrado = {chave: valor ** 2 for chave, valor in numeros.items()} \n# .items() yields each key and value inside a tuple\nprint(quadrado)\n\"\"\"\n\n\"\"\"\nnumeros = {1, 2, 3, 4, 5}\n\nquadrado = {valor: valor ** 2 for valor in numeros}\n# for each value in numeros: use the value as the key and value ** 2 as the value\n\nprint(quadrado)\n\"\"\"\n\n\"\"\"\nchave = 'abcde'\n\nvalores = [1, 2, 3, 4, 5]\n\nmistura = {chave[i]: valores[i] for i in range(0, len(chave))}\n# for i in the range from 0 to len(chave), map the key at position i to the value\n# at position i\n\nprint(mistura)\n\"\"\"\n\n# example with conditional logic\nnumeros = [1, 2, 3, 4, 5]\n\nres = {num: ('even' if num % 2 == 0 else 'odd') for num in numeros}\n# for each number in numeros: bind the number to num and\n# check whether it is even or odd, i.e., as num takes each value\n# that value is used as the key and labelled even or odd\n","repo_name":"rafalacerda1530/Python","sub_path":"Aulas sec 9/Dictnoary comprehesion.py","file_name":"Dictnoary comprehesion.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"37513794155","text":"#Import our configuration functions\nfrom models import Film\nfrom config import get_session\n\n#Get instances for working with DB\nsession = get_session()\n\n#***********************Working with db********************\nprint(\"Working with table: \", Film.__name__)\n'''\nfilms = session.query(Film).session.query(Film)\n#Simple query \nfor film in films: \n    print(\"for\", film.title)\n\n#Using query.filter_by\nq = session.query(Film).filter_by(year = '2018')\nfilm = q.first()\nprint(\"filter_by\", film)\n\n#Using SQL\nq = session.execute(\"SELECT * FROM film\")\nfilm = q.first()\nprint(\"SQL\", film)\n'''\n# User is the name of table that has a column name\ns = Film.select().where(Film.c.id>2)\nresult = session.execute(s)\n\nfor row in result:\n    print (row)","repo_name":"WebSofter/lessnor","sub_path":"sqlalchemy/test_select.py","file_name":"test_select.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72372863492","text":"#! 
/usr/bin/env python\n\nimport sys, re, glob\n\ngsm_file = sys.argv[1]\ngsm_out_dir = sys.argv[2]\noutput_file = sys.argv[3]\nmotif_count_thres = 5 \n# motif_count_thres = 2\n\n\nsample_motif = {}\nmotif2splicing_info = {}\nwith open(gsm_file, 'r') as hin:\n\n header2ind = {}\n header = hin.readline().rstrip('\\n').split('\\t')\n for (i, cname) in enumerate(header):\n header2ind[cname] = i\n\n for line in hin:\n F = line.rstrip('\\n').split('\\t')\n sample_motif[F[header2ind[\"Sample_Name\"]] + '\\t' + F[header2ind[\"Motif_Pos\"]]] = 1\n\n if F[header2ind[\"Motif_Pos\"]] not in motif2splicing_info: motif2splicing_info[F[header2ind[\"Motif_Pos\"]]] = []\n \n splicing_info = '\\t'.join(F[header2ind[x]] for x in [\"Splicing_Key\", \"Splicing_Class\", \"Is_Inframe\"])\n \n if splicing_info not in motif2splicing_info[F[header2ind[\"Motif_Pos\"]]]:\n motif2splicing_info[F[header2ind[\"Motif_Pos\"]]].append(splicing_info)\n\n\n# count motif count\nmotif2count = {}\nfor sm in sample_motif:\n sample, motif = sm.split('\\t')\n if motif not in motif2count: motif2count[motif] = 0\n motif2count[motif] = motif2count[motif] + 1\n\n\n# check splicing files\nsample2SJ_file = {}\nsample2IR_file = {}\nsample2weight = {}\ngsm_input_files = glob.glob(gsm_out_dir + \"/*.mut_SJ_IR_list.txt\")\n\nfor gsm_input in sorted(gsm_input_files):\n with open(gsm_input, 'r') as hin:\n header2ind = {}\n header = hin.readline().rstrip('\\n').split('\\t')\n for (i, cname) in enumerate(header):\n header2ind[cname] = i\n\n for line in hin:\n F = line.rstrip('\\n').split('\\t')\n sample = F[header2ind[\"Sample_Name\"]]\n sample2SJ_file[sample] = F[header2ind[\"SJ_File\"]]\n sample2IR_file[sample] = F[header2ind[\"IR_File\"]]\n sample2weight[sample] = F[header2ind[\"Weight\"]]\n\n\nprocessed_keys = {}\nhout = open(output_file, 'w')\nprint >> hout, '\\t'.join([\"Cancer_Type\", \"Gene_Symbol\", \"Sample_Name\", \"Mutation_Key\", \"Motif_Pos\", \"Mutation_Type\", \"Is_Canonical\",\n \"Splicing_Key\", \"Splicing_Class\", \"Is_Inframe\", \"Supporting_Read_Num\", \"Weight\"])\n\nwith open(gsm_file, 'r') as hin:\n\n header2ind = {}\n header = hin.readline().rstrip('\\n').split('\\t')\n for (i, cname) in enumerate(header):\n header2ind[cname] = i\n\n for line in hin:\n F = line.rstrip('\\n').split('\\t')\n if int(motif2count[F[header2ind[\"Motif_Pos\"]]]) <= motif_count_thres: continue\n\n temp_key = '\\t'.join(F[header2ind[x]] for x in [\"Cancer_Type\", \"Gene_Symbol\", \"Sample_Name\", \n \"Mutation_Key\", \"Motif_Pos\", \"Mutation_Type\", \"Is_Canonical\"])\n\n if temp_key in processed_keys: continue\n processed_keys[temp_key] = 1\n\n sample = F[header2ind[\"Sample_Name\"]]\n motif_pos = F[header2ind[\"Motif_Pos\"]]\n weight = sample2weight[sample]\n\n for sp_info in motif2splicing_info[motif_pos]:\n sp_key, sp_class, is_inframe = sp_info.split('\\t')\n pos_match = re.match(r'([\\w\\d]+)\\:(\\d+)\\-(\\d+)', sp_key)\n schr, sstart, send = pos_match.group(1), pos_match.group(2), pos_match.group(3)\n\n sp_count = 0\n # SJ file\n if sstart != send:\n with open(sample2SJ_file[sample], 'r') as hin2:\n for line2 in hin2:\n F2 = line2.rstrip('\\n').split('\\t')\n if schr == F2[0] and sstart == F2[1] and send == F2[2]:\n sp_count = int(F2[6])\n \n # IR_file \n if sstart == send:\n with open(sample2IR_file[sample], 'r') as hin2:\n header2ind2 = {}\n header2 = hin2.readline().rstrip('\\n').split('\\t')\n for (i, cname) in enumerate(header2):\n header2ind2[cname] = i\n \n for line2 in hin2:\n F2 = line2.rstrip('\\n').split('\\t')\n if schr == 
F2[header2ind2[\"Chr\"]] and sstart == F2[header2ind2[\"Boundary_Pos\"]]:\n sp_count = int(F2[header2ind2[\"Intron_Retention_Read_Count\"]])\n\n print >> hout, temp_key + '\\t' + sp_key + '\\t' + sp_class + '\\t' + is_inframe + '\\t' + str(sp_count) + '\\t' + weight\n\n\n","repo_name":"friend1ws/savnet_paper","sub_path":"analysis/TCGA/result/script/subscript_matome/gather_read_num.py","file_name":"gather_read_num.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"12237459326","text":"# Script to sync with upstream.\n#\n# Meant to be called via CI\n\nimport sys\n\nimport configargparse\nfrom git import Repo\nfrom git.exc import GitError\n\nREMOTE_URL = \"https://github.com/{}/{}.git\"\n\n\nclass SyncException(Exception):\n def __init__(self, message):\n super().__init__(self)\n self.message = message\n\n\nif __name__ == \"__main__\":\n config = configargparse.ArgParser()\n\n # Get our required arguments, they can be specified by environment variables\n config.add('--upstream-org', env_var='UPSTREAM_ORG', help='The upstream github organization', required=True)\n config.add('--upstream-repo', env_var='UPSTREAM_REPO', help='The upstream repository', required=True)\n config.add('--upstream-branch', env_var='UPSTREAM_BRANCH', help='The upstream branch', required=True)\n config.add('--origin-branch', env_var='ORIGIN_BRANCH', help='The downstream branch that will be pushed to the '\n 'origin remote', required=True)\n config.add('path', help='Path to local repository')\n options = config.parse_args()\n\n repo = Repo(options.path)\n\n # Fail if upstream remote is already defined\n if 'upstream' in repo.remotes:\n print('There is already an \"upstream\" remote. Exiting.')\n sys.exit(-1)\n\n try:\n # Fail if origin remote is not defined\n if 'origin' not in repo.remotes:\n raise SyncException('There is no \"origin\" remote to push to.')\n\n # Fetch the upstream remote\n upstream_url = REMOTE_URL.format(options.upstream_org, options.upstream_repo)\n print(\"Fetching upstream: {}.\".format(upstream_url))\n upstream = repo.create_remote('upstream', upstream_url)\n fetch_info = upstream.fetch(prune=True)\n for info in fetch_info:\n if info.flags & info.ERROR:\n raise SyncException(info.note)\n\n # Fetch the origin remote\n origin = repo.remotes.origin\n print(\"Fetching origin: {}.\".format(origin.url))\n fetch_info = origin.fetch(prune=True)\n for info in fetch_info:\n if info.flags & info.ERROR:\n raise SyncException(info.note)\n\n # Fail if the upstream branch does not exist\n if options.upstream_branch not in upstream.refs:\n raise SyncException('Upstream branch ({}) does not exist.'.format(options.upstream_branch))\n\n # Origin branch does not exist in local repo\n if options.origin_branch not in repo.heads:\n repo.create_head(options.origin_branch, upstream.refs[options.upstream_branch])\n\n if options.origin_branch in origin.refs:\n print(\"Origin branch ({}) exists on remote.\".format(options.origin_branch))\n origin_commit = origin.refs[options.origin_branch].commit\n print(\"Origin commit: {}\".format(origin_commit))\n else:\n origin_commit = None\n print(\"Origin branch ({}) does not exist on remote, creating it.\".format(options.origin_branch))\n\n upstream_commit = upstream.refs[options.upstream_branch].commit\n print(\"Upstream commit: {}\".format(upstream_commit))\n\n if upstream_commit == origin_commit:\n print(\"Already up to date. 
Exiting without update.\")\n else:\n repo.heads[options.origin_branch].reference = upstream.refs[options.upstream_branch].commit\n print(\"Pushing to origin ({})\".format(repo.heads[options.origin_branch]))\n push_info = origin.push(repo.heads[options.origin_branch], force=True)\n for info in push_info:\n if info.flags & info.ERROR:\n raise SyncException(info.summary)\n print(\"Updated!\")\n\n except SyncException as e:\n error = \"\\n\".join([e.message.strip(), \"Exiting.\"])\n print(error)\n sys.exit(-1)\n\n except GitError as e:\n print(\"Git Error!\")\n print(e)\n sys.exit(-2)\n\n finally:\n # clean up remotes\n if 'upstream' in repo.remotes:\n repo.delete_remote(repo.remotes.upstream)\n","repo_name":"ThePalaceProject/ci-scripts","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13445382392","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/env python\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# !!! from django.template import Template\n\nimport games\nimport languageTemplate\n\nimport transliterate\nimport unicodeinfo # Some character data\n\nimport wordsearch\n\nimport json\nimport logging\nimport os\nimport unicodedata\nimport webapp2\n\nfrom google.appengine.ext.webapp import template\n\ntry:\n unichr\nexcept NameError:\n unichr = chr\n\ntry:\n UNICODE_EXISTS = bool(type(unicode))\nexcept NameError:\n unicode = lambda s: str(s)\n\ntry:\n xrange\nexcept NameError:\n xrange = range\n\n\n# Explicitly NOT PART OF THE CLASS\n\n\n# Shows keyboards for Language\nclass LanguagesHomeHandler(webapp2.RequestHandler):\n def get(self, match=None):\n # Match is the actual url route matched.\n req = webapp2.get_request()\n # Can use this for additional information\n langInfo = self.app.config.get('langInfo')\n\n try:\n text_direction = langInfo.direction\n except AttributeError:\n text_direction = 'ltr'\n\n try:\n test_data = langInfo.test_data\n except AttributeError:\n test_data = ''\n try:\n variation_sequence = langInfo.variation_sequence\n except:\n variation_sequence = None\n\n try:\n to_keyman = langInfo.to_keyman\n except:\n to_keyman = None\n\n try:\n encoded_ranges = langInfo.encoded_ranges\n except:\n encoded_ranges = None\n\n try:\n allFonts = langInfo.allFonts\n except:\n allFonts = False\n\n try:\n home_html = langInfo.custom_home_template\n except:\n home_html = 'HTML/demo_general.html'\n\n # Information on the characters in the Unicode range\n unicodeCharData = {}\n\n try:\n for interval in langInfo.unicodeRanges:\n start = interval[0]\n end = interval[1] + 1\n\n for x in range(start, end):\n chr = unichr(x)\n unicodeCharData[chr] = [\n unicodedata.name(chr), unicodedata.category(chr),\n ]\n except:\n print('!!! 
Unicode range not set')\n\n # Before all text in the output area, e.g., U+202E to force RTL\n try:\n insert_text = langInfo.insert_text\n except:\n insert_text = ''\n \n template_values = {\n 'allFonts': allFonts,\n 'direction': text_direction,\n 'insert_text': insert_text,\n 'encoded_ranges': encoded_ranges,\n 'language': langInfo.Language,\n 'langTag': langInfo.LanguageCode,\n 'font_list': langInfo.unicode_font_list,\n 'lang_list': langInfo.lang_list,\n 'kb_list': langInfo.kb_list,\n 'langInfo': langInfo,\n 'links': langInfo.links,\n 'showTools': self.request.get('tools', None),\n 'test_data': test_data,\n 'variation_sequence': variation_sequence,\n 'to_keyman': to_keyman,\n }\n path = os.path.join(os.path.dirname(__file__), home_html)\n self.response.out.write(template.render(path, template_values))\n\n\n# AJAX handler for converter\nclass ConvertHandler(webapp2.RequestHandler):\n def get(self, match=None):\n # TODO: Get the text values\n # Call transliterator\n # Return JSON structure with values.\n\n langInfo = self.app.config.get('langInfo')\n\n try:\n if langInfo.transliterator:\n transliterator = transliterate.Transliterate(\n langInfo.transliterator.TRANS_LIT_RULES,\n langInfo.transliterator.DESCRIPTION)\n except AttributeError:\n transliterator = None\n\n out_text = '\\u11103\\u11101\\u11103'\n message = 'TBD'\n error = ''\n\n result = {\n 'outText': out_text,\n 'message': message,\n 'error': error,\n 'language': langInfo.Language,\n 'lang_list': langInfo.lang_list,\n 'langTag': langInfo.LanguageCode,\n 'showTools': self.request.get('tools', None),\n # 'summary' : transliterator.getSummary(),\n }\n self.response.out.write(json.dumps(result))\n\n\ndef surrogate_to_utf32(high, low):\n return (high << 10) + low - 0x35fdc00\n\n\nclass DiacriticHandler(webapp2.RequestHandler):\n def get(self, match=None):\n langInfo = self.app.config.get('langInfo')\n\n base_num = self.request.get('base', None)\n if base_num:\n base_char = unichr(int(base_num, base=16))\n else:\n base_char = langInfo.base_consonant\n\n # Generate combinations of base + diacritic pairs\n combos = []\n table = []\n row_names = []\n for x in langInfo.diacritic_list:\n if len(x) > 1:\n utf32 = surrogate_to_utf32(ord(x[0]), ord(x[1]))\n row = ['%s (0x%x)' % (x, utf32)]\n else:\n row = [x + ' (%4x)' % ord(x)]\n row_names.append(row[0])\n for y in langInfo.diacritic_list:\n text = base_char + x + y\n combos.append({'text': text,\n 'codes': ['%4x ' % ord(c) for c in text]})\n row.append(text)\n table.append(row)\n\n try:\n text_direction = langInfo.direction\n except AttributeError:\n text_direction = 'ltr'\n\n template_values = {\n 'direction': text_direction,\n 'language': langInfo.Language,\n 'base_char': base_char.encode('utf-8'),\n 'base_hex': ['%4x' % ord(x) for x in langInfo.base_consonant],\n 'diacritics': [x for x in langInfo.diacritic_list],\n 'diacritics_hex': row_names, # ['%4x ' % ord(y) for y in langInfo.diacritic_list],\n 'combinations': combos,\n 'showTools': self.request.get('tools', None),\n 'table': table,\n 'unicode_font_list': langInfo.unicode_font_list,\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/diacritics.html')\n\n self.response.out.write(template.render(path, template_values))\n\n\n# Presents UI for conversions from font encoding to Unicode.\nclass ConvertUIHandler(webapp2.RequestHandler):\n def get(self, match=None):\n\n langInfo = self.app.config.get('langInfo')\n\n # All old characters\n try:\n oldInput = langInfo.test_chars\n test_char_list = langInfo.test_chars\n except 
AttributeError:\n oldInput = u''\n\n oldChars = (u'\\u0001 !\"\\u0023\\u0024%&\\'()*+,-./' +\n '0123456789:;<=>?@' +\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ[ \\\\ ]^_`' +\n 'abcdefghijklmnopqrstuvwxyz{|}~')\n text = self.request.get('text', oldChars)\n font = self.request.get('font')\n try:\n testStringList = langInfo.testStringList\n except:\n testStringList = [\n {'name': 'Test 1', # Note: must escape the single quote.\n 'string': u'\\u0004\\u0005\\u0006\\u0007\\u0008\\u0009' +\n '\\u000a\\u000b'},\n ]\n text = ''\n #print('********* %s' % langInfo.encodedRanges)\n #try:\n encode_chars = []\n # Get conversion data from explicit list of encoded hex values.\n try:\n for interval in langInfo.encodedRanges:\n start = interval[0]\n end = interval[1] + 1\n\n for x in range(start, end):\n encode_chars.append(unichr(x))\n\n text = ''.join(encode_chars)\n except:\n print('----- failed')\n try:\n text = langInfo.convertText\n except:\n text = self.request.get('text', oldChars)\n\n try:\n text_direction = langInfo.direction\n except AttributeError:\n text_direction = 'ltr'\n\n # Handle non-Unicode output.\n try:\n output_font = langInfo.outputFont\n except:\n output_font = 'Unicode'\n\n try:\n unicodeChars = langInfo.unicodeChars\n except:\n unicodeChars = '\\ud804\\udd00'\n unicodeChars += '\\ud804\\udd03'\n unicodeChars += '\\ud804\\udd04'\n unicodeChars += '\\ud804\\udd05'\n unicodeChars += '\\ud804\\udd06'\n\n try:\n unicodeCombiningChars = getCombiningCombos(\n langInfo.baseHexUTF16, langInfo.diacritic_list)\n except:\n unicodeCombiningChars = None\n\n try:\n encodingList = langInfo.encoding_font_list\n except:\n encodingList = None\n\n try:\n variation_sequence = langInfo.variation_sequence\n except:\n variation_sequence = None\n\n try:\n converters = langInfo.converters\n except:\n converters = None\n\n template_values = {\n 'converters': converters,\n 'isTransLit': False,\n 'font': font,\n 'language': langInfo.Language,\n 'langTag': langInfo.LanguageCode,\n 'encodingList': encodingList,\n 'lang_list': langInfo.lang_list,\n 'kb_list': langInfo.kb_list,\n 'direction': text_direction,\n 'unicodeFonts': langInfo.unicode_font_list,\n 'links': langInfo.links,\n 'oldChars': oldChars,\n 'oldInput': oldInput,\n 'outputFont': output_font,\n 'text': text,\n 'textStrings': testStringList,\n 'showTools': self.request.get('tools', None),\n 'unicodeChars': unicodeChars,\n 'combiningChars': unicodeCombiningChars,\n 'variation_sequence': variation_sequence,\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/translit_general.html')\n self.response.out.write(template.render(path, template_values))\n\n# Create a string with combinations of the combining characters,\n# following the given base character.\n# TODO: Finish this.\ndef getCombiningCombos(baseHexChar, diacritic_list):\n\n combineOffsets = range(0x1d, 0x1e, 0x1f).append(range(0x20, 0x2b))\n\n testString = u''\n for c0 in diacritic_list:\n for c1 in diacritic_list:\n testString += baseHexChar + c0 + c1 + ' '\n testString += '\\u000a'\n return testString\n\ndef encodingFontListToUnicodeList(encodinglist):\n unicodeList = []\n for item in encodinglist:\n unicodeList.append({\n 'family': item['font_name'],\n 'longName': item['display_name'],\n 'source': item['font_path']\n }\n )\n return unicodeList\n\ndef unicodeFontListToEncodingList(unicodeList):\n encodinglist = []\n for item in unicodeList:\n encodinglist.append({\n 'font_name': item['family'],\n 'display_name': item['longName'],\n 'font_path': item['source']\n }\n )\n return encodinglist\n\nclass 
EncodingRules(webapp2.RequestHandler):\n def get(self, match=None):\n\n langInfo = self.app.config.get('langInfo')\n try:\n encoding_tables = langInfo.encoding_tables\n except:\n encoding_tables = None\n\n try:\n converter_list = langInfo.converters\n except:\n converter_list = None\n try:\n conversion_data = langInfo.conversion_data\n except:\n conversion_data = None\n\n template_values = {\n 'converterJS': '/js/' + langInfo.LanguageCode + 'Converter.js',\n 'converter_list': converter_list,\n 'conversion_data': conversion_data,\n 'language': langInfo.Language,\n 'lang_list': langInfo.lang_list,\n 'encoding_list': langInfo.encoding_font_list,\n 'encoding_tables': encoding_tables,\n 'unicode_list': langInfo.unicode_font_list,\n 'kb_list': langInfo.kb_list,\n 'links': langInfo.links,\n 'showTools': self.request.get('tools', None),\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/encodingConvert.html')\n self.response.out.write(template.render(path, template_values))\n\n\n# Cloned from KeyboardTansformsHandler.\nclass PhoneticKbHandler(webapp2.RequestHandler):\n def get(self, match=None):\n\n langInfo = self.app.config.get('langInfo')\n\n try:\n converter_list = langInfo.converters\n except:\n converter_list = None\n\n try:\n text_functions = langInfo.text_functions\n except:\n text_functions = None\n\n try:\n print('unicodeinfo = %s' % langInfo.unicode_database)\n unicode_info = unicodeinfo.UnicodeData(langInfo.unicode_database)\n print('UNICODE_INFO = %s' % unicode_info)\n unicode_data = unicode_info.numTextString()\n print('UNICODE_DATA = %s' % unicode_data)\n except BaseException as err:\n print('unicodeinfo not read: %s' % err)\n unicode_data = ''\n\n template_values = {\n 'converterJS': '/js/' + langInfo.LanguageCode + 'Converter.js',\n 'converter_list': converter_list,\n 'language': langInfo.Language,\n 'lang_list': langInfo.lang_list,\n 'encoding_list': langInfo.encoding_font_list,\n 'unicode_list': langInfo.unicode_font_list,\n 'kb_list': langInfo.kb_list,\n 'links': langInfo.links,\n 'showTools': self.request.get('tools', None),\n 'text_functions': text_functions,\n 'unicode_data': unicode_data,\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/phoneticTable.html')\n self.response.out.write(template.render(path, template_values))\n\n\nclass KeyboardTransforms(webapp2.RequestHandler):\n def get(self, match=None):\n\n langInfo = self.app.config.get('langInfo')\n\n try:\n converter_list = langInfo.converters\n except:\n converter_list = None\n\n try:\n text_functions = langInfo.text_functions\n except:\n text_functions = None\n\n template_values = {\n 'converterJS': '/js/' + langInfo.LanguageCode + 'Converter.js',\n 'converter_list': converter_list,\n 'language': langInfo.Language,\n 'lang_list': langInfo.lang_list,\n 'encoding_list': langInfo.encoding_font_list,\n 'unicode_list': langInfo.unicode_font_list,\n 'kb_list': langInfo.kb_list,\n 'links': langInfo.links,\n 'showTools': self.request.get('tools', None),\n 'text_functions': text_functions\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/keyboardTransforms.html')\n self.response.out.write(template.render(path, template_values))\n\nclass KeyboardTransforms2(webapp2.RequestHandler):\n # A test version of KB transforms that support selection by keyboard\n def get(self, match=None):\n\n langInfo = self.app.config.get('langInfo')\n\n try:\n converter_list = langInfo.converters\n except:\n converter_list = None\n\n try:\n text_functions = langInfo.text_functions\n except:\n text_functions = None\n\n 
template_values = {\n 'converterJS': '/js/' + langInfo.LanguageCode + 'Converter.js',\n 'converter_list': converter_list,\n 'language': langInfo.Language,\n 'lang_list': langInfo.lang_list,\n 'encoding_list': langInfo.encoding_font_list,\n 'unicode_list': langInfo.unicode_font_list,\n 'kb_list': langInfo.kb_list,\n 'links': langInfo.links,\n 'showTools': self.request.get('tools', None),\n 'text_functions': text_functions\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/kbTransforms2.html')\n self.response.out.write(template.render(path, template_values))\n\n\nclass Downloads(webapp2.RequestHandler):\n def get(self, match=None):\n\n langInfo = self.app.config.get('langInfo')\n # To possibly limit fonts from download\n try:\n public_unicode_fonts = langInfo.public_font_resources\n except:\n public_unicode_fonts = langInfo.unicode_font_list\n\n try:\n text_file_list = langInfo.text_file_list\n except:\n text_file_list = None\n\n template_values = {\n 'language': langInfo.Language,\n 'language_native': langInfo.Language_native,\n 'unicode_font_list': public_unicode_fonts,\n 'file_list': text_file_list,\n 'showTools': self.request.get('tools', None),\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/downloads.html')\n self.response.out.write(template.render(path, template_values))\n\nclass RenderPage(webapp2.RequestHandler):\n def get(self, match=None):\n\n langInfo = self.app.config.get('langInfo')\n\n kb_list = [\n {'shortName': langInfo.LanguageCode,\n 'longName': langInfo.Language + ' Unicode',\n }\n ]\n try:\n text_direction = langInfo.direction\n except AttributeError:\n text_direction = 'ltr'\n\n template_values = {\n 'converterJS': \"/js/' + langInfo.LanguageCode + 'Converter.js\",\n 'direction': text_direction,\n 'language': langInfo.Language,\n 'lang_list': langInfo.lang_list,\n 'encoding_list': langInfo.encoding_font_list,\n 'unicode_list': langInfo.unicode_font_list,\n 'kb_list': langInfo.kb_list,\n 'links': langInfo.links,\n 'showTools': self.request.get('tools', None),\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/renderCombos.html')\n self.response.out.write(template.render(path, template_values))\n\nclass DictionaryInput(webapp2.RequestHandler):\n def get(self, match=None):\n req = webapp2.get_request()\n top_path = req.path.split('/')\n lang_code = top_path[1]\n\n langInfo = self.app.config.get('langInfo')\n\n # user_info = getUserInfo(self.request.url)\n\n template_values = {\n 'lang': langInfo.Language,\n 'lang1': langInfo.dictionaryLang1,\n 'lang2': langInfo.dictionaryLang2,\n 'kb1': langInfo.kb1,\n 'kb2': langInfo.kb2,\n 'showTools': self.request.get('tools', None),\n 'unicodeFontList': langInfo.unicode_font_list,\n\n 'links': langInfo.links,\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/dictionaryInput.html')\n self.response.out.write(template.render(path, template_values))\n\n\n# For N languages in the dictionary\nclass CollationHandler(webapp2.RequestHandler):\n def get(self, match=None):\n req = webapp2.get_request()\n top_path = req.path.split('/')\n lang_code = top_path[1]\n\n langInfo = self.app.config.get('langInfo')\n try:\n collation_string = langInfo.collation_string\n except:\n collation_string = None\n\n # user_info = getUserInfo(self.request.url)\n\n # t = Template(\"My name is {{ person.first_name }}.\")\n\n template_values = {\n 'language': langInfo.Language,\n 'langInfo': langInfo,\n 'collation_data' : langInfo.collation_data,\n 'collation_string': langInfo.collation_string,\n 'converters': langInfo.converters,\n 
'unicodeFontList': langInfo.unicode_font_list,\n 'showTools': self.request.get('tools', None),\n 'links': langInfo.links,\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/collationView.html')\n self.response.out.write(template.render(path, template_values))\n\n\n# For N languages in the dictionary\nclass DictionaryN(webapp2.RequestHandler):\n def get(self, match=None):\n req = webapp2.get_request()\n top_path = req.path.split('/')\n lang_code = top_path[1]\n\n langInfo = self.app.config.get('langInfo')\n\n # user_info = getUserInfo(self.request.url)\n\n # t = Template(\"My name is {{ person.first_name }}.\")\n\n template_values = {\n 'dictionaryNData': langInfo.dictionaryNData,\n\n 'lang1': langInfo.dictionaryLang1,\n 'lang2': langInfo.dictionaryLang2,\n 'kb1': langInfo.kb1,\n 'kb2': langInfo.kb2,\n 'unicodeFontList': langInfo.unicode_font_list,\n 'showTools': self.request.get('tools', None),\n 'links': langInfo.links,\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/dictionaryN.html')\n self.response.out.write(template.render(path, template_values))\n\n# Create an instance of the template and add to configuration.\n# so values can be passed to the classes\ninstance = languageTemplate.languageTemplate()\nbasePath = '/' + instance.LanguageCode\n\n\nclass AllFontTest(webapp2.RequestHandler):\n def get(self):\n utext = self.request.get(\"utext\", \"\")\n encodedText = self.request.get(\"encodedText\", \"\")\n\n langInfo = self.app.config.get('langInfo')\n try:\n langTag = langInfo.LanguageTag\n except:\n langTag = langInfo.LanguageCode\n\n try:\n all_chars = langInfo.all_chars\n except:\n all_chars = None\n\n template_values = {\n 'scriptName': langInfo.Language,\n 'fontFamilies': langInfo.unicode_font_list,\n 'encodedText': encodedText,\n 'utext': utext,\n 'all_chars': all_chars,\n 'language': langInfo.Language,\n 'LanguageTag': langTag,\n 'kb_list': langInfo.kb_list,\n }\n\n path = os.path.join(os.path.dirname(__file__), 'HTML/allFonts.html')\n self.response.out.write(template.render(path, template_values))\n\n# Presents UI for conversions from one Unicode script to another.\n# TODO: use common elements with ConvertUIHandler\nclass TranslitHandler(webapp2.RequestHandler):\n def get(self, match=None):\n\n langInfo = self.app.config.get('langInfo')\n\n # All old characters\n try:\n oldInput = langInfo.test_chars[0]\n test_char_list = langInfo.test_chars\n except AttributeError:\n oldInput = u''\n\n oldChars = (u'\\u0001 !\"\\u0023\\u0024%&\\'()*+,-./' +\n '0123456789:;<=>?@' +\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ[ \\\\ ]^_`' +\n 'abcdefghijklmnopqrstuvwxyz{|}~')\n text = self.request.get('text', oldChars)\n font = self.request.get('font')\n testStringList = [\n {'name': 'Test 1', # Note: must escape the single quote.\n 'string': u'\\u0004\\u0005\\u0006\\u0007\\u0008\\u0009' +\n '\\u000a\\u000b'},\n ]\n\n try:\n text_direction = langInfo.direction\n except AttributeError:\n text_direction = 'ltr'\n\n # Handle non-Unicode output.\n try:\n output_font = langInfo.outputScript\n except:\n output_font = 'Unicode'\n\n try:\n unicodeChars = langInfo.unicodeChars\n except:\n unicodeChars = ''\n\n try:\n unicodeCombiningChars = getCombiningCombos(\n langInfo.baseHexUTF16, langInfo.diacritic_list)\n except:\n unicodeCombiningChars = None\n\n try:\n encodingList = langInfo.encoding_font_list\n except:\n encodingList = None\n\n try:\n variation_sequence = langInfo.variation_sequence\n except:\n variation_sequence = None\n\n try:\n converters = langInfo.converters\n except:\n converters = 
None\n\n try:\n translit_encoding_list = langInfo.translit_encoding_list\n except:\n translit_encoding_list = None\n try:\n translit_kb_list = langInfo.translit_kb_list\n except:\n translit_kb_list = None\n\n template_values = {\n 'converters': converters,\n 'isTransLit': True,\n 'font': font,\n 'language': langInfo.Language,\n 'langTag': langInfo.LanguageCode,\n 'encodingList': encodingList,\n 'lang_list': langInfo.lang_list,\n 'kb_list': langInfo.kb_list,\n 'direction': text_direction,\n 'unicodeFonts': langInfo.unicode_font_list,\n 'links': langInfo.links,\n 'oldChars': oldChars,\n 'oldInput': oldInput,\n 'outputFont': output_font,\n 'text': text,\n 'textStrings': testStringList,\n 'translit_encoding_list': translit_encoding_list,\n 'translit_kb_list': translit_kb_list,\n 'showTools': self.request.get('tools', None),\n 'unicodeChars': unicodeChars,\n 'combiningChars': unicodeCombiningChars,\n 'variation_sequence': variation_sequence,\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/translit_general.html')\n self.response.out.write(template.render(path, template_values))\n\n\n# Exports keyboards to Keyman format\nclass KeyManHandler(webapp2.RequestHandler):\n def get(self, match=None):\n\n langInfo = self.app.config.get('langInfo')\n # To possibly limit fonts from download\n try:\n public_unicode_fonts = langInfo.public_font_resources\n except:\n public_unicode_fonts = langInfo.unicode_font_list\n\n template_values = {\n 'language': langInfo.Language,\n 'language_native': langInfo.Language_native,\n 'unicode_font_list': public_unicode_fonts,\n 'kb_list': langInfo.kb_list,\n 'langTag': langInfo.LanguageCode,\n 'font_list': langInfo.unicode_font_list,\n 'lang_list': langInfo.lang_list,\n }\n home_html = 'HTML/km_kb.html'\n path = os.path.join(os.path.dirname(__file__), home_html)\n self.response.out.write(template.render(path, template_values))\n\n\nclass AllFontTest(webapp2.RequestHandler):\n def get(self):\n langInfo = self.app.config.get('langInfo')\n try:\n public_unicode_fonts = langInfo.public_font_resources\n except:\n public_unicode_fonts = langInfo.unicode_font_list\n utext = self.request.get(\"utext\", \"\")\n encoded_text = self.request.get(\"encodedText\", \"\")\n #logging.info('AllFontTest utext =>%s<' % utext)\n template_values = {\n 'scriptName': langInfo.Language,\n 'fontFamilies': public_unicode_fonts,\n 'encodedText': encoded_text,\n 'utext': utext,\n 'language': langInfo.Language,\n 'LanguageTag': langInfo.LanguageCode\n }\n\n path = os.path.join(os.path.dirname(__file__), 'HTML/allFonts.html')\n self.response.out.write(template.render(path, template_values))\n\nclass CharacterTableHandler(webapp2.RequestHandler):\n def get(self):\n langInfo = self.app.config.get('langInfo')\n\n # String containing character name data\n try:\n charNames = langInfo.charNames.split('\\n')\n charNames = '!!!'.join(charNames)\n except:\n charNames = None\n\n template_values = {\n 'language': langInfo.Language,\n 'LanguageTag': langInfo.LanguageCode,\n 'charTable': charNames,\n 'charNameData': charNames,\n 'kb_list': langInfo.kb_list,\n 'unicode_font_list': langInfo.unicode_font_list,\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/characterNames.html')\n self.response.out.write(template.render(path, template_values))\n\nclass WordSearchHandler(webapp2.RequestHandler):\n def get(self):\n langInfo = self.app.config.get('langInfo')\n\n testGridSize = self.request.get('gfactor', 1.4)\n\n testData = self.request.get('testing', '')\n\n try:\n charNames = 
langInfo.charNames.split('\\n')\n charNames = '!!!'.join(charNames)\n except:\n charNames = None\n\n try:\n direction = langInfo.direction\n except:\n direction = 'ltr'\n\n try:\n combiningChars = langInfo.unicodeCombiningChars\n except:\n combiningChars = None\n\n try:\n letterFillList = langInfo.fillChars\n except:\n letterFillList = []\n #logging.info('letterFillList = %s' % letterFillList)\n \n # Make it easy to split the characters\n combiners = '||'.join(langInfo.unicodeCombiningChars)\n fillers = '||'.join(letterFillList)\n #logging.info('Diacritics: %s' % (combiners))\n template_values = {\n 'language': langInfo.Language,\n 'LanguageTag': langInfo.LanguageCode,\n 'kb_list': langInfo.kb_list,\n 'charTable': charNames,\n 'charNameData': charNames,\n 'unicodeCombiningChars': combiners,\n 'letterFillList': fillers,\n 'unicode_font_list': langInfo.unicode_font_list,\n 'testData': testData,\n 'testGridSize': testGridSize,\n 'direction': direction,\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/wordsearch.html')\n self.response.out.write(template.render(path, template_values))\n\nclass NumeralsHandler(webapp2.RequestHandler):\n def get(self):\n langInfo = self.app.config.get('langInfo')\n\n # String containing character name data\n try:\n charNames = langInfo.charNames.split('\\n')\n charNames = '!!!'.join(charNames)\n except:\n charNames = None\n\n try:\n combiningChars = langInfo.unicodeCombiningChars\n except:\n combiningChars = None\n\n try:\n letterFillList = langInfo.fillChars\n except:\n letterFillList = []\n\n try:\n combingChars = langInfo.unicodeCombiningChars\n except:\n combingChars = None\n\n try:\n numbersImage = langInfo.numbersImage\n except:\n numbersImage = None\n \n langNumerals = langInfo.LanguageCode + \"Numerals\"\n template_values = {\n 'language': langInfo.Language,\n 'LanguageTag': langInfo.LanguageCode,\n 'langNumerals': langNumerals,\n 'charTable': charNames,\n 'charNameData': charNames,\n 'unicodeCombiningChars': combiningChars,\n 'letterFillList': letterFillList,\n 'unicodeFontList': langInfo.unicode_font_list,\n 'font_list': langInfo.unicode_font_list,\n 'numbersImage': numbersImage,\n }\n path = os.path.join(os.path.dirname(__file__), 'HTML/numerals.html')\n self.response.out.write(template.render(path, template_values))\n\n\nclass CalendarHandler(webapp2.RequestHandler):\n\n def get(self):\n langInfo = self.app.config.get('langInfo')\n\n try:\n weekDays = langInfo.weekDays\n except:\n weekDays = None\n\n try:\n months = langInfo.months\n except:\n months = None\n\n template_values = {\n 'language': langInfo.Language,\n 'languageTag': langInfo.LanguageCode,\n 'langNumerals': langInfo.LanguageCode + \"Numerals\",\n 'kb_list': langInfo.kb_list,\n 'months': months,\n 'weekdays': weekDays,\n 'unicodeFontList': langInfo.unicode_font_list,\n }\n \n path = os.path.join(os.path.dirname(__file__), 'HTML/calendar.html')\n self.response.out.write(template.render(path, template_values))\n\n \n\n# Error catching\ndef handle_301(request, response, exception):\n logging.exception(exception)\n response.write('301 error.\\n\\n')\n response.write('Request = %s\\n' % request.url)\n response.set_status(301)\n\n\ndef handle_404(request, response, exception):\n logging.exception(exception)\n response.write('Sorry, but we do not have that page in BASE. 
Please check your link and try again.\\n\\n')\n response.write('Request = %s\\n' % request.url)\n response.set_status(404)\n\n\ndef handle_500(request, response, exception):\n logging.exception(exception)\n response.write('A server error occurred!\\n\\n')\n response.write('Request = %s\\n' % request.url)\n response.set_status(500)\n\n\napp = webapp2.WSGIApplication(\n [\n ],\n debug=True,\n config={'langInfo': instance}\n)\n\napp.router.add((basePath + '/downloads/', Downloads))\napp.router.add((basePath + '/encodingRules/', EncodingRules))\napp.router.add((basePath + '/', LanguagesHomeHandler))\napp.router.add((basePath + '/dictionaryInput/', DictionaryInput))\napp.router.add((basePath + '/kbtransforms/', KeyboardTransforms))\napp.router.add((basePath + '/collation/', CollationHandler))\napp.router.add((basePath + '/combos/', RenderPage))\napp.router.add((basePath + '/keyman/', KeyManHandler))\napp.router.add((basePath + '/AllFonts/', AllFontTest))\napp.router.add((basePath + '/charTable/', CharacterTableHandler)),\napp.router.add((basePath + '/wordsearch/', WordSearchHandler)),\n\napp.router.add((basePath + '/numerals/', NumeralsHandler)),\n\n\napp.router.add((basePath + '/games/generatewordsearch/',\n games.GenerateWordSearchHandler)),\n\napp.router.add((basePath + '/phonetickb/', PhoneticKbHandler)),\n\n# Future\n#app.router.add((basePath + '/games/generatewordsearchDFS/',\n# games.GenerateWordSearchDFSHandler)),\n\napp.error_handlers[301] = handle_301\napp.error_handlers[404] = handle_404\napp.error_handlers[500] = handle_500\n","repo_name":"sven-oly/LanguageTools","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":31119,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"44"} +{"seq_id":"36887145298","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef Z(x,y):\n return np.prod([y,y])-np.prod([x,x])\n\ndef domeniu():\n X=np.arange(-100,100,1)\n Y=np.arange(-20,20,0.2)\n G=Z(X,Y)\n return X,Y,G\n\ndef plotare(X,Y,G):\n plt.plot(X,Y,G,color=\"red\")\n plt.xlabel(\"X\")\n plt.ylabel(\"Y\")\n plt.show()\n\nX,Y,G=domeniu()\nplotare(X,Y,G)\n","repo_name":"AlexMuresan1/Python_exercises","sub_path":"Lucrarea_6_Exercitiul_10_Muresan_Alexandru.py","file_name":"Lucrarea_6_Exercitiul_10_Muresan_Alexandru.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"6794680200","text":"from fabric.api import task, sudo, run, local, put, hide\nfrom fabric.contrib.files import append\nfrom uuid import uuid4\n\nfrom ..settings import PROJECT_USER, PROJECT_GROUP\n\n__all__ = [\n 'mkdir', 'chmod', 'set_owner', 'create_dir', 'gzip_dir', 'tail',\n 'rm', 'append_to_file', 'touch', 'make_tmp_file']\n\n@task\ndef mkdir(path, use_sudo=True):\n \"\"\"Creates a directory.\"\"\"\n command = sudo if use_sudo else run\n command('mkdir -p %s' % path)\n\n\n@task\ndef chmod(path, mode):\n \"\"\"Change fs object permissions.\"\"\"\n sudo('chmod %s %s' % (mode, path))\n\n\n@task\ndef set_owner(path, user, group):\n \"\"\"Sets owner for path contents recursively.\"\"\"\n sudo('chown -R %s:%s %s' % (user, group, path))\n\n\n@task\ndef create_dir(path):\n \"\"\"Prepares a directory (creates it and sets owner).\"\"\"\n mkdir(path)\n set_owner(path, PROJECT_USER, PROJECT_GROUP)\n\n\n@task\ndef gzip_dir(src, target_fname, change_dir=None, do_sudo=False):\n \"\"\"GZips a directory.\"\"\"\n arch_ext = '.tar.gz'\n\n if arch_ext not in target_fname:\n 
target_fname = '%s%s' % (target_fname, arch_ext)\n\n    change_dir = change_dir or ''\n    if change_dir:\n        change_dir = '-C %s' % change_dir\n\n    command = sudo if do_sudo else run\n    command('tar -czf %s %s %s' % (target_fname, change_dir, src))\n\n    return target_fname\n\n\n@task\ndef tail(fname):\n    \"\"\"Tails a file to output.\"\"\"\n    sudo('tail -f %s' % fname)\n\n\n@task\ndef rm(target, force=True, use_local=True):\n    \"\"\"Removes target file or directory recursively.\"\"\"\n    command = local if use_local else sudo\n    command('rm -r%s %s' % ('f' if force else '', target))\n\n\n@task\ndef append_to_file(string, fpath):\n    \"\"\"Appends a string into file.\"\"\"\n    with hide('running'):\n        append(fpath, string, use_sudo=True)\n\n\n@task\ndef touch(fpath):\n    \"\"\"Creates a file or updates modified date if already exists.\"\"\"\n    run('touch %s' % fpath)\n\n\ndef make_tmp_file(contents):\n    \"\"\"Makes a temporary file with the given contents.\"\"\"\n    fpath = '/tmp/wscf_%s' % uuid4()\n\n    with open(fpath, 'w') as f:\n        f.write(contents)\n\n    append_to_file(contents, fpath)\n    put(fpath, fpath, use_sudo=True)\n    return fpath\n","repo_name":"idlesign/webscaff_legacy","sub_path":"webscaff/fabric/sys/fs.py","file_name":"fs.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"35647899782","text":"import re\nimport asyncio\nimport io\nfrom math import log10\nfrom string import ascii_lowercase\nimport difflib\nimport aiohttp\nfrom discord.ext import commands as cmd\n\n\nasync def get_closest_match(arg, items):\n    if arg is None or not arg.strip():\n        return None, None\n    names = [snake(i[\"Name\"]) for i in items]\n    closest_match = difflib.get_close_matches(arg, names, n=1, cutoff=0.85)\n    match, data = None, None\n    if closest_match:\n        match = closest_match[0]\n        data = next((a for a in items if snake(a[\"Name\"]) == match), None)\n    if not match:\n        closest_match = next((n for n in names if n.startswith(arg)), None)\n        if closest_match:\n            match = closest_match\n            data = next((a for a in items if snake(a[\"Name\"]) == closest_match), None)\n    if not match:\n        closest_match = next(\n            (\n                n\n                for n in names\n                if \"\".join([x[0] for x in n.lower().split(\"_\")]) == arg.lower()\n            ),\n            None,\n        )\n        if closest_match:\n            match = closest_match\n            data = next((a for a in items if snake(a[\"Name\"]) == closest_match), None)\n    return match, data\n\n\ndef snake(text):\n    return text.lower().replace(\" \", \"_\").replace(\"'\", \"\").replace(\"-\", \"\")\n\n\ndef snake_get(func, term, arr):\n    return next((a for a in arr if snake(func(a)) == term), None)\n\n\ndef rotate(table, mod):\n    \"\"\"Rotate a list.\"\"\"\n    return table[mod:] + table[:mod]\n\n\ndef lget(_list, idx, default):\n    \"\"\"Safely get a list index.\"\"\"\n    try:\n        return _list[idx]\n    except IndexError:\n        return default\n\n\nasync def download_image(path):\n    async with aiohttp.ClientSession() as session:\n        async with session.get(path) as response:\n            buffer = io.BytesIO(await response.read())\n\n    return buffer\n\n\nasync def choose_from(ctx, choices, text, timeout=10):\n    chooser = await ctx.send(text)\n\n    def check(m):\n        if m.author == ctx.author:\n            return m.content.isnumeric() or m.content.lower().strip() == \"c\"\n        return False\n\n    try:\n        msg = await ctx.bot.wait_for(\"message\", check=check, timeout=timeout)\n    except asyncio.TimeoutError:\n        # no reply message exists on timeout, so notify through the invoking context\n        await ctx.send(f\"**{ctx.author}**'s query timed out.\")\n        await chooser.delete()\n        raise cmd.BadArgument(\"Timed out\")\n    else:\n        
if msg.content.lower() == \"c\":\n await chooser.delete()\n await msg.channel.send(f\"**{ctx.author}**'s query was cancelled.\")\n raise cmd.BadArgument(\"Timed out\")\n i = int(msg.content) - 1\n if i > -1 and i < len(choices):\n await chooser.delete()\n return choices[i]\n\n\nasync def search_for(items, term):\n return [items.index(x) for x in items if term in x]\n\n\n__scifi = re.compile(r\"^([^a-z]+)([A-Za-z]+)$\")\n__lifi = re.compile(r\"^([0-9\\.]+)[^0-9]+([0-9,]+)$\")\n\n\nasync def ttconvert_discover(number):\n if __lifi.match(number):\n return 0\n elif __scifi.match(number):\n return 1\n return 2\n\n\nasync def ttconvert_from_scientific(number):\n number, notation = __lifi.findall(number)[0]\n notation = int(notation.replace(\",\", \"\")) - 15\n modulo = notation % 3\n exponent = notation / 3\n output = []\n while exponent > 26:\n result, remainder = divmod(exponent, 26)\n output.append(remainder)\n exponent = result\n output.append(exponent)\n multiple = pow(10, modulo)\n l = len(output)\n if l > 2:\n output = [x for x in output[: -(l - 2)]] + [\n max(x - 1, 0) for x in output[-(l - 2) :]\n ]\n last_result = \"\".join([ascii_lowercase[int(last)] for last in output[::-1]])\n if len(last_result) == 1:\n last_result = \"a\" + last_result\n return \"{}{}\".format(int(float(number) * multiple), last_result)\n\n\nasync def ttconvert_to_scientific(number):\n number, letter = __scifi.findall(number)[0]\n map_to_alpha = [ascii_lowercase.index(x) for x in letter.lower()]\n a_to_one = [x + 1 for x in map_to_alpha[:-2]] + map_to_alpha[-2:]\n dict_map = dict(enumerate(a_to_one))\n map_to_alpha = [pow(26, x) for x in list(dict_map.keys())[::-1]]\n result = sum([x * a_to_one[i] for i, x in enumerate(map_to_alpha)])\n magnitude = int(log10(float(number)))\n number = float(number) / max((pow(10, magnitude)), 1)\n return \"{}e{:,}\".format(number, result * 3 + 15 + magnitude)\n","repo_name":"alexogeny/astutus","sub_path":"modules/utils/etc.py","file_name":"etc.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"43369364552","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 18 19:20:04 2023\n\n@author: Amani\n\"\"\"\n\nfrom SHA_1 import get_digest\nfrom BruteForce import BF\nfrom Random import R\nimport time as t\nimport string as STR\n\ndef rep_query():\n r = input('''\n _________________________________________\n \n How many times do you suspect a character \n can appear in the message? ''')\n return int(r)\n\ndef digest_query():\n d = input('''\n _____________________________________________\n \n Insert digest: (e.g. 950bfe587a64c98e435d811a6c43097ec6d2546d)\n \n ''')\n return d\n\ndef get_pw_len():\n pl = input('''\n _______________________________________\n \n Please guess the length of the message: ''')\n return int(pl)\n\ndef get_method():\n \n m = input(\"\"\"\n __________________________________________________\n \n Choose Methodology:\n \n Brute Force words -> type 1\n Generate Random Words -> type 2\n \n then press enter.\n \n choice? 
\"\"\")\n    \n    if m not in ['1','2']:\n        m = get_method()\n    \n    return int(m)\n    \ndef get_char_s():\n    \n    choices = [\"LC\",\"UC\",\"D\",\"SP\",\"P\",\"ALL\"]\n    \n    def _get_char_s(): #Getting the user's preference in what type of data to produce\n        \n        \n        \n        p_choice = input(\"\"\"\n        \n        Possible elements in message:\n            \n        Lowercase -> \"LC\"\n        Uppercase -> \"UC\"\n        Digits -> \"D\"\n        A possible space -> \"SP\"\n        Punctuations -> \"P\"\n        All the above -> \"ALL\"\n\n        If multiple choices, then separate each by a comma (,)\n        \n        \n        \"\"\")\n        print()\n        \n        #Convert choice inputs into list array of those choices \n        #(if there are more than one)\n        if ',' in p_choice:\n            #Automatically splits every string element based \n            #on the specified argument splitter\n            p_choice = p_choice.split(\",\") \n        #If singular invalid choice made\n        if type(p_choice) == str: \n            if p_choice.upper() not in choices: \n                print(\"%s was not one of the options\" % p_choice)\n                p_choice = _get_char_s()\n        \n        else: #If a list of choices were made with invalid inputs\n            for i in p_choice:\n                if i.upper() not in choices:\n                    p_choice = _get_char_s()\n                if i.upper() == \"ALL\": #if ALL is in the choices\n                    print(\"Invalid input of 'ALL' with other choices\")\n                    p_choice = _get_char_s()\n        return p_choice\n    \n    #translates the user's given characters into a string of those characters\n    def add_characters(p_choice, char_s):\n        p_choice = p_choice.upper()\n        ls = [STR.ascii_lowercase, STR.ascii_uppercase, \n              STR.digits, ' ', STR.punctuation]\n        for i in range(len(ls)):\n            if p_choice == choices[i]: \n                char_s += ls[i]\n                break\n            elif p_choice == 'ALL':\n                char_s = ''.join(ls)\n        \n        return char_s\n    \n    p_choice = _get_char_s()\n    \n    char_s = ''\n    #if there is a list of choices\n    if type(p_choice) == list:\n        for p_c in p_choice:\n            p_c = p_c.upper() #convert to uppercase\n            char_s = add_characters(p_c, char_s)\n    \n    else: #if user only chose a singular choice\n        p_choice = p_choice.upper() #convert to uppercase\n        char_s = add_characters(p_choice, char_s)\n    \n    return char_s\n    \n    \ndef main():\n    \n    typ = int(input('''\n             message to digest -> type 1\n             digest to message -> type 2\n             \n             '''))\n    \n    if typ == 1:\n        digest = get_digest(input('''\n    insert message below:\n        \n    '''))\n        \n        print()\n        print(\"digest = %s\" % digest)\n        print()\n    elif typ == 2:\n        \n        digest = digest_query() #Irreversible digest of the message\n        method = get_method() #choosing between methods provided\n        rep = rep_query() #number of times a letter can repeat\n        p_len = get_pw_len() #length of password\n        char_s = get_char_s() #get list of possible characters\n        start = t.time() #start timer\n        \n        if method == 1:\n            \n            print('please wait as messages of length %d are being tested via brute force method' % p_len)\n            print()\n            \n            BF(digest, p_len, rep, char_s, start)\n            \n        elif method == 2:\n            \n            print('please wait as random messages of length %d are being tested via random method' % p_len)\n            print()\n            \n            R(digest, p_len, rep, char_s, start)\n            #end of time and time taken is handled within Random.py file.\n    \nmain()\n    ","repo_name":"Amani5576/SHA-1-cryptography","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"24727917586","text":"# coding=utf-8\nfrom numpy import *\n\n\ndef loadDataSet(fileName):\n    \"\"\"\n    Load the data set\n    :param fileName:str, path of the data set file\n    :return:list, like array\n    \"\"\"\n    dataMat = []\n    fr = open(fileName)\n    for line in fr.readlines():\n        curLine = line.strip().split('\\t')\n        
fltLine = map(float, curLine)\n        dataMat.append(list(fltLine))\n    return dataMat\n\n\n# Compute the distance between two vectors, using the Euclidean distance\ndef distEclud(vecA, vecB):\n    return sqrt(sum(power(vecA - vecB, 2)))\n\n\n# Randomly generate the initial centroids (Ng's course initializes by randomly picking K points)\ndef randCent(dataSet, k):\n    n = shape(dataSet)[1]\n    centroids = mat(zeros((k, n)))\n    for j in range(n):\n        minJ = min(dataSet[:, j])\n        rangeJ = float(max(array(dataSet)[:, j]) - minJ)\n        centroids[:, j] = minJ + rangeJ * random.rand(k, 1)\n    return centroids\n\n\ndef kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):\n    \"\"\"\n    Clustering algorithm\n    :param dataSet:matrixlib.defmatrix.matrix, data set matrix\n    :param k:int, the k parameter, number of clusters\n    :param distMeas:function, computes the distance between two vectors (Euclidean distance)\n    :param createCent:function, # randomly generates the initial centroids\n    :return:\n    \"\"\"\n    # shape(dataSet) gives the matrix dimensions, e.g. (100, 3)\n    m = shape(dataSet)[0]\n    # create a zero matrix of shape (100, 2)\n    clusterAssment = mat(zeros((m, 2)))  # create mat to assign data points\n    # print(clusterAssment)\n    # to a centroid, also holds SE of each point\n    centroids = createCent(dataSet, k)  # randomly pick the initial cluster centers\n    # print(centroids)\n    clusterChanged = True\n    while clusterChanged:\n        clusterChanged = False\n        # for each data point assign it to the closest centroid\n        # iterate over all m data points (e.g. 100)\n        for i in range(m):\n            # inf stands for an infinitely large positive number\n            minDist = inf\n            minIndex = -1\n            # loop over the k centroids, compute the distance to each and record the index\n            for j in range(k):\n                # compute the distance between this point and each centroid\n                # print(\"{} -- {}\".format(centroids[j, :], dataSet[i, :]))\n                distJI = distMeas(centroids[j, :], dataSet[i, :])\n                if distJI < minDist:\n                    minDist = distJI\n                    minIndex = j\n            print(\"--------\")\n            print(\"{} {} 【{}】 {}\".format(minDist, minIndex, i, clusterAssment[i, 0]))\n            # the first column of this row does not yet equal the assigned centroid index\n            if clusterAssment[i, 0] != minIndex:\n                print(\"fff\")\n                clusterChanged = True\n            # store the centroid index and the squared minimum distance\n            clusterAssment[i, :] = minIndex, minDist ** 2\n        # print(centroids)\n        for cent in range(k):  # recalculate centroids\n            ptsInClust = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]  # get all the points in this cluster\n            centroids[cent, :] = mean(ptsInClust, axis=0)  # assign centroid to mean\n    return centroids, clusterAssment\n\n\ndef show(dataSet, k, centroids, clusterAssment):\n    from matplotlib import pyplot as plt\n    numSamples, dim = dataSet.shape\n    mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '\d+)/$', pasajero_edit, name='pasajero_editar'),\n    url(r'^eliminar/(?P\d+)/$', pasajero_delete, name='pasajero_eliminar'),\n]","repo_name":"AlejoJr/marcianos","sub_path":"projectmarcianos/apps/pasajero/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"36229496523","text":"\nfrom decimal import Decimal as D\n\nsearch = None\n\n\ndef f(N):\n\n    \"\"\"\n\n    sqrt(search) = N\n    search = N^2\n    0 = N^2 - search\n\n    task : find N value which will make above equation true\n\n    \"\"\"\n\n    return D(N)**2 - D(search)\n\n\ndef derivative_numerical(f, x):\n    h = D(\"1e-20\")\n    return (f(x+h)-f(x-h)) / (h*2)\n\n\ndef newton_method(iteration):\n\n    root = D(1.0) # set initial value\n\n    for i in range(iteration):\n        print(\"i = \" + str(i+1) + \", N = \" + str(root))\n        newroot = root - f(root) / derivative_numerical(f, root)\n        if newroot == root:\n            break\n        root = newroot\n\n\ndef main():\n    global search\n    search = input(\"Enter any number to find its square root : \")\n    newton_method(100) # run up to 100 iterations; the loop breaks once the root converges\n\n\nif __name__ == '__main__':\n    
main()\n","repo_name":"shahril96/snippets","sub_path":"random-codes/sqrtN.py","file_name":"sqrtN.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"71775924604","text":"import signal\nimport subprocess\nimport threading\nimport time\n\n\nclass PlaySong(threading.Thread):\n _SLEEP_DURATION = 0.1 # this is the number of seconds to sleep before re-checking conditions\n\n def __init__(self, pi_player, file_name):\n super().__init__(target=self._begin_playing_song, args=[file_name])\n self._user_play_pause = threading.Event()\n self._user_stop = threading.Event()\n self._is_song_playing = True\n self._song_player_proc = None\n self.pi_player = pi_player\n\n def _begin_playing_song(self, file_name):\n SONG_PLAYER_CMD = [\"play\", '-q', file_name, '-t', 'alsa']\n self._song_player_proc = subprocess.Popen(SONG_PLAYER_CMD)\n while True:\n # if the user hit stop, terminate the subprocess then return\n if self._is_user_stopped():\n self._song_player_proc.terminate()\n return\n # if the user hit play/pause, send either SIGSTOP or SIGCONT\n elif self._is_user_played_paused():\n self._song_player_proc.send_signal(signal.SIGSTOP) if self._is_song_playing else self._song_player_proc.send_signal(signal.SIGCONT)\n self._is_song_playing = not self._is_song_playing\n self._clear_user_play_pause()\n # if subprocess completed, call piplayer.next then return\n elif self._song_player_proc.poll() is not None:\n self.pi_player.play_next_song()\n return\n # otherwise, sleep for small amount of time then repeat\n else:\n time.sleep(PlaySong._SLEEP_DURATION)\n\n def user_stop(self):\n self._user_stop.set()\n\n def user_play_pause(self):\n self._user_play_pause.set()\n\n def _is_user_stopped(self):\n return self._user_stop.is_set()\n\n def _is_user_played_paused(self):\n return self._user_play_pause.is_set()\n\n def _clear_user_play_pause(self):\n self._user_play_pause.clear()\n","repo_name":"ammoniha7/Python-pi-audio-player","sub_path":"playsong.py","file_name":"playsong.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"18379267336","text":"# -*- mode: Python; coding: utf-8 -*-\nfrom classifier import Classifier\nfrom corpus import Document, BlogsCorpus\nfrom random import shuffle, seed\nfrom math import log\nimport nltk\nfrom nltk.tokenize import word_tokenize\n\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.stem.lancaster import LancasterStemmer\nfrom nltk.stem import RSLPStemmer\n\nfrom nltk.corpus import stopwords\n\nimport numpy\nimport string\n\n\nclass NaiveBayes(Classifier):\n \"\"\"A naïve Bayes classifier.\"\"\"\n\n def __init__(self, model={}):\n super(NaiveBayes, self).__init__(model)\n \n def get_model(self):\n return self.myModel\n\n def set_model(self, model):\n self.myModel = model\n\n model = property(get_model, set_model)\n\n def train(self, instances):\n \"\"\"Construct a statistical model from labeled instances.\"\"\"\n self.myModel = {}\n count = 0\n for instance in instances:\n # Use dictionary for myModel\n if instance.label == \"\":\n continue\n count += 1\n if instance.label not in self.myModel:\n self.myModel[instance.label] = Label()\n lbl = self.myModel[instance.label]\n lbl.docCnt += 1\n for feature in instance.features():\n lbl.rvRec[feature] = lbl.rvRec.get(feature, 0) + 1\n lbl.rvCnt += 1\n \n for _, lbl in self.myModel.iteritems():\n lbl.log_doc_prob = log(lbl.docCnt) - 
log(count)\n\n    def classify(self, instance):\n        \"\"\"Classify an instance and return the expected label.\"\"\"\n        result, prob, curprob = None, None, 0\n\n        for label, lbl in self.myModel.iteritems():\n            curprob = lbl.log_doc_prob\n\n            for feature in instance.features():\n                curprob += lbl.log_feature_prob(feature)\n\n            if prob is None or prob < curprob:\n                prob = curprob\n                result = label\n        return result\n\n\nclass BagOfWords(Document):\n    def features(self):\n        return self.data.split()\n\n\nclass BagOfWordsStemmed(Document):\n    def features(self):\n        \"\"\"Stem the words.\"\"\"\n        st = RSLPStemmer() \n        return [st.stem(word) for word in self.data.split()]\n\n\nclass BagOfWordsTokenized(Document):\n    def features(self):\n        \"\"\"Trivially tokenized words.\"\"\"\n        return word_tokenize(self.data)\n\n\nclass BagOfWordsWithoutPunctunation(Document):\n    def features(self):\n        exclude = set(string.punctuation)\n        out = []\n        for word in self.data.split():\n            out.append(''.join(ch for ch in word if ch not in exclude)) \n        return out\n\n\nclass NGram(Document):\n    def features(self):\n        \"\"\"Use N-grams to extract features.\"\"\"\n        n = 2\n        data = self.data.split()\n        st = RSLPStemmer() \n        data = [st.stem(word) for word in self.data.split()]\n        out = []\n        for i in range(n, len(self.data.split()) - n + 1):\n            out.append(data[i - n:i])\n            out.append(data[i + 1:i + n])\n        return [' '.join(x) for x in out]\n\n\nclass FMeasure(Document):\n    def features(self):\n        tagWords = nltk.pos_tag(word_tokenize(self.data))\n        tag_fd = nltk.FreqDist(tag for (word, tag) in tagWords)\n        return dict((key, value) for key, value in tag_fd.most_common())\n\n\nclass Label(object):\n    def __init__(self):\n        self.docCnt = 0\n        self.rvCnt = 0 # rv for random variable\n        self.rvRec = {}\n\n    def log_feature_prob(self, feature):\n        freq = self.rvRec.get(feature, 0)\n        return log(freq + 0.1) - log(self.rvCnt + 0.1 * len(self.rvRec))\n\n\ndef split_blogs_corpus(document_class, count=3000):\n    \"\"\"Split the blog post corpus into training and test sets\"\"\"\n    blogs = BlogsCorpus(document_class=document_class)\n    seed(hash(\"blogs\"))\n    shuffle(blogs)\n    return (blogs[:count], blogs[count:])\n\n\ndef accuracy(classifier, test):\n    \"\"\"Find the performance of the model\"\"\"\n    correct = [classifier.classify(x) == x.label for x in test]\n    return float(sum(correct)) / len(correct)\n\n\nif __name__ == '__main__':\n    train, test = split_blogs_corpus(NGram)\n    classifier = NaiveBayes()\n    classifier.train(train)\n    classifier.save(\"myModel.dat\")\n    print(accuracy(classifier, test))\n","repo_name":"luckyhusky/Naive_Bayes_Classifier","sub_path":"code/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"22248802664","text":"r\"\"\"\nAn example of compilation from a dynamic quantum circuit into a static one.\n\"\"\"\n\nfrom Extensions.QuantumNetwork.qcompute_qnet.quantum.circuit import Circuit\nfrom Extensions.QuantumNetwork.qcompute_qnet.quantum.backends import Backend\nfrom Extensions.QuantumNetwork.qcompute_qnet.quantum.utils import plot_results\n\n# Create a quantum circuit\ncir = Circuit()\n\n# Quantum state evolution\ncir.h(0)\ncir.h(1)\ncir.h(2)\ncir.cnot([0, 1])\n\n# Mid-circuit measurement on qubit 1\ncir.measure(1, mid=\"a\")\n\n# Classically controlled operation on qubit 2 based on the measurement outcome of 'a'\ncir.x(2, condition=\"a\")\n\n# Reset qubit 1\ncir.reset(1)\n\n# Quantum state evolution and 
measurements\ncir.h(1)\ncir.measure(2, mid=\"b\")\ncir.z(1, condition=\"b\")\ncir.cz([1, 0])\ncir.measure(0, mid=\"c\")\ncir.measure(1, mid=\"d\")\n\n# Set 'output_ids' for the final sampling results\ncir.output_ids = [\"a\", \"b\", \"c\", \"d\"]\n\n# Print the dynamic quantum circuit\ncir.print_circuit()\n# Run simulation with QNET StateVector backend\ncounts1 = cir.run(shots=8192, backend=Backend.QNET.StateVector)[\"counts\"]\n\n# Compile the dynamic circuit into a static quantum circuit\ncir.to_static()\ncir.print_circuit()\n\n# Run simulation for the compiled static quantum circuit\ncounts2 = cir.run(shots=8192, backend=Backend.QNET.StateVector)[\"counts\"]\n\n# Visualization of simulation results\nplot_results([counts1, counts2], [\"dynamic circuit\", \"static circuit\"])\n","repo_name":"baidu/QCompute","sub_path":"Extensions/QuantumNetwork/examples/example-dynamic_to_static.py","file_name":"example-dynamic_to_static.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"41"} +{"seq_id":"23333854417","text":"import re\n\n\nclass Defect:\n \"\"\"Define the general data structure of Defects\"\"\"\n\n def __init__(self, source_map, pcs):\n self.source_map = source_map\n self.pcs = self._rm_general_false_positives(pcs)\n if source_map:\n self.warnings = self._warnings()\n\n def is_defective(self):\n return bool(self.pcs)\n\n def get_warnings(self):\n return self.warnings\n\n def _rm_general_false_positives(self, pcs):\n new_pcs = pcs\n if self.source_map:\n new_pcs = self._rm_pcs_having_no_source_code(new_pcs)\n new_pcs = self._reduce_pcs_having_the_same_pos(new_pcs)\n return new_pcs\n\n def _rm_pcs_having_no_source_code(self, pcs):\n return [pc for pc in pcs if self.source_map.get_source_code(pc)]\n\n def _reduce_pcs_having_the_same_pos(self, pcs):\n d = {}\n for pc in pcs:\n pos = str(self.source_map.instr_positions[pc])\n if pos not in d:\n d[pos] = pc\n return d.values()\n\n def _warnings(self):\n warnings = []\n for pc in self.pcs:\n source_code = self.source_map.get_source_code(pc)\n if not source_code:\n continue\n\n source_code = self.source_map.get_buggy_line(pc)\n s = self._warning_content(pc, source_code)\n if s:\n warnings.append(s)\n return warnings\n\n def _warning_content(self, pc, source_code):\n new_line_idx = source_code.find(\"\\n\")\n source_code = source_code.split(\"\\n\", 1)[0]\n location = self.source_map.get_location(pc)\n\n source = re.sub(self.source_map.root_path, \"\", self.source_map.get_filename())\n line = location[\"begin\"][\"line\"] + 1\n column = location[\"begin\"][\"column\"] + 1\n s = \"%s:%s:%s: Warning: %s.\\n\" % (source, line, column, self.name)\n s += source_code\n if new_line_idx != -1:\n s += \"\\n\" + self._leading_spaces(source_code) + \"^\\n\"\n s += \"Spanning multiple lines.\"\n return s\n\n def _leading_spaces(self, s):\n stripped_s = s.lstrip(\"[ \\t]\")\n len_of_leading_spaces = len(s) - len(stripped_s)\n return s[0:len_of_leading_spaces]\n\n def __str__(self):\n s = \"\"\n for warning in self.warnings:\n s += \"\\n\" + warning\n return s.lstrip(\"\\n\")\n\n\n# define 5 defects (for more defects, declare more defect classes)\nclass ViolationDefect(Defect):\n def __init__(self, source_map, pcs):\n self.name = \"ERC721 Standard Violation Defect\"\n Defect.__init__(self, source_map, pcs)\n\n\nclass ReentrancyDefect(Defect):\n def __init__(self, source_map, pcs):\n self.name = \"ERC721 Reentrancy Defect\"\n Defect.__init__(self, source_map, pcs)\n\n\nclass 
RiskyProxyDefect(Defect):\n def __init__(self, source_map, pcs):\n self.name = \"Risky Mutable Proxy Defect\"\n Defect.__init__(self, source_map, pcs)\n\n\nclass UnlimitedMintingDefect(Defect):\n def __init__(self, source_map, pcs):\n self.name = \"Unlimited Minting Defect\"\n Defect.__init__(self, source_map, pcs)\n\n\nclass PublicBurnDefect(Defect):\n def __init__(self, source_map, pcs):\n self.name = \"Public Burn Defect\"\n Defect.__init__(self, source_map, pcs)\n","repo_name":"NFTDefects/nftdefects","sub_path":"NFTGuard/defect_identifier/defect.py","file_name":"defect.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"41"} +{"seq_id":"70234251005","text":"#Write a function which accepts a dictionary and an integer as input and returns an ascending sorted list \n#of all the keys whose values contain the input value. Note that the keys of this dictionary are strings \n#while the values of this dictionary are 1 Dimensional lists of integers\n\ndictionary = {\"rabbit\" : [1, 2, 3],\n \t \"kitten\" : [2, 2, 6],\n \t \"lioness\": [6, 8, 9]}\n\n#dictionary = {'Hyena': [12, 3, 0], \n#\t\t\t 'lioness': [6, 8, 9], \n#\t\t\t 'rabbit': [1, 2, 3], \n#\t\t\t 'kitten': [2, 2, 6]}\n\nnumber = 2\n\ndef value_containing_key(dictionary, number):\n\n\tfinal_list = []\n\n\tlist_keys = list(dictionary.keys()) # 4\n\tlen_values = 0\n\n\tfor name in list_keys:\n\n\t\tlen_values = len(dictionary[name]) #3\n\n\tfor x in range(len_values):\n\t\tfor y in list_keys:\n\n\t\t\tif dictionary[y][x] == number:\n\t\t\t\tif y not in final_list:\n\t\t\t\t\tfinal_list.append(y)\n\n\treturn final_list\n\n\n#print(value_containing_key(dictionary, number))\n\nprint(value_containing_key(dictionary, number))","repo_name":"chars32/edx_python","sub_path":"Weeks/Week7/Dictionaries/Excercise4.py","file_name":"Excercise4.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"4324969415","text":"#! 
/usr/bin/env python\n# coding:utf-8\n\nimport wx\n\nclass GaugeFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self,None,-1,'Gauge Example',size=(350,150))\n self.panel = wx.Panel(self,-1)\n self.count = 0\n self.gauge = wx.Gauge(self.panel,-1,50,(20,50),(250,25),style=wx.GA_HORIZONTAL|wx.GA_PROGRESSBAR)\n self.gauge.SetBezelFace(3)\n self.gauge.SetShadowWidth(3)\n self.Bind(wx.EVT_IDLE,self.OnIdle)\n\n def OnIdle(self,event):\n self.count = self.count + 1\n print(self.count)\n if self.count >=50:\n self.count = 0\n self.gauge.SetValue(self.count)\n\nclass App(wx.App):\n def OnInit(self):\n self.frame = GaugeFrame()\n self.frame.Show()\n return True\n\nif __name__ == \"__main__\":\n app = App()\n app.MainLoop()\n","repo_name":"fl0wjacky/wxPython","sub_path":"ch07/9_Gauge.py","file_name":"9_Gauge.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"70118683965","text":"class Solution:\n def longestValidParentheses(self, s: str) -> int:\n stack = []\n for i in range(len(s)):\n if stack and stack[-1][-1] == '(' and s[i] == ')': stack.pop()\n else: stack.append([i,s[i]])\n if not stack:\n return len(s)\n ans = stack[0][0]\n for i in range(len(stack)-1):\n ans = max(ans,stack[i+1][0]-stack[i][0]-1)\n \n return max(ans,len(s)-stack[-1][0]-1)","repo_name":"Endalebob/competitive-programing","sub_path":"32-longest-valid-parentheses/32-longest-valid-parentheses.py","file_name":"32-longest-valid-parentheses.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"41"} +{"seq_id":"1022087884","text":"#!/usr/bin/env python3\n\"\"\"\nUnwrap text and attemp to split into lines by paragraph using a heuristic.\n\"\"\"\nimport argparse\nimport sys\nimport re\nfrom txttools.util.parsertools import add_arguments\nfrom txttools.util.filetools import open_input, open_output\n\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser = add_arguments(parser)\nparser.add_argument(\n \"-e\",\n \"--paragraph_ending\",\n type=str,\n default=\".\",\n help=\"Ending characters that may be considered paragraph breaks when found before newlines. \"\n \"This heuristic is used to unwrap hard-wrapped lines without losing paragraphs. \"\n \"List characters without spaces. \"\n \"(. by default)\"\n)\n\n\ndef _clean_line(line, paragraph_ending):\n line = line.strip()\n if line == \"\":\n return \"\"\n elif line.endswith(paragraph_ending):\n return line + \"\\n\"\n else:\n return line + \" \"\n\n\ndef reformat_lines(lines, paragraph_ending):\n text = \"\".join(_clean_line(line, paragraph_ending) for line in lines)\n text = text.strip()\n return text.splitlines()\n\n\ndef last(indexed):\n return indexed[-1]\n\n\ndef clean_whitespace(line):\n return re.sub(r\"\\s+\", \" \", line)\n\n\ndef reformat_lines_iter(lines, paragraph_ending):\n buf = []\n for line in lines:\n line = clean_whitespace(line).strip()\n # Empty line after text is a paragraph break.\n # Check if the thing before ended on a paragraph ending.\n if line == \"\" and len(buf) > 0 and last(buf).strip().endswith(paragraph_ending):\n # Join buffer and strip any trailing whitespace.\n yield \"\".join(buf).strip()\n buf.clear()\n elif line != \"\":\n buf.append(line + \" \")\n # Any left over? 
Yield it\n if len(buf) > 0:\n yield \"\".join(buf).strip()\n\n\ndef main():\n args = parser.parse_args()\n input_file = open_input(args.file)\n output_file = open_output(args.output)\n paragraph_ending = tuple(args.paragraph_ending)\n\n for line in reformat_lines_iter(input_file, paragraph_ending):\n print(line + '\\n', file=output_file)\n\n\nif __name__ == '__main__':\n main()","repo_name":"gordonbrander/txttools","sub_path":"txttools/bin/unwrap.py","file_name":"unwrap.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"41"} +{"seq_id":"6151995990","text":"import pickle\nimport shutil\nfrom datetime import datetime\nfrom pathlib import Path\n\nfrom apiclient import errors\n\nfrom FileTreeNode import FileTreeNode as Node\nfrom utils import recursive_rmdir_parents, file_op_decorator\n\n\nclass FileTree:\n\n def __init__(self, files=None, root_folder=None):\n if files is None:\n files = []\n self.all_nodes = []\n self.all_files = []\n self.all_folders = []\n self.roots = []\n self.__build(files, root_folder)\n\n def update_dir(self, new_tree, api, backup_dir, revisions_dir):\n # Calculate diffs\n to_download, to_revision, to_move = FileTree.diff(old_files=self.all_files, new_files=new_tree.all_files)\n\n # Check that current backup state is consistent\n missing, modified = self.__check_backup_consistency(backup_dir, remote_files=new_tree.all_files)\n to_download.extend(missing)\n to_download.extend(modified)\n to_revision.extend(modified)\n\n # Print stats\n print('- Elements in old tree (+dirs): ', len(self.all_nodes))\n print('- Elements in new tree (+dirs): ', len(new_tree.all_nodes))\n print('- Elements to download: ', len(to_download))\n print('- Elements to delete (revision): ', len(to_revision))\n print('- Elements moved not modified: ', len(to_move))\n\n # Update local backup\n revision_dir = revisions_dir / Path(datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"))\n FileTree.revise_files(backup_dir, revision_dir, to_revision)\n FileTree.move_files(backup_dir, to_move)\n FileTree.download_files(api, backup_dir, to_download)\n\n # Update last modification time for each local file in the new tree\n print('* Updating last modification time in new tree ...')\n for file in new_tree.all_files:\n try:\n file.update_local_modified_time(backup_dir)\n except FileNotFoundError:\n print('- Error: Could not update modified time of %s' % file.make_relative_path(backup_dir))\n\n def __check_backup_consistency(self, base_dir, remote_files):\n missing = []\n modified = []\n\n for file in self.all_files:\n path = file.make_relative_path(base_dir)\n\n if not path.exists():\n # Add remote file to download\n remote_index = file.find_in_list(remote_files)\n print('- File not found in local backup. A new copy will be '\n 'downloaded [%s] (%s).' % ('OK' if remote_index else 'NO COPY IN REMOTE', path))\n if remote_index:\n remote_file = remote_files[remote_index]\n missing.append(remote_file)\n\n elif path.stat().st_mtime != file.last_local_update:\n # Add remote file to download\n remote_index = file.find_in_list(remote_files)\n remote_file = remote_files[remote_index]\n print('- File was unexpectedly modified in local backup. This file will be moved to revision and a '\n 'new copy will be downloaded [%s] (%s).' 
% ('OK' if remote_index else 'NO COPY IN REMOTE', path))\n if remote_index:\n modified.append(remote_file)\n\n if not missing and not modified:\n print('- Backup consistency check is ok')\n\n return missing, modified\n\n # Static -----------------------------------------------------------------------------------------------------------\n\n @staticmethod\n def saver(tree, path: Path):\n path.parents[0].mkdir(parents=True, exist_ok=True)\n pickle.dump(tree, path.open('wb'))\n\n @staticmethod\n def loader(path: Path):\n return pickle.load(path.open('rb'))\n\n @staticmethod\n def diff(old_files, new_files):\n to_download = []\n to_move = []\n to_revision = []\n\n for old_file in old_files:\n # Find matching file in new_files list\n new_file = next((f for f in new_files if f == old_file), None)\n # Check if file was deleted, modified or moved\n was_del = new_file is None\n was_mod = old_file.was_modified_in(new_file) if new_file else False\n was_mov = old_file.was_moved_in(new_file) if new_file else False\n\n if was_del:\n to_revision.append(old_file)\n elif was_mod:\n to_download.append(new_file)\n to_revision.append(old_file)\n elif was_mov:\n to_move.append((old_file, new_file))\n\n # Add files that are new\n to_download.extend([f for f in new_files if f not in old_files])\n return to_download, to_revision, to_move\n\n @staticmethod\n def download_files(api, base_dir, files):\n fails = []\n generator = file_op_decorator(files,\n '* Downloading new files. Updates may be slow for large files',\n '* Download new files, DONE')\n for file in generator:\n path = file.make_relative_path(base_dir)\n try:\n path.parents[0].mkdir(exist_ok=True, parents=True)\n if file.is_google_file():\n api.export_file(file.gid, path)\n else:\n api.get_file(file.gid, path)\n except errors.HttpError as e:\n if e.resp.status != 403: # File too big to export\n fails.append('%s [HTTP Error %s. 
%s]' % (path, e.resp.status, e._get_reason()))\n else:\n FileTree.__try_using_export_from_link(fails, file, api, path)\n except Exception as e:\n if not file.is_google_file():\n fails.append('%s [%s]' % (path, str(e)))\n else:\n FileTree.__try_using_export_from_link(fails, file, api, path)\n\n for path in fails:\n print('- Error: %s' % path)\n\n @staticmethod\n def __try_using_export_from_link(fails, file, api, path):\n try:\n api.download_export_from_link(file.export_links['application/pdf'], path)\n except Exception as e:\n print('%s [%s]' % (path, str(e)))\n fails.append('%s [%s]' % (path, str(e)))\n\n @staticmethod\n def revise_files(base_dir, revision_dir, files):\n fails = []\n generator = file_op_decorator(files, '* Moving deleted files', '* Moving deleted files, DONE')\n\n for file in generator:\n try:\n FileTree.move_file(origin=file.make_relative_path(base_dir),\n destination=revision_dir / Path(file.gid[:5] + '_' + file.name),\n remove_parents_until=base_dir)\n except Exception as e:\n fails.append(e)\n for path in fails:\n print('- Error: %s' % path)\n\n @staticmethod\n def move_files(base_dir, files):\n fails = []\n generator = file_op_decorator(files, '* Moving moved files', '* Moving moved files, DONE')\n\n for pair in generator:\n try:\n FileTree.move_file(origin=pair[0].make_relative_path(base_dir),\n destination=pair[1].make_relative_path(base_dir),\n remove_parents_until=base_dir)\n except Exception as e:\n fails.append(e)\n for path in fails:\n print('- Error: %s' % path)\n\n @staticmethod\n def move_file(origin: Path, destination: Path, remove_parents_until=None):\n if not origin.exists():\n raise Exception('%s [File was not found in the backup]' % origin)\n try:\n destination.parents[0].mkdir(exist_ok=True, parents=True)\n shutil.move(str(origin), str(destination))\n if remove_parents_until:\n recursive_rmdir_parents(origin.parents[0], remove_parents_until)\n except Exception as e:\n raise Exception('%s => %s [%s]' % (origin, destination, str(e)))\n\n # Private ----------------------------------------------------------------------------------------------------------\n\n def __build(self, files, root_folder):\n self.__fill_nodes_and_roots(files)\n self.__build_tree()\n self.__remove_orphan(root_folder)\n self.all_files = [f for f in self.all_nodes if f.is_file()]\n self.all_folders = [f for f in self.all_nodes if f.is_google_folder()]\n print('(%d orphan/trashed files or dirs ignored)' % (len(files) - len(self.all_nodes)))\n\n def __fill_nodes_and_roots(self, files, ignore_trashed=True):\n # Convert files and folders data (json) into nodes\n self.all_nodes = []\n self.roots = []\n for f in files:\n if not ignore_trashed or not f['trashed']:\n node = Node.from_json(f)\n self.all_nodes.append(node)\n if not node.parent:\n self.roots.append(node)\n\n def __build_tree(self):\n # Build the actual tree using node.parent property which contains the immediate parent id\n for file in self.all_nodes:\n file.parent = next((folder for folder in self.all_nodes if file.parent == folder.gid), None)\n if file.parent:\n file.parent.children.append(file)\n\n def __remove_orphan(self, root_folder):\n # Find nodes that do not belong to the desired root folder if specified\n if root_folder:\n self.all_nodes = [f for f in self.all_nodes if f.get_top_node().name == 
root_folder]\n","repo_name":"Teidesat/GDrive-Backup","sub_path":"src/FileTree.py","file_name":"FileTree.py","file_ext":"py","file_size_in_byte":9628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"33143937585","text":"#-----------------------------------------------------------------------------#\n#positional_calibration.py\n#\n#NPS Night Skies Program\n#\n#Last updated: 2021/02/19\n#\n#This script corrects for fisheye lens distortion, puts the zenith at the center\n#of images, and rotates the images to make north point up. Correction of \n#the fisheye lens distortion and aligning the zenith to the image center have \n#not been implemented. The output overwrites the images in the calibrated data \n#folder. \n#\n#Input: \n# (1) center.txt\n#\t(2) reference image for observing time\n#\t(3) mask\n# (4) sky images\n#\n#Output:\n# (1) sky images corrected for rotation, (centering, and distortion) \n#\n#History:\n#\tLi-Wei Hung -- Created \n#\n#-----------------------------------------------------------------------------#\nimport ast\nimport numpy as n\nimport pandas as pd\n\nfrom astropy.coordinates import EarthLocation\nfrom astropy.io import fits\nfrom astropy.time import Time\nfrom glob import glob\nfrom skimage.transform import rotate\n\n# Local Source\nimport process_input as p \nfrom sphericalgeometry import DistanceAndBearing\n\n#-----------------------------------------------------------------------------#\n\ndef main():\n\t\"\"\"\n\tThis script performs distortion correction and centering. See the script \n\tdescription for detail.\n\t\"\"\"\n\t\n\t#Read in the coordinates of the image center\n\ttry:\n\t\tfile = open(p.data_cal+'center.txt', \"r\")\n\texcept(FileNotFoundError):\n\t\tprint('center.txt is not found. Position calibration is not performed.')\n\t\treturn\n
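# [Editor's note] The zenith lookup below uses a standard identity: the RA of the local
# zenith equals the local mean sidereal time, and its Dec equals the site latitude. A tiny
# standalone check (the site coordinates here are made up for illustration):
from astropy.coordinates import EarthLocation
from astropy.time import Time
_t = Time('2021-02-19T06:00:00')
_site = EarthLocation.from_geodetic(lon=-110.6, lat=32.2)      # degrees; hypothetical site
_zenith_ra = _t.sidereal_time('mean', longitude=_site.lon).degree  # LST in degrees
_zenith_de = _site.lat.deg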
\tcenter = ast.literal_eval(file.read())\n\tfile.close()\n\tcenter_ra = center['ra']\n\tcenter_de = center['dec']\n\tori = round(center['orientation'],1)\n\t\n\t#Compute zenith RA and Dec based on the observing location and time\n\thdu = fits.open(p.data_cal+p.reference, fix=False)\n\thdr = hdu[0].header\n\ttime = Time(hdr['DATE-OBS']) #UTC observing date and time\n\tc = EarthLocation(lon=hdr['SITELONG'] , lat=hdr['SITELAT'])\n\tzenith_ra = time.sidereal_time('mean',longitude=c.lon.deg).degree\n\tzenith_de = c.lat.deg\n\thdu.close()\n\t\n\t#Compute the offset of the image centroid\n\tdab = DistanceAndBearing(center_de,center_ra,zenith_de,zenith_ra)\t\n\toffset_distance = round(90-dab[0],2)\n\toffset_bearing = round(dab[1],2)\n\tprint(f'Zenith is offset from the center by {offset_distance} degrees')\n\tprint(f'Zenith bearing from the center is {offset_bearing} degrees')\n\n\t\n\t#Mask - read in the fisheye mask center coordinates and radius\n\tC = pd.read_csv(p.calibration+'imagecenter.csv',index_col=0)\n\txc = C['Xcenter'][p.camera]\n\tyc = C['Ycenter'][p.camera]\n\t\n\t#Position calibration\n\tfor f in glob(p.data_cal+'*sky*.fit'):\n\t\timage = fits.open(f,uint=False,mode='update')\n\t\t\n\t\t#correct for fisheye lens distortion - needs to be implemented\n\t\tpass\t\n\t\t\n\t\t#align image centroid with the zenith - needs to be implemented\n\t\tpass\n\t\t\n\t\t#correct for orientation; put north up\n\t\timgdata = image[0].data.astype('float32')\n\t\timage[0].data = rotate(imgdata,ori,center=(xc,yc),mode='edge')\n\t\timage[0].header['history']=f'Image is rotated by {ori} degrees'\t\n\t\timage[0].header['history']=f'Zenith is misaligned by {offset_distance} degrees'\t\n\t\timage[0].header['history']=f'Zenith bearing is {offset_bearing} degrees'\t\n\t\t\n\t\timage.flush()\n\t\timage.close()\n\t\t\nif __name__ == '__main__':\n\tmain()\n\n","repo_name":"liweihung/Fisheye","sub_path":"Scripts/positional_calibration.py","file_name":"positional_calibration.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"24905788292","text":"# -*- coding: utf-8 -*-\n# discord-py-template-100.py\n\"\"\"\n#\n# Discord.py を使った bot サーバーを作成するためのテンプレート\n#\n# 以下のコードを参考にして、Discord.py を使った bot サーバーを作成してください。\n# https://discordpy.readthedocs.io/ja/latest/\n#\n# @author: Deskuma \n# @version: 1.0.0a # バージョン\n# @license: MIT License # ライセンス\n# @see: https://discordpy.readthedocs.io/ja/latest/quickstart.html\n# @see: https://discordpy.readthedocs.io/ja/latest/api.html\n# @see: https://discordpy.readthedocs.io/ja/latest/ext/commands/api.html\n# @date: 2022/05/17 # 作成日\n# @update: 2022/05/17 # 更新日\n#\n# @usage: # 使い方\n# 準備:\n# - Python3.6.8 以上を使用する\n# - discord.py をインストールする\n# - このファイルに対しての修正箇所を書き直す(後でも良いけど)\n# 1. ファイル名を discord-<ボット名>-bot.py に変更する\n# 2. 作者名を変更する\n# 3. バージョンを変更する\n# 4. ライセンスを変更する\n# 5. 参考URLを変更する\n# 6. 作成日を変更する\n# 7. 更新日を変更する\n# 8. 使い方を変更する\n# 9. 
ファイルを保存する\n#\n# 実行:\n# ```sh\n# DISCORD_BOT_TOKEN='bot token' python3 discord-<ボット名>-bot.py\n# ```\n# 参考:\n# - https://discordpy.readthedocs.io/ja/latest/quickstart.html\n# - https://discordpy.readthedocs.io/ja/latest/api.html\n# - https://discordpy.readthedocs.io/ja/latest/ext/commands/api.html\n#\n# @dependency: # 依存関係\n# - このテンプレートは、Python3.6.8 以上を使用する\n# - このテンプレートは、discord.py を使用する\n# - このテンプレートは、以下のライブラリを使用する\n# - asyncio\n# - aiohttp\n# - aioconsole\n# - aiohttp.client_reqrep\n# - aiohttp.client_ws\n#\n# @note: # 注意事項\n# はじめに [PEP8](https://peps.python.org/pep-0008/) にはきっと準拠していません!\n# このテンプレートを使用する際は、それらの規約に従ってください。適宜見直して修正します!\n#\n# ※このプログラムコードおよびコメントも含め大半は GitHub AI ペアプログラミング\n# copilot によって書かれいています。そのため、予期せぬコードが書かれている可能性も\n# あります。厳密なチェックは行っていません。コードを実行する前に、自分で確認してください。\n# (って、copilot が言っています!この上記文章の一部も AI が自動で書いてくれましたw)\n# -----------------------------------------------------------------------------\n変更履歴\nversion: 1.0.0a コメント修正\nversion: 1.0.0 初版\n\n\"\"\"\n# -------------------------------------\n# const 定数定義域\n# -------------------------------------\n\"\"\"\n定数は、定義された値を変更できない。という言語制約がある。\nのだが Python では定数の概念は無くすべて変数として扱われる。\n定数名は、すべて大文字と'_'で構成するのが規約になっている。\nなので、この場所以外の処理コード中に「大文字変数に代入する」\nコードが、あったら*おかしい*と疑うべし(このコードにも含む)\n(インスタンスオブジェクト類は変数なので小文字で構成される)\n必要なライブラリの読み込み import もここで定義する。\n\n大文字_変数名 = ... 定数\nimport ... ライブラリの読み込み\nfrom ... import ... as ... # 別名\n\"\"\"\n# -- モジュール & パッケージ------------\nimport os\nfrom pydoc import cli\nimport discord # require: discord.py\nfrom datetime import datetime\n\n# -- 定数 -----------------------------\nDEBUG_MODE = False\nDISCORD_BOT_TOKEN = os.environ.get(\"DISCORD_BOT_TOKEN\") # ''\nCMD_PREFIX = \"!\" # ''\n\n# BOT 管理者情報\nBOT_ADMIN_DISCORD_ID = \"\" # ''\nBOT_ADMIN_USERNAME = \"\" # ''\nBOT_ADMIN_DISCRIMINATOR = \"0000\" # ''\n\n# -------------------------------------\n# def 関数定義域\n# -------------------------------------\n\"\"\"\n関数は、共通で使う処理などを定義する。\ndef 関数名(引数1, 引数2=デフォルト値, ...):\n 処理\n return 戻り値\n\"\"\"\n\n\ndef wait(seconds):\n \"\"\"\n 処理を待つ関数\n \"\"\"\n import time\n\n time.sleep(seconds)\n\n\ndef zero_pad(num, length):\n \"\"\"\n 数値をゼロで埋める関数\n \"\"\"\n return str(num).zfill(length)\n\n\ndef get_current_time():\n \"\"\"\n 現在時刻を取得する関数\n \"\"\"\n return datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\n\n\ndef str_date_to_epoch(str_date):\n \"\"\"\n 文字列日付を epoch 日時に変換する関数\n \"\"\"\n return datetime.strptime(str_date, \"%Y/%m/%d %H:%M:%S\").timestamp()\n\n\ndef diff_date_time(date_time_1, date_time_2):\n \"\"\"\n 日時差を取得する関数\n \"\"\"\n epoch_1 = str_date_to_epoch(date_time_1)\n epoch_2 = str_date_to_epoch(date_time_2)\n return epoch_1 - epoch_2\n\n\ndef rand(min, max):\n \"\"\"\n 乱数を取得する関数\n \"\"\"\n import random\n\n return random.randint(min, max)\n\n\ndef roll_dice(ndn):\n \"\"\"\n ダイスロールを行う関数\n \"\"\"\n nd = ndn.split(\"d\")\n n = int(nd[0])\n d = int(nd[1])\n if n < 1 or d < 1 or n > 9 or d > 100:\n raise ValueError(\"invalid dice roll\")\n\n result = []\n for _ in range(n):\n result.append(rand(1, d))\n return result\n\n\ndef log(*message):\n \"\"\"\n ログを出力する関数\n \"\"\"\n out_flag = True\n # ログ出力フラグを確認して表示するか否かを判断する\n if len(message) != 0 and not DEBUG_MODE:\n if message[0].startswith(\"[debug]\"):\n out_flag = False\n\n if out_flag:\n print(get_current_time(), *message)\n\n\ndef print_test():\n \"\"\"\n テスト用関数\n\n 定数や変数の状態確認用です。\n 初心のうちは print() 命令たくさん使って中身を常に確認しましょう。\n 慣れたら変数の中身を見なくても想像つくようになります。\n \"\"\"\n log(\"[test] === test === begin\")\n # log(\"[test] DISCORD_BOT_TOKEN:\", DISCORD_BOT_TOKEN)\n log(\"[test] CMD_PREFIX:\", 
CMD_PREFIX)\n log(\"[test] BOT_ADMIN_DISCORD_ID:\", BOT_ADMIN_DISCORD_ID)\n log(\"[test] BOT_ADMIN_USERNAME:\", BOT_ADMIN_USERNAME)\n log(\"[test] BOT_ADMIN_DISCRIMINATOR:\", BOT_ADMIN_DISCRIMINATOR)\n log(\"[test] waiting... 2 seconds\")\n wait(2) # 2 秒待つ(これは diff_date_time() のテスト用)\n log(\"[test] boot_startup_time:\", boot_startup_time)\n log(\"[test] get_current_time:\", get_current_time())\n log(\"[test] diff_date_time:\", diff_date_time(get_current_time(), boot_startup_time))\n log(\"[test] === test === end\")\n log()\n\n\n# -------------------------------------\n# class 定義域\n# -------------------------------------\n\"\"\"\nクラスは、特定のまとまった処理を定義する。\nclass クラス名:\n def __init__(self, 引数):\n 初期化処理\n def 関数名(self, 引数):\n 処理\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\n\n\n# -- クラス定義 ------------------------\nclass Bot:\n def __init__(self, client, token=DISCORD_BOT_TOKEN):\n self.client = client\n self.token = token\n\n def run(self):\n \"\"\"bot を起動する\"\"\"\n log(\"[info] === start bot ===\")\n log(\"Bot is running...\")\n log(\"Ready, Discord Bot is activated!\")\n log(\"Standby for events...\")\n self.client.run(self.token)\n log()\n log(\"[info] === Bot is stopped ===\")\n\n def stop(self):\n \"\"\"bot を停止する\"\"\"\n # 停止する前に処理する内容を書く\n log(\"[info] boot_startup_time:\", boot_startup_time)\n log(\"[info] get_current_time:\", get_current_time())\n log(\n \"[info] diff_date_time:\",\n diff_date_time(get_current_time(), boot_startup_time),\n )\n\n\nclass Command(metaclass=ABCMeta):\n \"\"\"コマンド:基底クラス\"\"\"\n\n def __init__(self, name, description, func):\n # 基底クラスの初期化\n self.name = name\n self.description = description\n self.func = func\n # 初期化完了ログを出力する\n # (コマンドサブクラスがインスタンス化されるたびに出力される)\n log(\"[info] command:\", self.name, \"-\", self.description)\n\n @abstractmethod\n def run(self):\n \"\"\"コマンドの実行内容を実装する\"\"\"\n msg = \"Not implemented: run()\\n\"\n msg += \"command: \" + self.name + \"-\" + self.description\n return msg\n\n\nclass DiceCommand(Command):\n \"\"\"ダイスコマンド:サブクラス\"\"\"\n\n def __init__(self, message):\n # ここで基底クラスを初期化する\n super().__init__(\"Dice\", \"Dice rolling\", self.run)\n # 受け取ったメッセージを保存する\n self.message = message\n # これでクラスの初期化、終わり(インスタンス化された)\n\n def run(self):\n # def run(self):\n \"\"\"実行メソット\"\"\"\n try:\n mc = self.message.content\n an = self.message.author.name\n ndn = mc.split(\" \")[1]\n result = roll_dice(ndn)\n msg = f\"```\\n{an} rolled {ndn} = {result} = {sum(result)}\\n```\"\n return msg\n except Exception as e:\n log(\"[error]\", e)\n return f\"{an} ... invalid dice roll: {ndn}\"\n\n\n# -------------------------------------\n# グローバル変数定義域\n# -------------------------------------\n\"\"\"\nグローバル変数は、関数内部でも参照できる。定義された値も変更できる。\nPython は予約語以外、すべて変数として扱われる。\n変数名はすべて小文字と '_' で構成するのが理想。\nオブジェクト類は、大文字を含めて構成しても良い。\n\n変数名 = ... 
変数\n\"\"\"\n\nboot_startup_time = get_current_time() # ブート時間\n\nintents = discord.Intents.default() # Discord.py のインテントを設定\nintents.messages = True # メッセージ情報を取得する\n# その他のインテントを設定する場合は、以下のように設定する\n# intents.members = True # メンバー情報を取得する\n# intents.guilds = True # グループ情報を取得する\n# intents.channels = True # チャンネル情報を取得する\nclient = discord.Client(intents=intents) # Discord.py のインスタンスを生成\n\n# -------------------------------------\n# イベント駆動処理定義域\n# -------------------------------------\n\"\"\"\nイベント駆動処理は、イベントが発火したときに行う。\n※この定義は client.run() を実行する前に行ってください!\n さらに discord.Client() インスタンスの生成後に行ってください。\n(client.run() 以降の処理は bot が停止してから実行されるため)\n要するに…\nclient = discord.Client() # Discord.py のインスタンスを生成\n...\n@client.event\nasync def on_ready():\n log('ログインしました。')\n log('名前:', client.user.name)\n log('ID:', client.user.id)\n log('Discord.pyバージョン:', discord.__version__)\n log('現在時刻:', get_current_time())\n...\nclient.run(DISCORD_BOT_TOKEN) # Discord bot token を渡して実行\n\"\"\"\n\n\n@client.event\nasync def on_ready():\n \"\"\"\n ボットが起動した時に呼び出される\n \"\"\"\n log(\"[info] Catch 'on_ready' event\")\n log(\"[info] Bot is ready! Discord BOT activated! Standby for next events...\")\n log()\n log(\"[debug] ====== Bot Information ====== begin\")\n log(\"[debug] bot_name:\", bot.client.user.name)\n log(\"[debug] bot_id:\", bot.client.user.id)\n log(\"[debug] bot_discriminator:\", bot.client.user.discriminator)\n log(\"[debug] bot_avatar:\", bot.client.user.avatar_url)\n log(\"[debug] bot_avatar_id:\", bot.client.user.avatar)\n log(\"[debug] bot_avatar_url:\", bot.client.user.avatar_url)\n log(\"[debug] bot_guilds:\", bot.client.guilds)\n log(\"[debug] bot_private_channels:\", bot.client.private_channels)\n log(\"[debug] bot_emojis:\", bot.client.emojis)\n log(\"[debug] bot_activity:\", bot.client.activity)\n log(\"[debug] bot_user:\", bot.client.user)\n log(\"[debug] ====== Bot Information ====== end\")\n log()\n log(\"[info] Bot startup time:\", boot_startup_time)\n log(\"[info] Bot stop command is [Ctrl + C]\")\n log()\n\n\n@client.event\nasync def on_message(message):\n \"\"\"\n メッセージが送信された時に呼び出される\n \"\"\"\n log(\"[info] Catch 'on_message' event\")\n log(\"[debug] ====== Message Information ====== begin\")\n log(\"[debug] message:\", message)\n log(\"[debug] message.author:\", message.author)\n log(\"[debug] message.author.name:\", message.author.name)\n log(\"[debug] message.author.id:\", message.author.id)\n log(\"[debug] message.author.discriminator:\", message.author.discriminator)\n log(\"[debug] message.author.avatar_url:\", message.author.avatar_url)\n log(\"[debug] message.author.avatar_id:\", message.author.avatar)\n log(\"[debug] message.author.avatar_url:\", message.author.avatar_url)\n log(\"[debug] message.channel:\", message.channel)\n log(\"[debug] ====== Message Information ====== end\")\n\n if message.author.bot:\n # ボットのメッセージは無視する\n return\n\n mc = message.content\n log(\"[debug] message.content:\", mc)\n\n if mc.startswith(CMD_PREFIX): # コマンドが送信された場合\n if mc.startswith(\"!hello\"):\n await message.channel.send(\"Hello!\")\n log(\"[info] Send 'Hello!' message\")\n elif mc.startswith(\"!ping\"):\n late = client.latency\n ping = round(late * 1000)\n await message.channel.send(\"🏓Pong! Latency: {} ms\".format(ping))\n log(\"[info] Send 'Pong!' 
message\")\n elif mc.startswith(\"!time\"):\n await message.channel.send(get_current_time())\n log(\"[info] Send current time\")\n elif mc.startswith(\"!dice2\"):\n msg = DiceCommand(message).run()\n await message.channel.send(msg)\n log(\"[info] Send dice2 message\")\n elif mc.startswith(\"!dice\"):\n params = mc.split(\" \")\n try:\n if len(params) == 2:\n ndn = params[1]\n result = roll_dice(ndn)\n if result == []:\n raise ValueError(\"Invalid dice number\")\n await message.channel.send(\"Dice result: \" + str(result))\n log(\"[info] Send dice result\")\n else:\n raise ValueError(\"Invalid dice number\")\n except ValueError:\n await message.channel.send(\"Usage: !dice d\")\n log(\"[info] Send 'Usage: !dice d' message\")\n\n elif mc.startswith(\"!help\"):\n help_msg = \"\"\"\n Command list:\n > !help : Show this help message\n > !hello : Say hello\n > !ping : Ping pong\n > !time : Show current time\n > !dice d (ex. !dice 2d6) (num = 1-9, face = 2-100)\n > !dice2 Implemented in CommandClass. (Params are the same as for !dice)\n > : Roll dice command\n > !debug : Toggle debug mode (see console log)\n > !!stop : Stop bot\n \"\"\"\n await message.channel.send(help_msg)\n log(\"[info] Send help message\")\n elif mc.startswith(\"!debug\"):\n global DEBUG_MODE\n DEBUG_MODE = not DEBUG_MODE\n if DEBUG_MODE:\n await message.channel.send(\"Debug mode is enabled.\")\n else:\n await message.channel.send(\"Debug mode is disabled.\")\n elif mc.startswith(\"!!\"):\n if mc.startswith(\"!!stop\"):\n await message.channel.send(\"Bot Stopping...\")\n log(\"[info] Stopping...\")\n wait(5)\n await client.close() # Discordサーバーとの接続を切断する\n bot.stop() # Botを停止させる\n log(\"[info] Stopped.\")\n else:\n await message.channel.send(\"Unknown command.\")\n log(\"[info] Unknown command.\")\n else:\n await message.channel.send(\"Unknown command!: \" + mc)\n log(\"[info] Send unknown command message\")\n\n\n# -------------------------------------\n# メイン処理\n# -------------------------------------\n\"\"\"\nメイン処理は、ここから始まる。\nPython では `if __name__ == '__main__':` という書き方をしている。\nこれは、このファイルが直接実行された場合にのみ、以下の処理を実行する。\nこのファイルが import された場合には、以下の処理は実行されない。\n\"\"\"\n\nif __name__ == \"__main__\":\n # ---------------------------------\n # main 処理域\n # ---------------------------------\n \"\"\"\n main 関数(じゃないけど)は、\n プログラムのエントリーポイントとなる。\n \"\"\"\n\n # -- 初期化 ------------------------\n \"\"\"\n 初期化処理は、プログラムの最初に行う。\n \"\"\"\n bot = Bot(client) # Bot クラスのインスタンスを生成\n\n # -- メイン処理 --------------------\n \"\"\"\n メイン処理は、プログラムの中で最も多くのコードを書く。\n \"\"\"\n log(\"startup_time:\", boot_startup_time)\n # Bot クラスの run() メソッドを実行する\n # 中で client.run() が実行されている\n print_test()\n bot.run()\n # ↓これ以下は bot が停止(Ctrl+C)するまで実行されない!\n # test\n print_test()\n\n # -- プログラム終了処理 -------------\n \"\"\"\n プログラム終了処理は、プログラムの最後に行う。\n \"\"\"\n log(\"[info] Program end time:\", get_current_time())\n","repo_name":"Deskuma/sample-code","sub_path":"discord/bot/discord-py/discord-py-template.py","file_name":"discord-py-template.py","file_ext":"py","file_size_in_byte":18328,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"70034312765","text":"# Lets talk about the differenece between class variables and instance variables\n# Class Variables ==> Class variables are respective to single class and if we try to change a class varibale with\n# it will be changed accross the class i.e., across all the instances which class have defined\n# Instance variables ==> Instance variales are with respectto instacne, if we do any 
class Employee: #let's create a class\n\n    raise_amount = 1.10 #this is the class variable\n    # print(Employee.__dict__) to see the class variable in it\n\n    def __init__(self, first, last, pay):\n        self.first = first # this is called an instance variable\n        self.last = last\n        self.pay = pay\n\n    def full_name(self):\n        return f'{self.first} {self.last}'\n\n    # we can modify class variables and use them in two ways; one way is below\n    def increase_salary(self,amount):\n        Employee.raise_amount = amount #Note here we are using Employee, which means the class variable.\n        return self\n    \n    # Another way to use class variables is by self only\n    def increase_salary_2(self, amount):\n        self.raise_amount = amount \n        return self\n    \n    @classmethod\n    def increase_raise(cls,amount):\n        cls.raise_amount = amount\n\n#let's talk about the difference between both\nemp1 = Employee('Laxminarayana','Vadnala',500000)\nemp2 = Employee('Samanvitha', 'Sunkari', 500000)\n\n# this is just to see how we can print the variables' availability in classes\nprint(Employee.__dict__) #Checking\nprint(emp1.__dict__)\nprint(emp2.__dict__)\n\n# let's increase the salary\nemp1.increase_salary(1.2) #increased using the class name directly, so it has a wider effect, not only on one instance, i.e., emp1\nprint('After increase using Class Name directly',emp1.__dict__)\nprint('Lets see emp2 dict',emp2.__dict__)\nprint(Employee.__dict__)\n\nemp1 = Employee('Laxminarayana','Vadnala',500000)\nemp2 = Employee('Samanvitha', 'Sunkari', 500000)\n\n#now we are increasing with self, so it has an effect only on emp1, that is, the instance alone\nemp1.increase_salary_2(1.3) \nprint('After increasing using instance variable that is self==>',emp1.__dict__)\nprint('Lets see emp2 dict',emp2.__dict__)\nprint(Employee.__dict__)\n\n#If you have noticed, we have a new instance variable inside the dict; that's how we are trying to convert a\n# class variable to an instance variable (this is not exactly conversion, but we are bringing down the scope)\n# Scope of variables and search policy in OOP\n# Instance variables (self) => Class variables (cls) => Global variables (wrt program)\n# let's see the same example a different way\n\nemp1 = Employee('Laxminarayana','Vadnala',500000)\nemp2 = Employee('Samanvitha', 'Sunkari', 500000)\n\n# Altering the class variable by using the class name directly\nEmployee.raise_amount = 1.4\n#======== (OR) ========#\nemp1.increase_raise(1.4) #let's talk more about classmethods while discussing them\n\nprint(Employee.raise_amount)\nprint(emp1.raise_amount)\nprint(emp2.raise_amount)\n\n#NOTE - if you have observed clearly, whenever we try to change the class variable \n# with the help of the class name directly, it affects all the instances; this is the capability of class variables\n\n#now let me change the value using the instance variable\n# Yes, we can change class variables wrt instances\nemp1 = Employee('Laxminarayana','Vadnala',500000)\nemp2 = Employee('Samanvitha', 'Sunkari', 500000)\n\nprint(emp1.raise_amount)\nemp1.raise_amount = 1.5\n\nprint(Employee.raise_amount)\nprint(emp1.raise_amount)\nprint(emp2.raise_amount)\n\n# so by convention, for class variables we can use either self or cls\n\n#=================================#\n# Now let's talk about class methods and static methods\n# CLASS METHODS -> These can use class variables, and they are mainly used to create \n# another instance of the class; let's walk through an example\n
class Parent:\n\n    fam_count = 0 # let's define a new class variable \n\n    def __init__(self, childs):\n        self.childs = childs\n        # we can use class variables as a reference because their details are present in all the instances\n        Parent.fam_count += 1\n\n    @classmethod\n    def child_to_parent(cls,children_born):\n        # when a child grows up and has kids, ultimately they are also a parent\n        return Parent(children_born)\n\nfam1 = Parent(2)\n# now we are trying to create a new instance of the Parent class from an existing instance\nfam2 = fam1.child_to_parent(1)\n\nprint(fam1.__dict__)\nprint(fam2.__dict__)\nprint(Parent.fam_count) #isn't it cool? class variables are like a central database for all instances of a class\n\n# Now let's talk about static methods \n# STATIC METHODS -> static methods are ones which don't use self or cls (in other words, no instance is referenced)\n# static methods are just like normal functions, but they are grouped with the class because they are somehow related to it.\n\nclass Family:\n\n    def __init__(self,members):\n        self.members = members\n\n    @staticmethod\n    def is_enjoying(trips):\n        # this method tells whether the family is enjoying itself or not, based on the number of trips taken by the family\n        if trips >= 2:\n            return True\n        return False\n\nfam = Family(3)\nprint(fam.is_enjoying(3))\nprint(fam.is_enjoying(1))","repo_name":"LaxminarayanaV7416/MostlyPython","sub_path":"Python OOPS/class_variable.py","file_name":"class_variable.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"41"} +{"seq_id":"27963807189","text":"from collections import namedtuple\nfrom enum import Enum\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union\nimport jax\nfrom utils.shard_utils import set_partitions, _id_fn\nfrom flax.core.frozen_dict import freeze\nimport jax.numpy as jnp\nfrom flax.core.frozen_dict import freeze, unfreeze\nfrom jax.experimental.maps import Mesh\nimport numpy as np\nfrom utils.multihost_shard_utils import host_param_shard, get_mesh_idxs, get_mesh_lens\nfrom jax.random import KeyArray\nfrom optax import softmax_cross_entropy_with_integer_labels\nfrom flax.core.frozen_dict import FrozenDict\nimport optax\nfrom jaxtyping import PyTree\nfrom transformers.modeling_flax_utils import FlaxPreTrainedModel\nfrom transformers.tokenization_utils import PreTrainedTokenizer\nfrom jax.experimental.pjit import pjit\nimport itertools\nfrom jax.interpreters import pxla\nfrom jax.experimental import PartitionSpec\nfrom jax.experimental.pjit import with_sharding_constraint\n\n# inspired by FlaxFormer repo: https://github.com/google/flaxformer/blob/main/flaxformer/activation_partitioning.py\ndef global_mesh_defined():\n    \"\"\"Checks if global xmap/pjit mesh resource environment is defined.\"\"\"\n    maps_env = pxla.thread_resources.env\n    return maps_env.physical_mesh.devices.shape != () # pylint: disable=g-explicit-bool-comparison\n\ndef with_sharding_constraint_if_mesh_defined(x: PyTree, partition_spec_description: Tuple[Optional[str]]):\n    if global_mesh_defined():\n        return with_sharding_constraint(x, PartitionSpec(*partition_spec_description))\n    else:\n        return x\n\nclass OptimType(Enum):\n    AdamW = 1\n    AdamWMultiStep = 2\n    AdaFactor = 3\n    AdaFactorMultiStep = 4\n    PALMAdaFactor = 5\n    PALMAdaFactorMultiStep = 6\n\ndef shard_params(model_init_fn: Callable[[KeyArray], PyTree], params: PyTree, shard_rules: Any, mesh: Mesh, mp_axis: int) -> Tuple[PyTree, PyTree]:\n\n    # dummy rng\n    rng = jax.random.PRNGKey(0)\n\n    
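# [Editor's note] Both sharding helpers below lean on jax.eval_shape, which traces a function
# to get output shapes/dtypes without allocating any device memory -- useful when the real
# parameters would not fit on one host. A minimal illustration (sizes are arbitrary):
import jax
import jax.numpy as jnp

def _toy_init(rng):
    return {'w': jax.random.normal(rng, (4096, 4096)), 'b': jnp.zeros((4096,))}

_shapes = jax.eval_shape(_toy_init, jax.random.PRNGKey(0))
# _shapes is a pytree of jax.ShapeDtypeStruct leaves; _shapes['w'].shape == (4096, 4096),
# and no 4096x4096 array was ever materialized.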
# specifies how to split model parameters between devices\n    param_spec = set_partitions(unfreeze(params), shard_rules)\n\n    # initialization function for splitting parameters to devices\n    p_get_initial_params = pjit(\n        _id_fn, \n        in_axis_resources=(param_spec, None), \n        out_axis_resources=(param_spec, None), \n    )\n    \n    # initialize parameters from random, used to determine the host-level param mapping\n    p_model_init_fn = pjit(\n        model_init_fn,\n        in_axis_resources=(None,), \n        out_axis_resources=param_spec, \n    )\n    \n    # split the parameters per-host\n    with mesh:\n        rng, new_rng = jax.random.split(rng)\n        host_param_shapes = jax.eval_shape(p_model_init_fn, new_rng)\n        with jax.default_device(jax.devices('cpu')[0]):\n            params = host_param_shard(host_param_shapes, params, mesh.devices, mp_axis)\n\n    # split the params between all devices\n    with mesh:\n        params, _ = p_get_initial_params(freeze(params), jnp.ones((), dtype=jnp.uint32))\n    \n    return params, param_spec\n\ndef shard_optim_and_params(model_init_fn: Callable[[KeyArray], PyTree], params: PyTree, shard_rules: Any, mesh: Mesh, mp_axis: int, \n                           optim: optax.GradientTransformation, optim_type: OptimType) -> Tuple[Tuple[PyTree, PyTree], Tuple[PyTree, PyTree]]:\n    \n    # dummy rng\n    rng = jax.random.PRNGKey(0)\n    \n    # Shard params and optimizer state onto devices\n    # Source: https://github.com/huggingface/transformers/blob/main/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py\n    def get_initial_state(params):\n        opt_state = optim.init(params)\n        return opt_state, params\n    \n    # specifies how to split model parameters between devices\n    param_spec = set_partitions(unfreeze(params), shard_rules)\n\n    # Get the PyTree for opt_state, we don't actually initialize the opt_state yet.\n    class ShapeDtype(object):\n        def __init__(self, shape, dtype):\n            self.shape = shape\n            self.dtype = dtype\n    params_shapes = jax.tree_util.tree_map(lambda x: ShapeDtype(x.shape, x.dtype), params)\n    state_shapes = jax.eval_shape(get_initial_state, params_shapes)\n\n    # get PartitionSpec for opt_state, this is very specific to adamw\n    # TODO: optax returns different state for different optimizers, how can we handle this generically?\n    # or maybe we don't, since in our examples we just use adamw or adafactor\n    def get_opt_spec(x):\n        if isinstance(x, (dict, FrozenDict,)):\n            return param_spec\n        return None\n    if optim_type is OptimType.AdamW or optim_type is OptimType.AdamWMultiStep:\n        opt_state_spec, param_spec = jax.tree_util.tree_map(\n            get_opt_spec, state_shapes, is_leaf=lambda x: isinstance(x, (dict, FrozenDict, optax.EmptyState,))\n        )\n    elif optim_type is OptimType.AdaFactorMultiStep:\n        opt_state_spec, param_spec = jax.tree_util.tree_map(\n            get_opt_spec, state_shapes, is_leaf=lambda x: isinstance(x, (dict, FrozenDict, optax.EmptyState,))\n        )\n        opt_state_spec = opt_state_spec._replace(inner_opt_state=None)\n    elif optim_type is OptimType.AdaFactor:\n        opt_state_spec = None\n    elif optim_type is OptimType.PALMAdaFactorMultiStep:\n        opt_state_spec, param_spec = jax.tree_util.tree_map(\n            get_opt_spec, state_shapes, is_leaf=lambda x: isinstance(x, (dict, FrozenDict, optax.EmptyState,))\n        )\n        # replace part of pytree with None\n        temp1 = list(opt_state_spec.inner_opt_state[1])\n        temp1[0] = opt_state_spec.inner_opt_state[1][0]._replace(v_row=None, v_col=None)\n        temp1 = tuple(temp1)\n        temp2 = list(opt_state_spec.inner_opt_state)\n        temp2[1] = temp1\n        temp2 = tuple(temp2)\n        opt_state_spec = opt_state_spec._replace(inner_opt_state=temp2)\n    elif optim_type is OptimType.PALMAdaFactor:\n        opt_state_spec, 
param_spec = jax.tree_util.tree_map(\n get_opt_spec, state_shapes, is_leaf=lambda x: isinstance(x, (dict, FrozenDict, optax.EmptyState,))\n )\n # replace part of pytree with None\n temp1 = list(opt_state_spec[1])\n temp1[0] = opt_state_spec[1][0]._replace(v_row=None, v_col=None)\n temp1 = tuple(temp1)\n temp2 = list(opt_state_spec)\n temp2[1] = temp1\n temp2 = tuple(temp2)\n opt_state_spec = temp2\n else:\n raise NotImplementedError\n # pjit the get_initial_state function to shard params and init\n # optimizer state in sharded way\n p_get_initial_state = pjit(\n get_initial_state, \n in_axis_resources=(param_spec,), \n out_axis_resources=(opt_state_spec, param_spec),\n )\n \n # initialize parameters from random, used to determining host-level param mapping\n p_model_init_fn = pjit(\n model_init_fn,\n in_axis_resources=(None,), \n out_axis_resources=param_spec, \n )\n \n # split the parameters per-host\n with mesh:\n rng, new_rng = jax.random.split(rng)\n host_param_shapes = jax.eval_shape(p_model_init_fn, new_rng)\n with jax.default_device(jax.devices('cpu')[0]):\n params = host_param_shard(host_param_shapes, params, mesh.devices, mp_axis)\n\n # split the opt_state and params between all devices\n with mesh:\n opt_state, params = p_get_initial_state(params)\n \n return (params, param_spec), (opt_state, opt_state_spec)\n\ndef shard_data_list(data: List[Any], mesh: Mesh, dp_axis: int):\n dp_size = get_mesh_lens(mesh.devices)[dp_axis]\n dp_idx = get_mesh_idxs(jax.process_index(), mesh.devices)[dp_axis]\n return data[dp_idx::dp_size]\n\ndef shard_data_iterable(data: Iterable[Any], mesh: Mesh, dp_axis: int):\n dp_size = get_mesh_lens(mesh.devices)[dp_axis]\n dp_idx = get_mesh_idxs(jax.process_index(), mesh.devices)[dp_axis]\n return itertools.islice(data, dp_idx, None, dp_size)\n","repo_name":"AlexWan0/Poisoning-Instruction-Tuned-Models","sub_path":"src/shard.py","file_name":"shard.py","file_ext":"py","file_size_in_byte":7823,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"41"} +{"seq_id":"2147277321","text":"import wandb\nimport pandas as pd\n\napi = wandb.Api()\n\ndef log_precentage(wandb_run, GT, key):\n api = wandb.Api()\n run = api.run(f\"{wandb_run.entity}/{wandb_run.project}/{wandb_run.id}\")\n data = run.history(keys=[\"train/episode\",key])\n data[key + \" %\"] = data[key]/GT*100\n table = wandb.Table(data=data)\n wandb_run.log(\n {\n f\"{key}_precentage\": wandb.plot.line(\n table,\"train/episode\", key + \" %\", title=f\"{key} %\"\n )\n }\n )\n\nentity, project = \"qaq37\", \"HRC_model_based_rl_2\"\nruns = api.runs(entity + \"/\" + project)\n\nlatest_run = runs[0]\n\n# Get logged data\nhistory = latest_run.history()\nname = latest_run.name\n\n\nsummary_list, config_list, name_list = [], [], []\nfor run in runs:\n print(run.name)\n name = run.name\n config = run.config\n summary = run.summary\n history = run.history()\n\n keys = history.columns.values.tolist()\n episode = history['train/episode']\n episode_start_idx = episode[episode == 0].index.tolist()\n\n # .summary contains the output keys/values\n # for metrics such as accuracy.\n # We call ._json_dict to omit large files\n run.config\n\n\n summary_list.append(run.summary._json_dict)\n\n # .config contains the hyperparameters.\n # We remove special values that start with _.\n config_list.append({k: v for k, v in run.config.items() if not k.startswith(\"_\")})\n\n # .name is the human-readable name of the run.\n name_list.append(run.name)\n\nruns_df = pd.DataFrame(\n {\"summary\": 
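# [Editor's note] For readers new to pjit: the calls above follow the older jax.experimental
# API that this module imports. The pattern is to arrange devices in a named Mesh and hand
# pjit a PartitionSpec per argument/result. A hypothetical 8-device sketch (not from this repo):
import numpy as _np
import jax as _jax
from jax.experimental.maps import Mesh as _Mesh
from jax.experimental import PartitionSpec as _P
from jax.experimental.pjit import pjit as _pjit

_devices = _np.asarray(_jax.devices()).reshape(4, 2)   # assumes exactly 8 devices
_mesh = _Mesh(_devices, ('dp', 'mp'))                  # data- and model-parallel axes
_f = _pjit(lambda x: x * 2,
           in_axis_resources=_P('dp', 'mp'),           # rows sharded over dp, cols over mp
           out_axis_resources=_P('dp', 'mp'))
with _mesh:
    _y = _f(_np.ones((8, 4)))                          # runs sharded across the mesh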
summary_list, \"config\": config_list, \"name\": name_list}\n)\n\nruns_df.to_csv(\"project.csv\")\n","repo_name":"LeyangWen/meta-q-learning","sub_path":"plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"41"} +{"seq_id":"41128944343","text":"import scrapy\nfrom image_scraper.items import ImageScraperItem\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom scrapy import Selector\nimport time\n\nclass artStation(scrapy.Spider):\n # This spider is blocked by the site and returned 403 status\n # all pieces are tested on shell and working OK. \n # But when running as 'scrapy crawl artStation' it's blocked to run parse().\n name = 'artStation'\n allowed_domains = ['www.artstation.com']\n start_urls = ['https://www.artstation.com/search?sort_by=relevance&query=cyberpunk']\n \n def __init__(self):\n super().__init__()\n options = webdriver.ChromeOptions()\n #options.add_argument(\"--headless\") \n options.add_argument(\"--disable-extensions\")\n self.driver = webdriver.Chrome(options=options)\n\n\n def parse(self, response):\n current_url = response.url\n print('DEV: before scroll down called.')\n selector = self.scroll_down(current_url, 20)\n print('DEV: after scroll down called.')\n source_pages = selector.css('a.gallery-grid-link::attr(href)').getall()\n for page in source_pages:\n yield response.follow(page, callback=self.extract_image)\n \n\n def extract_image(self, response):\n self.driver.get(response.url)\n time.sleep(1)\n selector = Selector(text=self.driver.page_source)\n item = ImageScraperItem()\n item['image_urls'] = selector.css('a.btn::attr(href)').re('^https(?:(?!dl=1).)+$')\n return item\n \n \n def scroll_down(self, url, scrolls):\n print('DEV: getting url...')\n self.driver.get(url)\n time.sleep(1)\n\n for i in range(scrolls):\n# sel_res_txt = self.driver.page_source\n# selector = Selector(text=sel_res_txt)\n\n# next_button = selector.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"mui-isiaxn-button\", \" \" ))]')\n# next_text = next_button.css(\"::text\").get() \n# next_page = next_button.css(\"::attr(href)\").get()\n \n action = ActionChains(self.driver)\n action.scroll_by_amount(0, 2000)\n action.perform()\n time.sleep(3)\n sel_res_txt = self.driver.page_source\n selector = Selector(text=sel_res_txt)\n return selector\n ","repo_name":"pleix64/image-scraper","sub_path":"image_scraper/spiders/artStation.py","file_name":"artStation.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"10731739729","text":"# opticalmodel_globals\n# Contains global variables for the optical model calculations\n# =============================================================================================== #\n# Patrick MacGregor\n# Nuclear Physics Research Group\n# School of Physics and Astronomy\n# The University of Manchester\n# =============================================================================================== #\n# GLOBAL CONSTANTS\nimport numpy as np\namu = 931.494\t# amu in MeV/c^2\nPRINT = 0\nsep = \"\\t\"\ndiv = \"--------------------------------------------------\"\nDIV = \"==================================================\"\n# ----------------------------------------------------------------------------------------------- #\n# Print out the calculated quantities in the string 
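# [Editor's note] The artStation spider above scrolls a fixed 20 times; a common, more
# robust alternative (an assumption, not taken from that spider) is to scroll until the
# page height stops growing:
import time

def scroll_to_bottom(driver, pause=2.0, max_rounds=30):
    last = driver.execute_script("return document.body.scrollHeight")
    for _ in range(max_rounds):
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(pause)                  # give lazy-loaded results time to render
        now = driver.execute_script("return document.body.scrollHeight")
        if now == last:                    # no new content appeared; stop early
            break
        last = now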
list\ndef PrintOpticalModel(string_list, name):\n\tprint(DIV)\n\tprint(name + \" potential:\")\n\tprint(div)\n\tfor i in range(0,len(string_list)):\n\t\t\tprint(string_list[i])\n\n# ----------------------------------------------------------------------------------------------- #\ndef PrintCalculatedQuantities(A,Z,E,Q):\n\tprint(div)\n\tprint(\"A\" + sep + str(A))\n\tprint(\"Z\" + sep + str(Z))\n\tprint(\"E\" + sep + str(E))\n\tprint(\"Q\" + sep + str(Q))\n\n# ----------------------------------------------------------------------------------------------- #\n# Calculate Q value\ndef CalcQ( M_Target, M_Projectile, M_Ejectile, M_Product ):\n\treturn ( (M_Target + M_Projectile) - (M_Ejectile + M_Product) )*amu\n\n# ----------------------------------------------------------------------------------------------- #\n# Calculate separation energy for neutron or proton\ndef CalcSepEn( M_Light, M_Heavy, reaction_type ):\n\t# Neutron separation energy\n\tif reaction_type in [ \"dp\", \"pd\", \"ha\", \"ah\" ]:\n\t\treturn 939.5654133 + amu*(M_Light - M_Heavy)\n\n\t# Proton separation energy\n\telif reaction_type in [ \"th\", \"ht\", \"at\", \"ta\" ]:\n\t\treturn 938.2720813 + amu*(M_Light - M_Heavy)\n\t\n\t# No separation energy\n\telse:\n\t\treturn -1.0\n\n\t\t\n\n\n# ----------------------------------------------------------------------------------------------- #\n# Calculate trivial quantities\ndef CalcTrivials(A, Z, Ebeam, Ex, M_Target, M_Projectile, M_Ejectile, M_Product, H):\n\t# Number of neutrons\n\tN = A - Z\n\t\n\t# Calculate Q value\n\tQ = CalcQ( M_Target, M_Projectile, M_Ejectile, M_Product )\n\t\n\t# Calculate energy\n\tif H == 0:\n\t\tE = Ebeam\n\telif H == 1:\n\t\tE = Ebeam + Q - Ex\n\n\t# Return list\n\ttriv_list = [N, Q, E]\n\n\treturn triv_list\n\n# ----------------------------------------------------------------------------------------------- #\n# Make the string list from the calculated quantities\ndef MakeStringList(v,r,a,rc0):\n\tstringList = []\n\tstringList.append(\"v = \" + str(round(v[0],3)) + \" r0 = \" + str(round(r[0],3)) + \" a = \" + str(round(a[0],3)))\n\tstringList.append(\"vi = \" + str(round(v[1],3)) + \" ri0 = \" + str(round(r[1],3)) + \" ai = \" + str(round(a[1],3)))\n\tstringList.append(\"vsi = \" + str(round(v[2],3)) + \" rsi0 = \" + str(round(r[2],3)) + \" asi = \" + str(round(a[2],3)))\n\tstringList.append(\"vso = \" + str(round(v[3],3)) + \" rso0 = \" + str(round(r[3],3)) + \" aso = \" + str(round(a[3],3)))\n\tstringList.append(\"vsoi = \" + str(round(v[4],3)) + \" rsoi0 = \" + str(round(r[4],3)) + \" asoi = \" + str(round(a[4],3)) + \" rc0 = \" + str(round(rc0,3)))\n\treturn stringList\n\n# ----------------------------------------------------------------------------------------------- #\ndef CheckP(p):\n\tif p != 0 and p != 1:\n\t\traise ValueError(\"p must have a value of 0 or 1\")\n\n# ----------------------------------------------------------------------------------------------- #\n# DWUCK STUFF!!!!\n# Define a function to write a block of 8 characters padded with spaces at the end\ndef WriteBlock(string, n = 8):\n\tif len(string) > n:\n\t\tprint(\"Not allowed!\")\n\t\treturn \" \"*n\n\telse:\n\t\treturn string + \" \"*(n - len(string))\n\n# Return + or - based on the sign of the number\ndef GetSignChar(num):\n\tif num < 0: return \"-\"\n\telif num >= 0: return \"+\"\n\telse: return \"?\"\n\n# Return a number rounded to a particular length\ndef RoundNumToLength(num,length):\n\tif num == 0:\n\t\tpower = 2\n\telse:\n\t\tpower = max( np.ceil( np.log10(abs(num)) 
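# [Editor's note] A compressed restatement of the rounding rule above, with two checked
# examples (the helper name here is hypothetical): the precision adapts so the result
# always fills `length` characters, zero-padded.
import numpy as _np

def _round_to_len(num, length):
    power = max(_np.ceil(_np.log10(abs(num))), 2) if num != 0 else 2
    return format(num, "0%d.%df" % (length, int(length - power - 1)))

assert _round_to_len(3.14159, 6) == '03.142'   # power 2 -> format spec '06.3f'
assert _round_to_len(51.3, 6) == '51.300'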
), 2 )\n\t\n\treturn format(num, \"0\" + str(length) + \".\" + str(int(length - power - 1)) + \"f\" )\n\t\n# Combine these to write a block for specifying DWUCK optical model input\ndef WriteDWUCKOMPar(par):\n\treturn WriteBlock(GetSignChar(par) + RoundNumToLength(abs(par),6) )\n\t\ndef WriteDWUCKSmallNum(par):\n\treturn WriteBlock( format(par, \"4.1f\" ) )\n\t\ndef WriteDWUCKSignSmallNum(par):\n\treturn WriteBlock( GetSignChar(par) + format(abs(par), \"04.1f\" ) )\t\n\t\t\n# Define the DWUCK optical model input parameters\ndef WriteDWUCKOMBlock(v,r,a,rc0):\n\tstring_list = []\n\tstring_list.append( WriteBlock(\"+01.\") + WriteDWUCKOMPar(-v[0]) + WriteDWUCKOMPar(r[0]) + WriteDWUCKOMPar(a[0]) + WriteBlock(\"\") + WriteDWUCKOMPar(-v[1]) + WriteDWUCKOMPar(r[1]) + WriteDWUCKOMPar(a[1]) )\n\tstring_list.append( WriteBlock(\"+02.\") + WriteDWUCKOMPar(0.00) + WriteDWUCKOMPar(0.00) + WriteDWUCKOMPar(0.00) + WriteBlock(\"\") + WriteDWUCKOMPar(4*v[2]) + WriteDWUCKOMPar(r[2]) + WriteDWUCKOMPar(a[2]) )\n\tstring_list.append( WriteBlock(\"-04.\") + WriteDWUCKOMPar(-4*v[3]) + WriteDWUCKOMPar(r[3]) + WriteDWUCKOMPar(a[3]) + WriteBlock(\"\") + WriteDWUCKOMPar(-4*v[4]) + WriteDWUCKOMPar(r[4]) + WriteDWUCKOMPar(a[4]) )\n\tstring_list.append( WriteBlock( format( rc0, \"07.3f\" ) ) )\n\treturn string_list\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"patrick-macgregor/SPECTRUM-ANALYSIS-CODE","sub_path":"PtolemyCode/opticalmodel_globals.py","file_name":"opticalmodel_globals.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"28513672863","text":"import os\ndef directory(path,extension):\n list_dir = []\n list_dir = os.listdir(path)\n count = 0\n for file in list_dir:\n if file.endswith(extension):\n count += 1\n return count\n\nprint(\"No.of files in Dir : \",directory(\"C:\\\\Users\\\\srini\\\\Desktop\\\\WorkSpace\\\\Practice\",\".py\"))\n\nprint(\"No.of files in Dir : \",directory(\"C:\\\\Users\\\\srini\\\\Desktop\\\\WorkSpace\\\\PythonPracticeEx\",\".py\"))","repo_name":"thammaneni/Python","sub_path":"PythonPracticeEx/FilesCountDir.py","file_name":"FilesCountDir.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"25529427218","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom . import models\nfrom . 
import forms\n\nfrom .doctorViews import generate_bill\n\ndef entry(request):\n pass\n\n\ndef register(request):\n # if request.session.get('is_login', None):\n # login_type = request.session['login_type']\n # return redirect(f'/{login_type}/index/')\n\n if request.method == 'POST':\n register_form = forms.PatientRegisterForm(request.POST)\n message = \"请检查填写的内容!\"\n if register_form.is_valid():\n name = register_form.cleaned_data.get('name')\n password1 = register_form.cleaned_data.get('password1')\n password2 = register_form.cleaned_data.get('password2')\n # gender = register_form.cleaned_data.get('gender')\n gender = request.POST.get('gender')\n identity_card_no = register_form.cleaned_data.get('identity_card_no')\n # medical_insurance = register_form.cleaned_data.get('medical_insurance')\n medical_insurance = request.POST.get('medical_insurance')\n telephone_no = register_form.cleaned_data.get('telephone_no')\n birth_date = register_form.cleaned_data.get('birth_date')\n if password1 != password2:\n message = '两次输入的密码不同!'\n return render(request, 'patient/register.html', locals())\n else:\n same_ic = models.Patient.objects.filter(identity_card_no=identity_card_no)\n if same_ic:\n message = '身份证号已经注册'\n return render(request, 'patient/register.html', locals())\n\n new_patient = models.Patient()\n new_patient.name = name\n new_patient.password = password1\n new_patient.gender = gender\n new_patient.identity_card_no = identity_card_no\n new_patient.medical_insurance = medical_insurance\n new_patient.telephone_no = telephone_no\n new_patient.birth_date = birth_date\n new_patient.save()\n\n return redirect('/patient/login/')\n else:\n return render(request, 'patient/register.html', locals())\n register_form = forms.PatientRegisterForm()\n return render(request, 'patient/register.html', locals())\n\n\ndef login(request):\n # if request.session.get('is_login', None):\n # login_type = request.session['login_type']\n # return redirect(f'/{login_type}/index/')\n if request.method == 'POST':\n login_form = forms.LoginForm(request.POST)\n message = '请检查填写的内容!'\n if login_form.is_valid():\n identity_card_no = login_form.cleaned_data.get('identity_card_no')\n password = login_form.cleaned_data.get('password')\n patient = models.Patient.objects.filter(identity_card_no=identity_card_no)\n if not patient:\n message = '用户不存在'\n return render(request, 'patient/login.html', locals())\n patient = patient[0]\n\n if patient.password != password:\n message = '密码错误'\n return render(request, 'patient/login.html', locals())\n request.session['is_login'] = True\n request.session['login_type'] = 'patient'\n request.session['identity_card_no'] = identity_card_no\n return redirect('/patient/info/')\n else:\n return render(request, 'patient/login.html', locals())\n\n login_form = forms.LoginForm()\n return render(request, 'patient/login.html', locals())\n\n\ndef logout(request):\n if request.session.get('is_login', None):\n request.session['is_login'] = False\n request.session['login_type'] = None\n request.session['ID'] = None\n return redirect('/Hospital/')\n\n\ndef index(request):\n return render(request, 'patient/index_old.html', locals())\n\n\ndef info(request):\n identity_card_no = request.session['identity_card_no']\n patient = models.Patient.objects.get(identity_card_no=identity_card_no)\n return render(request, 'patient/info.html', {'patient': patient})\n\n\ndef makeAppointment(request):\n patient = models.Patient.objects.get(identity_card_no=request.session['identity_card_no'])\n if request.method == 
'POST':\n # 统计已有挂号数,假如超过两个不允许再挂号\n patient = models.Patient.objects.get(identity_card_no=request.session['identity_card_no'])\n num = len(models.Appointment.objects.filter(patient=patient, isActive=True))\n # 待修改\n if num > 100:\n message = '挂号多于两个'\n return render(request, 'patient/makeAppointment.html', locals())\n\n appointment = models.Appointment()\n appointment.patient = models.Patient.objects.get(identity_card_no=request.session['identity_card_no'])\n appointment.doctor = models.Doctor.objects.get(identity_card_no=request.POST.get('appointment_doctor_id'))\n appointment.appointment_time = request.POST.get('appointment_time')\n appointment.appointment_date = request.POST.get('appointment_date')\n\n appointment.isActive = True\n appointment.save()\n request.session['appointment_id'] = appointment.id\n\n return redirect('/patient/makeAppointment/detail')\n\n departs = [depart[0] for depart in models.Doctor.department_choices]\n departs_ = [depart[1] for depart in models.Doctor.department_choices]\n depart = request.GET.get('depart', departs_[0] if 'depart' not in request.session else request.session['depart'])\n request.session['depart'] = depart\n # print('科室', depart, departs_.index(depart))\n records = models.Doctor.objects.filter(department=departs[departs_.index(depart)])\n num_per_page = 5\n paginator = Paginator(records, num_per_page)\n page = request.GET.get('page', 1)\n try:\n page_obj = paginator.page(page)\n except PageNotAnInteger:\n page_obj = paginator.page(1)\n except EmptyPage:\n page_obj = paginator.page(paginator.num_pages)\n is_paginated = True if paginator.num_pages > 1 else False\n page_range = paginator.get_elided_page_range(page, on_each_side=3, on_ends=2)\n return render(request, 'patient/makeAppointment.html', locals())\n\n\ndef appointmentDetail(request):\n appointment = models.Appointment.objects.get(id=request.session['appointment_id'])\n return render(request, 'patient/appointmentDetail.html', locals())\n\n\ndef appointment(request):\n identity_card_no = request.session['identity_card_no']\n patient = models.Patient.objects.get(identity_card_no=identity_card_no)\n appointments = models.Appointment.objects.filter(patient=patient)\n num_per_page = 5\n paginator = Paginator(appointments, num_per_page)\n page = request.GET.get('page', 1)\n try:\n page_obj = paginator.page(page)\n except PageNotAnInteger:\n page_obj = paginator.page(1)\n except EmptyPage:\n page_obj = paginator.page(paginator.num_pages)\n is_paginated = True if paginator.num_pages > 1 else False\n page_range = paginator.get_elided_page_range(page, on_each_side=3, on_ends=2)\n return render(request, 'patient/appointment.html', locals())\n\n\ndef diagnosis(request):\n if request.method == 'POST':\n request.session['diagnosis'] = request.POST.get('diagnosis')\n return redirect('/patient/diagnosis/detail')\n identity_card_no = request.session['identity_card_no']\n patient = models.Patient.objects.get(identity_card_no=identity_card_no)\n diagnosis_records = models.Diagnosis.objects.filter(patient=patient)\n num_per_page = 5\n paginator = Paginator(diagnosis_records, num_per_page)\n page = request.GET.get('page', 1)\n try:\n page_obj = paginator.page(page)\n except PageNotAnInteger:\n page_obj = paginator.page(1)\n except EmptyPage:\n page_obj = paginator.page(paginator.num_pages)\n is_paginated = True if paginator.num_pages > 1 else False\n page_range = paginator.get_elided_page_range(page, on_each_side=3, on_ends=2)\n return render(request, 'patient/diagnosis.html', locals())\n\n\ndef 
diagnosisDetail(request):\n record = models.Diagnosis.objects.get(id=request.session['diagnosis'])\n medicines = models.MedicineRequest.objects.filter(diagnosis=record)\n return render(request, 'patient/detail.html', locals())\n\n\ndef bill(request):\n if request.method == 'POST':\n request.session['bill'] = request.POST.get('bill')\n return redirect('/patient/bill/detail')\n identity_card_no = request.session['identity_card_no']\n patient = models.Patient.objects.get(identity_card_no=identity_card_no)\n bills = models.Bill.objects.filter(diagnosis__in=models.Diagnosis.objects.filter(patient=patient))\n\n num_per_page = 5\n paginator = Paginator(bills, num_per_page)\n page = request.GET.get('page', 1)\n try:\n page_obj = paginator.page(page)\n except PageNotAnInteger:\n page_obj = paginator.page(1)\n except EmptyPage:\n page_obj = paginator.page(paginator.num_pages)\n is_paginated = True if paginator.num_pages > 1 else False\n page_range = paginator.get_elided_page_range(page, on_each_side=3, on_ends=2)\n return render(request, 'patient/bill.html', locals())\n\n\ndef billDetail(request):\n bill = models.Bill.objects.get(id=request.session['bill'])\n diag = bill.diagnosis\n bill_detail = generate_bill(diag)\n if request.method == 'POST':\n bill.is_active = False\n bill.save()\n message = '缴费成功'\n return render(request, 'patient/billDetail.html', locals())\n return render(request, 'patient/billDetail.html', locals())\n\n\n","repo_name":"AlphaLeonid/HospitalSystem","sub_path":"Hospital/patientViews.py","file_name":"patientViews.py","file_ext":"py","file_size_in_byte":9848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"10310491203","text":"\"\"\"Add a magic handler for select, describe and explain plugins.\"\"\"\nfrom IPython.core import magic\nfrom rekall import ipython_support\n\n\n@magic.magics_class\nclass EfilterMagics(magic.Magics):\n\n def _RunPlugin(self, session, plugin_name, line):\n # Strip quotes.\n while line[0] == line[-1] and line[0] in \"'\\\"\":\n line = line[1:-1]\n\n return session.RunPlugin(plugin_name, query=line)\n\n @magic.line_cell_magic\n def SELECT(self, line, cell=None):\n return self._process_select(line, cell)\n\n @magic.line_cell_magic\n def select(self, line, cell=None):\n \"\"\"This makes it easier to run the search plugin:\n\n[1] win7.elf 15:35:09> select * from pslist() where _EPROCESS.name =~ \"svchost\"\n _EPROCESS Name PID PPID Thds Hnds Sess Wow64\n-------------- -------------------- ----- ------ ------ -------- ------ ------\n0xfa80024f85d0 svchost.exe 236 480 19 455 0 False\n0xfa80023f6770 svchost.exe 608 480 12 352 0 False\n \"\"\"\n return self._process_select(line, cell)\n\n def _process_select(self, line, cell=None):\n session = self.shell.user_module.session\n return self._RunPlugin(session, \"search\", \"select \" + line + (\n cell or \"\"))\n\n @magic.line_cell_magic\n def pager(self, line, cell=None):\n session = self.shell.user_module.session\n if \" \" in line:\n _, line_end = line.split(\" \", 1)\n else:\n # A bare pager magic with pager already set, means to clear it.\n if session.GetParameter(\"pager\"):\n session.SetParameter(\"pager\", None)\n return\n\n line_end = \"less\"\n\n session.SetParameter(\"pager\", 
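# [Editor's note] Every view in the patientViews record above repeats the same Paginator
# boilerplate; a small helper like this (an assumption, not present in that app) would
# remove the duplication while keeping identical behaviour:
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def paginate(request, queryset, per_page=5):
    paginator = Paginator(queryset, per_page)
    page = request.GET.get('page', 1)
    try:
        page_obj = paginator.page(page)
    except PageNotAnInteger:
        page_obj = paginator.page(1)
    except EmptyPage:
        page_obj = paginator.page(paginator.num_pages)
    page_range = paginator.get_elided_page_range(page, on_each_side=3, on_ends=2)
    return page_obj, paginator.num_pages > 1, page_range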
line_end)\n\n\nipython_support.REGISTERED_MAGICS.append(EfilterMagics)\n","repo_name":"google/rekall","sub_path":"rekall-core/rekall/plugins/common/efilter_plugins/ipython.py","file_name":"ipython.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":1883,"dataset":"github-code","pt":"41"} +{"seq_id":"34270896762","text":"import click\nfrom ship_it import fpm\n\n@click.command()\n@click.option('--requirements', default=None, help='Path to requirements.txt')\n@click.option('--setup', default=None, help='Path to setup.py')\n@click.argument('manifest')\ndef main(manifest, requirements, setup):\n fpm(manifest, requirements, setup)\n\nif __name__ == '__main__':\n main()\n","repo_name":"robdennis/ship_it","sub_path":"ship_it/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"41"} +{"seq_id":"2276071733","text":"import os\ndirname = os.path.dirname(__file__)\n\nin_file = dirname + '/input.txt'\nf = open(in_file, 'r', encoding='UTF-8')\n\nn = int(f.readline())\na = []\nfor i in range(n):\n a.append(int(f.readline()))\nnew_a = set(a)\nprint(len(new_a))\n","repo_name":"magicana-j/atcoder","sub_path":"abc085b/abc085b.py","file_name":"abc085b.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"11262385263","text":"\nimport pygame,sys\ndef check_collision(begin,bird,obj,obj2,floor):\n if begin :\n if pygame.sprite.groupcollide(obj,obj2,False,False):\n return True # with pipes \n \n\n if bird.rect.y <= 0: # with sky boundary \n return True \n\n if bird.rect.colliderect(floor): # with floor \n return True\n\n\ndef draw_text(screen,font,text,text_col,x,y):\n img = font.render(text,True,text_col)\n\n screen.blit(img,(x,y))\n \n","repo_name":"mohitxflakes/Mulitplayer-flappy-bird","sub_path":"utilses.py","file_name":"utilses.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"1203802859","text":"\"\"\"Models for storing photos.\"\"\"\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\n\nfrom website.utils import ResizedHashNameImageField\n\n\nclass Photo(models.Model):\n \"\"\"Model that represents a single, stored photo\"\"\"\n\n image = ResizedHashNameImageField(\n verbose_name=\"Foto\",\n max_width=900,\n max_height=600,\n upload_to=\"photos\"\n )\n\n creator = models.ForeignKey(\n get_user_model(),\n verbose_name=\"Gebruiker\",\n on_delete=models.PROTECT,\n )\n\n creation_date = models.DateTimeField(\n verbose_name=\"Uploaddatum\",\n auto_now_add=True,\n )\n","repo_name":"SebastiaanZ/minigigscyclingteam","sub_path":"website/photos/models/photos.py","file_name":"photos.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"26748336074","text":"import sys,os\nfrom PyQt5 import QtGui,QtCore,QtWidgets\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nfrom Layout import Layout\nfrom Help import Help\nimport images_qr\n\nclass Interface(QtWidgets.QMainWindow):\n\n def __init__(self,parent=None):\n super(Interface, self).__init__(parent)\n self.setupUi() \n \n 
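# [Editor's note] This excerpt of the Interface record shows no entry point; a conventional
# PyQt5 launcher for it would look like this (setupUi already calls self.show(), so no extra
# show() is needed):
import sys
from PyQt5 import QtWidgets

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = Interface()        # the QMainWindow subclass defined in this record
    sys.exit(app.exec_())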
def setupUi(self):\n self.widget()\n self.label()\n self.draw()\n self.spec()\n self.menu()\n self.layout()\n self.statusBar()\n self.setAcceptDrops(True)\n # x,y,width,length\n self.setGeometry(100, 100, 1300, 650)\n self.setWindowTitle('Mapping Analyst')\n self.setWindowIcon(QtGui.QIcon(':/images/logo.gif')) \n self.show()\n\n def widget(self): \n self.Item_List = QtWidgets.QListWidget(self)\n self.Id_Select = QtWidgets.QComboBox(self) \n self.Search_Line = QtWidgets.QLineEdit(self)\n self.Search_Button = QtWidgets.QPushButton(\"筛选\") \n self.Sigma_Flag = QtWidgets.QRadioButton(\"循环筛除距离 Median 值 3σ 以外的 die\")\n self.Die_Select = QtWidgets.QComboBox(self)\n self.Delete_Button = QtWidgets.QPushButton(\"剔除该die\")\n self.Reset_Button = QtWidgets.QPushButton(\"重置\")\n # set Font style\n self.setStyleSheet(\"QWidget{font-family:Microsoft YaHei}\")\n self.Item_List.setFont(QtGui.QFont('Microsoft YaHei',9, QtGui.QFont.Bold))\n \n def draw(self): \n self.fig1 = plt.figure(facecolor=('none'))\n self.fig2 = plt.figure(facecolor=('none'))\n # canvas1 for showing data dot\n self.canvas1 = FigureCanvas(self.fig1)\n # toolbar for canvas1\n self.toolbar = NavigationToolbar(self.canvas1, self)\n # canvas2 for showing wafer and die image\n self.canvas2 = FigureCanvas(self.fig2) \n\n def label(self):\n # fixed label\n self.label_fileNow = QtWidgets.QLabel(\"当前文件:\\t\")\n self.label_waferID = QtWidgets.QLabel(\"WaferID:\")\n self.label_Etest = QtWidgets.QLabel(\"Etest:\")\n self.label_Testkey = QtWidgets.QLabel(\"Testkey:\")\n self.label_Device = QtWidgets.QLabel(\"Device:\")\n self.label_W = QtWidgets.QLabel(\"W:\")\n self.label_L = QtWidgets.QLabel(\"L:\")\n self.label_Unit = QtWidgets.QLabel(\"Unit:\")\n self.label_DieCount = QtWidgets.QLabel(\"DieCount:\") \n self.label_Median = QtWidgets.QLabel(\"Median:\")\n self.label_Average = QtWidgets.QLabel(\"Average:\")\n self.label_Max = QtWidgets.QLabel(\"Max:\")\n self.label_Min = QtWidgets.QLabel(\"Min:\")\n self.label_Standard = QtWidgets.QLabel(\"Standard(σ):\")\n self.label_3sigma = QtWidgets.QLabel(\"3sigma(3σ):\")\n self.label_sigmMed = QtWidgets.QLabel(\"σ/median:\")\n # calculated value\n self.label_fileNow_C = QtWidgets.QLabel(\"\")\n self.label_Etest_C = QtWidgets.QLabel(\"\")\n self.label_Testkey_C = QtWidgets.QLabel(\"\")\n self.label_Device_C = QtWidgets.QLabel(\"\")\n self.label_W_C = QtWidgets.QLabel(\"\")\n self.label_L_C = QtWidgets.QLabel(\"\")\n self.label_Unit_C = QtWidgets.QLabel(\"\")\n self.label_DieCount_C = QtWidgets.QLabel(\"\")\n self.label_Median_C = QtWidgets.QLabel(\"\")\n self.label_Average_C = QtWidgets.QLabel(\"\")\n self.label_Max_C = QtWidgets.QLabel(\"\")\n self.label_Min_C = QtWidgets.QLabel(\"\")\n self.label_Standard_C = QtWidgets.QLabel(\"\")\n self.label_3sigma_C = QtWidgets.QLabel(\"\")\n self.label_sigmMed_C = QtWidgets.QLabel(\"\")\n \n def layout(self):\n layout = Layout()\n self.setCentralWidget(layout.wid)\n \n layout.fileFrame.addWidget(self.label_fileNow)\n layout.fileFrame.addWidget(self.label_fileNow_C)\n layout.fileFrame.addStretch()\n \n layout.idFrame.addWidget(self.label_waferID)\n layout.idFrame.addWidget(self.Id_Select) \n layout.searchFrame.addWidget(self.Search_Line)\n layout.searchFrame.addWidget(self.Search_Button)\n \n layout.listFrame.addWidget(self.Item_List)\n layout.barFrame.addWidget(self.toolbar)\n layout.canvasFrame1.addWidget(self.canvas1)\n layout.canvasFrame2.addWidget(self.canvas2)\n \n for w in [\n QtWidgets.QLabel(\"\"),\n QtWidgets.QLabel(\"\"),\n QtWidgets.QLabel(\"\"),\n 
self.label_Etest,\n self.label_Testkey,\n self.label_Device,\n self.label_W,\n self.label_L,\n self.label_Unit,\n self.label_DieCount,\n self.label_Median,\n self.label_Average,\n self.label_Max,\n self.label_Min,\n self.label_Standard,\n self.label_3sigma,\n self.label_sigmMed\n ]:\n layout.m1Frame.addWidget(w)\n layout.m1Frame.addStretch()\n layout.m1Frame.setSpacing(10) \n \n for w in [\n QtWidgets.QLabel(\"\"),\n QtWidgets.QLabel(\"\"),\n QtWidgets.QLabel(\"\"),\n self.label_Etest_C,\n self.label_Testkey_C,\n self.label_Device_C,\n self.label_W_C,\n self.label_L_C,\n self.label_Unit_C,\n self.label_DieCount_C,\n self.label_Median_C,\n self.label_Average_C,\n self.label_Max_C,\n self.label_Min_C,\n self.label_Standard_C,\n self.label_3sigma_C,\n self.label_sigmMed_C\n ]:\n layout.m2Frame.addWidget(w)\n layout.m2Frame.addStretch()\n layout.m2Frame.setSpacing(10)\n \n for w in [\n self.label_spec1,\n self.label_Blue,\n self.label_spec2,\n self.label_Lime,\n self.label_spec3,\n self.label_Violet,\n self.label_spec4,\n self.label_Red\n ]:\n layout.specFrame.addWidget(w)\n layout.specFrame.addStretch()\n\n layout.FlagFrame.addWidget(self.Sigma_Flag) \n layout.FlagFrame.addStretch()\n \n layout.DelFrame.addWidget(self.Die_Select)\n layout.DelFrame.addWidget(self.Delete_Button)\n layout.DelFrame.addWidget(self.Reset_Button)\n layout.DelFrame.addStretch() \n\n def spec(self):\n self.label_spec1 = QtWidgets.QLabel(\"δ(x): 0 ≤\")\n self.label_Blue = QtWidgets.QLabel(\"■\")\n self.label_spec2 = QtWidgets.QLabel(\"< 1% ≤\")\n self.label_Lime = QtWidgets.QLabel(\"■\")\n self.label_spec3 = QtWidgets.QLabel(\"< 5% ≤ \")\n self.label_Violet = QtWidgets.QLabel(\"■\") \n self.label_spec4 = QtWidgets.QLabel(\"< 50% ≤\")\n self.label_Red = QtWidgets.QLabel(\"■\") \n self.label_spec1.setFont(QtGui.QFont(\"Arial\",10))\n self.label_spec2.setFont(QtGui.QFont(\"Arial\",10))\n self.label_spec3.setFont(QtGui.QFont(\"Arial\",10))\n self.label_spec4.setFont(QtGui.QFont(\"Arial\",10))\n self.label_Blue.setFont(QtGui.QFont(\"Arial\",12))\n self.label_Lime.setFont(QtGui.QFont(\"Arial\",12))\n self.label_Violet.setFont(QtGui.QFont(\"Arial\",12))\n self.label_Red.setFont(QtGui.QFont(\"Arial\",12))\n \n pe1 = QtGui.QPalette()\n pe2 = QtGui.QPalette()\n pe3 = QtGui.QPalette()\n pe4 = QtGui.QPalette() \n \n pe1.setColor(QtGui.QPalette.WindowText,QtCore.Qt.blue)\n pe2.setColor(QtGui.QPalette.WindowText,QtCore.Qt.green)\n pe3.setColor(QtGui.QPalette.WindowText,QtCore.Qt.magenta)\n pe4.setColor(QtGui.QPalette.WindowText,QtCore.Qt.red) \n \n self.label_Blue.setPalette(pe1) \n self.label_Lime.setPalette(pe2) \n self.label_Violet.setPalette(pe3) \n self.label_Red.setPalette(pe4) \n \n def menu(self):\n # load new file action\n openFile = QtWidgets.QAction('Open', self)\n openFile.setStatusTip('载入新的数据')\n openFile.triggered.connect(self.showDialog)\n # exit action\n exitAction = QtWidgets.QAction('Exit', self)\n exitAction.setStatusTip('退出')\n exitAction.triggered.connect(QtWidgets.qApp.quit)\n # about action\n aboutAction = QtWidgets.QAction('About', self)\n aboutAction.setStatusTip('关于')\n aboutAction.triggered.connect(self.about) \n # add menubar\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('File')\n fileMenu.addAction(openFile)\n fileMenu.addAction(exitAction)\n aboutMenu = menubar.addMenu('Help')\n aboutMenu.addAction(aboutAction)\n \n def about(self):\n QtWidgets.QMessageBox.about(self,\"About\",Help.about) \n \n def closeEvent(self, event):\n reply = QtWidgets.QMessageBox.question(self,'Message',\"确认要退出吗?\", \n 
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)\n if reply == QtWidgets.QMessageBox.Yes:\n event.accept()\n else:\n event.ignore() \n\n # drag the file to mainwindow ---start\n def dragEnterEvent(self, event):\n if event.mimeData().hasUrls():\n event.acceptProposedAction()\n else:\n super(Interface, self).dragEnterEvent(event)\n \n def dragMoveEvent(self, event):\n super(Interface, self).dragMoveEvent(event)\n \n def dropEvent(self, event):\n if event.mimeData().hasUrls():\n # event.mimeData().urls() is a list contain all file that dragged in\n fname = event.mimeData().urls()[0].toLocalFile()\n self.judgeFile(fname)\n event.acceptProposedAction()\n else:\n super(Interface,self).dropEvent(event) \n # drag the file to mainwindow ---end\n \n def showDialog(self):\n fname = QtWidgets.QFileDialog.getOpenFileName(self,'载入 Mapping Data','/.dat')[0]\n self.judgeFile(fname)\n \n def judgeFile(self,fname):\n # re-write in subclass \n pass\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n ex = Interface()\n sys.exit(app.exec_())","repo_name":"whyeemcc/Mapping-Analyst","sub_path":"Ui.py","file_name":"Ui.py","file_ext":"py","file_size_in_byte":10895,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"41"} +{"seq_id":"14746279584","text":"#!/usr/bin./python\r\n\r\nimport sys\r\nimport time\r\nimport difflib\r\nimport pigpio\r\nimport xbee\r\n\r\nRxGpioPin = 18\r\nbaudRate = 9600\r\nnumBit = 8\r\n\r\nXB = xbee.XBEE('/dev/serial0',baudRate,1)\r\n\r\ntry:\r\n pi = pigpio.pi()\r\n pi.set_mode(RxGpioPin, pigpio.INPUT)\r\n pi.bb_serial_read_open(RxGpioPin, baudRate, numBit)\r\n \r\n while 1:\r\n (count,data) = pi.bb_serial_read(RxGpioPin)\r\n if count:\r\n data = data.decode()\r\n \r\n startIndex = 0\r\n endIndex = len(data)\r\n # find index of $GPGGA within GPSIn (-1 means it's not # in GPSIn)\r\n dataIndex = data.find(\"$GPGGA\", startIndex, endIndex)\r\n if dataIndex == -1:\r\n continue\r\n else: \r\n # find index end of line\r\n dataLineEnd = data.find('\\r', dataIndex, endIndex)\r\n GPSLine = data[dataIndex:dataLineEnd]\r\n numCommas = 0\r\n i = 7\r\n while i < len(GPSLine):\r\n if GPSLine[i] == ',':\r\n numCommas += 1\r\n else:\r\n if numCommas == 0:\r\n GPSTime += GPSLine[i]\r\n elif numCommas == 1:\r\n GPSLat += GPSLine[i]\r\n elif numCommas == 2:\r\n GPSDirNS += GPSLine[i]\r\n elif numCommas == 3:\r\n GPSLong += GPSLine[i]\r\n elif numCommas == 4:\r\n GPSDirEW += GPSLine[i]\r\n i += 1\r\n \r\n XB.sendData(GPSLine)\r\n \r\n time.sleep(1)\r\n\r\nexcept Exception as e:\r\n print(e)\r\n pi.bb_serial_read_close(RxGpioPin)\r\n pi.stop()\r\n","repo_name":"hmutschler15/rpi_git","sub_path":"engr/340/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"4068615288","text":"# Author: OMKAR PATHAK\n\n# Decimal to binary using Stack\n\nimport Stack\n\ndef dtob(decimal, base = 2):\n myStack = Stack.Stack()\n while decimal > 0:\n myStack.push(decimal % base)\n decimal //= base\n\n result = ''\n while not myStack.isEmpty():\n result += str(myStack.pop())\n\n return result\n\nif __name__ == '__main__':\n print(dtob(15))\n","repo_name":"OmkarPathak/Data-Structures-using-Python","sub_path":"Stack/P03_DecimalToBinary.py","file_name":"P03_DecimalToBinary.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":920,"dataset":"github-code","pt":"41"} 
+{"seq_id":"24025625667","text":"class Vertex:\n \"\"\"\n Класс: является вершиной графа\n , содержит значения: вида графа, координаты, значение вершины, родители и наследников вершины\n \"\"\"\n _graph_orientation = False # Вид графа: НЕ Ориентированный/Ориентированный (view graph: Not Orientation/Orientation)\n _number_of_vertices = None # Количество вершин (number of vertices)\n _dimension = None # Размерность матрицы смежности\n _dimention_P = None # Размерность под матрицы\n _possible_values = [] # Список возможных вариантов\n \n Error = [] # Список ошибок\n\n def __init__(self, x,y,P, app=None, value=None):\n \"\"\"\n Принимает: координаты вершины, значение\n Метод: инициализирует основные значение\n , инициализирует переменные родителей и наследников\n \"\"\"\n # Изначальные значения\n self.x = x # координаты по x (x coordinates)\n self.y = y # координаты по y (y coordinates)\n self.value = value # значение вершины (vertex value) (вычисляется в на основе других вершин)\n self._dimention_P = P # размерность под матрицы\n\n # Даём возможность обращаться из вершине выводить своё значение на поле\n self.app = app\n \n # Связи графа\n self.parents = [] # Родители, от какой вершины идет стрелочка (parent vertices)\n self.heirs = [] # Наследники, до какой вершины идет стрелочка (heirs vertices)\n\n def set_parents(self, parent, clear=False):\n \"\"\"\n Принимает: список родителей вершины, рубильник очищения переменной\n Метод: записывает список родителей [(v1,weight),(v2,weight)...,(wn,weight)]\n \"\"\"\n if clear: self.parents.clear() # Очищаем переменную (clear the variable)\n self.parents.append(parent) # Записываем родителей (write parent)\n def get_parents(self): return self.parents\n\n def set_heirs(self, heir, clear=False):\n \"\"\"\n Принимает: список наследников вершины, рубильник очищения переменной\n Метод: записывает списк наследников [(v1,weight),(v2,weight)...,(vn,weight)]\n \"\"\"\n if clear: self.heirs.clear() # Очищаем переменную (clear the variable)\n self.heirs.append(heir) # Записываем наследника (write heir)\n def get_heirs(self): return self.heirs\n \n def set_dimension(self,value):\n \"\"\"\n Получает: Разрядности матрицы (Количество вершин)\n Метод: записывает количество возможных значений\n \"\"\"\n self._dimension = value\n # Записываем возможние значения\n self._possible_values = list(range(1,value+1))\n\n def set_value(self,value):\n \"\"\"\n Получает: значение вершины\n , производится проверка повторения значений на осях\n , оповещает наследников о полученном значении\n \"\"\"\n self.value = value\n\n # оповещаем наследников\n for vertex, weight in self.heirs:\n vertex.remove_possible_values(self,self.value)\n\n # поиск одинаковых значений на осях\n for vertex,xyp in self.parents:\n if vertex.get_value() == self.value:\n Vertex.Error.append(\"Vertex[{}][{}] = Vertex[{}][{}]({}) == {}: Repeat_values\".format(self.x,self.y,vertex.x,vertex.y,xyp,self.value))\n with open(\"Error.txt\",\"a\") as fail:\n fail.write(\"Vertex[{}][{}] = Vertex[{}][{}]({}) == {}: Repeat_values\\n\".format(self.x,self.y,vertex.x,vertex.y,xyp,self.value))\n self.app.widget_Error[\"fg\"] = \"#CC5555\"\n self.app.widget_Error[\"text\"] = \"Repeat_values\"\n def get_value(self): return self.value\n \n def set_possible_values(self,list_values):\n \"\"\"\n Получает: Лист значений\n Метод: Записывает значения\n \"\"\"\n self._possible_values = list_values.copy()\n \n if self.value: return # если значение есть то вычислять нечего\n # пытаемся вычислить значение\n self.value_calculation()\n def 
get_possible_values(self): return self._possible_values\n\n\n #### Методы обработки данных ############################################################################\n def value_calculation(self):\n \"\"\"\n Метод: выбирает оставшийся вариант\n , проверяет на отсустви вариантов и записывает в начальный класс ошибки\n \"\"\"\n if len(self._possible_values) == 1:\n self.set_value(self._possible_values.pop())\n # выводим результат на поле\n self.app.widget_Pole[self.y*self._dimension+self.x][\"text\"] = self.value\n elif len(self._possible_values) == 0:\n Vertex.Error.append(\"Vertex[{}][{}] = {}: Not possibl_values\".format(self.x,self.y,self.value))\n with open(\"Error.txt\",\"a\") as fail:\n fail.write(\"Vertex[{}][{}] = {}: Not possibl_values\\n\".format(self.x,self.y,self.value))\n self.app.widget_Error[\"fg\"] = \"#CC5555\"\n self.app.widget_Error[\"text\"] = \"Not possibl_values\"\n def remove_possible_values(self,vertex,value,write_parents=True):\n \"\"\"\n Активируется: при получении значения у соседних вершин\n Получает: соседнюю вершину и вес связи между ними\n Метод: удаляет возможное значение\n , перемещаем наследника в родители\n , пытаемся вычислить значение вершины\n \"\"\"\n if self._dimension: # вычисляем, если есть возможные значения\n if self._possible_values.count(value):\n self._possible_values.remove(value)\n # удаление наследника возможно сократит вычисление\n # , но появляется логический баг\n # (не может определить ошибку повторение значений по осям)\n \n # перемещаем наследника в родители\n if write_parents:\n if vertex.x == self.x: # на горизонте\n self.parents.append((vertex,1))\n elif vertex.y == self.y: # на вертикали\n self.parents.append((vertex,2))\n else: # в матричном блоке\n self.parents.append((vertex,3))\n \n if self.value: return # выходим, если уже есть значение\n # Пытаемся вычислить значение\n self.value_calculation()\n \n\n def search_for_adjacent_vertices_by_axes(self):\n \"\"\"\n Метод: проводит сортировку смежных вершин по их осям\n \"\"\"\n axis_x = [self] # По x\n axis_y = [self] # По y\n axis_p = [self] # По матрице p\n\n for heir,weight in self.get_heirs():\n if heir.get_value(): continue # у этой вершины нет возможных вариантов\n\n # сортировка по осям\n if heir.x == self.x:\n axis_y.append(heir)\n if heir.y == self.y:\n axis_x.append(heir)\n if heir.x//self._dimention_P == self.x//self._dimention_P and heir.y//self._dimention_P == self.y//self._dimention_P:\n axis_p.append(heir)\n\n return [axis_x,axis_y,axis_p]","repo_name":"Shokhov/sudoku","sub_path":"Vertex.py","file_name":"Vertex.py","file_ext":"py","file_size_in_byte":8967,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"1202156581","text":"\n\nimport itertools\nfrom .writer import Writer\n\nclass GnucapWriter(Writer):\n \"\"\" Class GnucapWriter -- Handle gnucap format export\n\n Write signals to columns tab separated format used by Gnucap\n Signals should have the same reference\n \"\"\"\n\n def __init__(self):\n \"\"\" Instanciate the Reader\n\n Parameter\n ---------\n None\n\n Returns\n -------\n GnucapWriter\n The object instanciated\n \"\"\"\n super(GnucapWriter, self).__init__()\n self._prefixes = ['v', 'vout', 'vin', 'i', 'p', 'nv', 'ev', 'r', 'y',\n 'z', 'zraw', 'pd', 'ps', 'f', 'input', 'ioffset_',\n 'ipassive', 'pi', 'pidb', 'pm', 'pmdb', 'pp']\n self._prefixes.sort()\n self._prefixes.reverse()\n\n def _get_format_name(self):\n \"\"\" Return the format name\n\n Parameter\n ---------\n None\n\n Returns\n 
-------\n string\n The format identifier\n \"\"\"\n return 'gnucap'\n\n def _format_check(self, sigs):\n \"\"\" Check if all signals have the same reference\n\n Parameter\n ---------\n sigs: dict of Signals\n The Signal list to write\n\n Returns\n -------\n bool\n True if no issue found to write the Signal list in this format\n \"\"\"\n if not sigs:\n return False\n\n ref = list(sigs.values())[0].ref\n return all(s.ref is ref for s in sigs.values())\n\n def write_signals(self, sigs):\n \"\"\" Write signals to file\n Loop through all the data of each signal to write\n columns line by line.\n\n Gnucap format is (tab separated):\n #Time|Freq v(x) v(y) v(aa) ...\n 1.234 1.234 1.234 1.234\n 1.234 1.234 1.234 1.234\n 1.234 1.234 1.234 1.234\n ...\n\n Parameter\n ---------\n sigs: dict of Signals\n The list of Signals to write\n\n Returns\n -------\n Nothing\n \"\"\"\n SEPARATOR = '\\t'\n # Overwrite file or not\n self._ow = True\n if self._ow:\n mode = \"w\"\n else:\n mode = \"a\"\n\n # construct a list of signals, with the reference signal first\n s = list(sigs.values())\n s.insert(0, s[0].ref)\n\n with open(self._fn, mode) as f:\n # write the header\n names = list(map(self.format_sig_name, [x.name for x in s]))\n f.write('#%s\\n' % SEPARATOR.join(names))\n\n # write the data\n data = (iter(x.data) for x in s)\n for x in zip(*tuple(data)):\n f.write('%s\\n' % SEPARATOR.join(map(str, x)))\n\n def format_sig_name(self, name):\n \"\"\" Convert signal name to gnucap format e.g. vgs -> v(gs)\n Add parenthesis in the signal name to be compatible with gnucap format\n\n Parameter\n ---------\n name: string\n The name of the Signal to convert\n\n Returns\n -------\n name: string\n The converted name\n \"\"\"\n for p in self._prefixes:\n if name.startswith(p):\n return '%s(%s)' % (p, name.replace(p, '', 1))\n return name\n\n","repo_name":"agardelein/oscopy","sub_path":"src/oscopy/writers/gnucap_writer.py","file_name":"gnucap_writer.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"41"} +{"seq_id":"73788244923","text":"import numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\nN = 8\r\nX = []\r\n\r\nF = np.zeros((N, N), dtype=np.complex128)\r\nfor i in range(N):\r\n\tfor k in range(N):\r\n\t\tF[i, k] = math.e ** (-2 * np.pi * 1j * k * i / N)\r\n\r\nfig, axs = plt.subplots(N, figsize=(5, 8))\r\n\r\nfor i in range(N):\r\n axs[i].plot(np.real(F[i, :]))\r\n axs[i].set_title(f'Row {i+1}')\r\n\r\n axs[i].plot(np.imag(F[i, :]))\r\n axs[i].set_title(f'Row {i+1}')\r\n\r\nprint(np.allclose(np.eye(N), F.dot(F.T.conj()) / N, atol=1e-3))\r\n\r\n\r\nplt.tight_layout()\r\n\r\nplt.savefig('ex1.png', format='png') \r\nplt.savefig('ex1.pdf', format='pdf')\r\n\r\n# plt.show()\r\n","repo_name":"cosmincolceru/signal_processing","sub_path":"Lab_3/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"72782881722","text":"\n# a = int(input('Digite um numero: '))\n#\n# div = 0\n# for x in range(1, a+1):\n# resto = a % x\n# print(a)\n# if resto == 0:\n# div += 1\n#\n# if div == 2:\n# print(f'numero {a} é primo')\n# else:\n# print(f'não é primo {a}')\n\n# a = int(input('Digite um numero: '))\n# for i in range(a):\n# div = 0\n# for x in range(1, i+1):\n# resto = i % x\n# if resto == 0:\n# div += 1\n#\n# if div == 2:\n# print(f'numero {i} é primo')\n# print(f'o numero{a} possui {i}')\n\n\nnota = 
int(input('Entre com uma nota: '))\nwhile nota > 10:\n nota = int(input('NOta insvalida. Entre com a nota correta: '))\n\n","repo_name":"alyssonhyago/Curso-Python-","sub_path":"Introdução a programação com Python/Aula 03 - Laços/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"39012140307","text":"from pyld import jsonld\nimport json\nimport os\n\nfrom core.model import (\n Annotation,\n Identifier,\n get_one_or_create,\n)\n\nfrom core.app_server import (\n url_for,\n)\nfrom core.util.datetime_helpers import utc_now\n\nfrom .problem_details import *\n\ndef load_document(url):\n \"\"\"Retrieves JSON-LD for the given URL from a local\n file if available, and falls back to the network.\n \"\"\"\n files = {\n AnnotationWriter.JSONLD_CONTEXT: \"anno.jsonld\",\n AnnotationWriter.LDP_CONTEXT: \"ldp.jsonld\"\n }\n if url in files:\n base_path = os.path.join(os.path.split(__file__)[0], 'jsonld')\n jsonld_file = os.path.join(base_path, files[url])\n data = open(jsonld_file).read()\n doc = {\n \"contextUrl\": None,\n \"documentUrl\": url,\n \"document\": data\n }\n return doc\n else:\n return jsonld.load_document(url)\n\njsonld.set_document_loader(load_document)\n\nclass AnnotationWriter(object):\n\n CONTENT_TYPE = 'application/ld+json; profile=\"http://www.w3.org/ns/anno.jsonld\"'\n\n JSONLD_CONTEXT = \"http://www.w3.org/ns/anno.jsonld\"\n LDP_CONTEXT = \"http://www.w3.org/ns/ldp.jsonld\"\n\n @classmethod\n def annotations_for(cls, patron, identifier=None):\n annotations = [annotation for annotation in patron.annotations if annotation.active]\n if identifier:\n annotations = [annotation for annotation in annotations if annotation.identifier == identifier]\n return annotations\n\n @classmethod\n def annotation_container_for(cls, patron, identifier=None):\n if identifier:\n url = url_for('annotations_for_work',\n identifier_type=identifier.type,\n identifier=identifier.identifier,\n library_short_name=patron.library.short_name,\n _external=True)\n else:\n url = url_for(\"annotations\", library_short_name=patron.library.short_name, _external=True)\n annotations = cls.annotations_for(patron, identifier=identifier)\n\n latest_timestamp = None\n if len(annotations) > 0:\n # patron.annotations is already sorted by timestamp, so the first\n # annotation is the most recent.\n latest_timestamp = annotations[0].timestamp\n\n container = dict()\n container[\"@context\"] = [cls.JSONLD_CONTEXT, cls.LDP_CONTEXT]\n container[\"id\"] = url\n container[\"type\"] = [\"BasicContainer\", \"AnnotationCollection\"]\n container[\"total\"] = len(annotations)\n container[\"first\"] = cls.annotation_page_for(patron, identifier=identifier, with_context=False)\n return container, latest_timestamp\n\n\n @classmethod\n def annotation_page_for(cls, patron, identifier=None, with_context=True):\n if identifier:\n url = url_for('annotations_for_work',\n identifier_type=identifier.type,\n identifier=identifier.identifier,\n library_short_name=patron.library.short_name,\n _external=True)\n else:\n url = url_for(\"annotations\", library_short_name=patron.library.short_name, _external=True)\n annotations = cls.annotations_for(patron, identifier=identifier)\n details = [cls.detail(annotation, with_context=with_context) for annotation in annotations]\n\n page = dict()\n if with_context:\n page[\"@context\"] = cls.JSONLD_CONTEXT\n page[\"id\"] = url\n page[\"type\"] = \"AnnotationPage\"\n page[\"items\"] = 
details\n return page\n\n @classmethod\n def detail(cls, annotation, with_context=True):\n item = dict()\n if with_context:\n item[\"@context\"] = cls.JSONLD_CONTEXT\n item[\"id\"] = url_for(\"annotation_detail\", annotation_id=annotation.id,\n library_short_name=annotation.patron.library.short_name,\n _external=True)\n item[\"type\"] = \"Annotation\"\n item[\"motivation\"] = annotation.motivation\n item[\"body\"] = annotation.content\n if annotation.target:\n target = json.loads(annotation.target)\n compacted = jsonld.compact(target, cls.JSONLD_CONTEXT)\n del compacted[\"@context\"]\n item[\"target\"] = compacted\n if annotation.content:\n body = json.loads(annotation.content)\n compacted = jsonld.compact(body, cls.JSONLD_CONTEXT)\n del compacted[\"@context\"]\n item[\"body\"] = compacted\n\n return item\n\nclass AnnotationParser(object):\n\n @classmethod\n def parse(cls, _db, data, patron):\n if patron.synchronize_annotations != True:\n return PATRON_NOT_OPTED_IN_TO_ANNOTATION_SYNC\n\n try:\n data = json.loads(data)\n if 'id' in data and data['id'] is None:\n del data['id']\n data = jsonld.expand(data)\n except ValueError as e:\n return INVALID_ANNOTATION_FORMAT\n\n if not data or not len(data) == 1:\n return INVALID_ANNOTATION_TARGET\n data = data[0]\n\n target = data.get(\"http://www.w3.org/ns/oa#hasTarget\")\n if not target or not len(target) == 1:\n return INVALID_ANNOTATION_TARGET\n target = target[0]\n\n source = target.get(\"http://www.w3.org/ns/oa#hasSource\")\n\n if not source or not len(source) == 1:\n return INVALID_ANNOTATION_TARGET\n source = source[0].get('@id')\n\n try:\n identifier, ignore = Identifier.parse_urn(_db, source)\n except ValueError as e:\n return INVALID_ANNOTATION_TARGET\n\n motivation = data.get(\"http://www.w3.org/ns/oa#motivatedBy\")\n if not motivation or not len(motivation) == 1:\n return INVALID_ANNOTATION_MOTIVATION\n motivation = motivation[0].get('@id')\n if motivation not in Annotation.MOTIVATIONS:\n return INVALID_ANNOTATION_MOTIVATION\n\n loans = patron.loans\n loan_identifiers = [loan.license_pool.identifier for loan in loans]\n if identifier not in loan_identifiers:\n return INVALID_ANNOTATION_TARGET\n\n content = data.get(\"http://www.w3.org/ns/oa#hasBody\")\n if content and len(content) == 1:\n content = content[0]\n else:\n content = None\n\n target = json.dumps(target)\n extra_kwargs = {}\n if motivation == Annotation.IDLING:\n # A given book can only have one 'idling' annotation.\n pass\n elif motivation == Annotation.BOOKMARKING:\n # A given book can only have one 'bookmarking' annotation\n # per target.\n extra_kwargs['target'] = target\n\n annotation, ignore = Annotation.get_one_or_create(\n _db, patron=patron, identifier=identifier,\n motivation=motivation, on_multiple='interchangeable',\n **extra_kwargs\n )\n annotation.target = target\n if content:\n annotation.content = json.dumps(content)\n annotation.active = True\n annotation.timestamp = utc_now()\n\n return annotation\n","repo_name":"NYPL-Simplified/circulation","sub_path":"api/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":7199,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"41"} +{"seq_id":"17712241403","text":"import glob\nimport json\nimport csv\nimport re\nimport os\nimport sys\nimport json\nimport hashlib\nimport time\nimport shutil\nimport random\nimport hashlib\nfrom pathlib import Path\nimport zipfile\n\nroot = \"./Parent\"\ndest = \"./dest2\"\nmetadata_folder = \"metadata\"\ncsv_folder = 
\"csv_metadata\"\ndest_CSV = \"metadata.csv\"\nmodified_CSV = \"\"\n\ncollection_name = \"TWNFT\"\nextension = \"gif\"\nipfs_url = \"ipfs://\"\n\npre_defined = [\"tokenId\",\"name\",\"description\",\"image\",\"edition\"]\n\ndef create_csv():\n headers = []\n for p in pre_defined:\n headers.append(p)\n for file in glob.glob(dest+\"/\"+metadata_folder+\"/*.json\"):\n json_ob = json.load(open(file,encoding='utf-8-sig'))\n for i in json_ob[\"attributes\"]:\n headers.append(i[\"trait_type\"])\n headers = list(dict.fromkeys(headers))\n\n if not os.path.exists(dest+\"/\"+csv_folder):\n os.makedirs(dest+\"/\"+csv_folder)\n with open(dest+\"/\"+csv_folder+\"/\"+dest_CSV, 'w',newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(headers)\n for file in sorted(glob.glob(dest+\"/\"+metadata_folder+\"/*.json\"), key=lambda x: float(re.findall(\"(\\d+)\", x)[0])):\n row = [\"\"] * len(headers)\n json1 = json.load(open(file,encoding='utf-8-sig'))\n row[headers.index(\"tokenId\")] = int(json1[\"tokenId\"])\n row[headers.index(\"name\")] = collection_name + \" #\"+str(json1[\"tokenId\"])\n row[headers.index(\"description\")] = \"\"\n row[headers.index(\"image\")] = \"\"\n row[headers.index(\"edition\")] = \"\"\n for i in json1[\"attributes\"]:\n trtype = i[\"trait_type\"]\n index = headers.index(trtype)\n row[index] = i[\"value\"]\n writer.writerow(row)\n print(\"Validating the CSV\")\n print(\"------------------\")\n print(\"* CSV to Json\")\n csv_to_metadata(dest+\"/\"+csv_folder+\"/\"+dest_CSV)\n print(\"* Validating Metadata Json Against Parent\")\n is_all_good(False)\n print(\"* Delete Temp metadata.json\")\n print(\"------------------\")\n os.remove(dest+\"/\"+csv_folder+\"/metadata.json\")\n\ndef private_index(arr,index):\n try:\n arr.index(index)\n return True\n except ValueError:\n return False\n\ndef private_make_attriubte_array(headers, row):\n arr = []\n for h in headers:\n if not private_index(pre_defined,h):\n if row[headers.index(h)].strip():\n attr = {}\n attr[\"trait_type\"] = h\n attr[\"value\"] = private_strip(row[headers.index(h)])\n arr.append(attr)\n return arr\n\n\ndef csv_to_metadata(path):\n private_create_folder_if_not(dest+\"/\"+csv_folder+\"/metadata/\")\n jsons = []\n attr_hash = []\n with open(path, 'r', encoding='utf-8-sig') as file:\n csvreader = csv.reader(file, delimiter=',')\n headers = next(csvreader, None)\n for row in csvreader:\n map = {}\n for p in pre_defined:\n map[p] = private_strip(row[headers.index(p)])\n map[\"tokenId\"] = int(map[\"tokenId\"])\n map[\"attributes\"] = private_make_attriubte_array(headers,row)\n attr_hash.append(hash_str(json.dumps(private_KMOrder(map[\"attributes\"]))))\n private_ob_file(dest+\"/\"+csv_folder+\"/metadata/\"+str(map[\"tokenId\"])+\".json\",map)\n jsons.append(map)\n if len(set([x for x in attr_hash if attr_hash.count(x) > 1])) > 0:\n sys.exit(\"ERROR - Two NFTS are Equal \")\n private_ob_file(dest+\"/\"+csv_folder+\"/metadata/metadata.json\",jsons)\n\ndef hash_file(filename):\n # h = hashlib.sha1()\n # with open(filename, 'rb') as file:\n # chunk = 0\n # while chunk != b'':\n # chunk = file.read(1024)\n # h.update(chunk)\n # return h.hexdigest()\n return str(os.path.getsize(filename))\n\ndef hash_str(strx):\n return hashlib.md5(strx.encode(\"utf-8\")).hexdigest()\n\ndef rootToHashMap():\n all_data = {}\n for file in glob.glob(root+\"/*\"):\n if os.path.isdir(file):\n jsons = file+\"/\"+metadata_folder\n for file_2 in glob.glob(jsons+\"/*.json\"):\n json_ob = json.load(open(file_2))\n key = 
str(hash_str(json.dumps(private_KMOrder(json_ob[\"attributes\"]))))\n value = hash_file(os.path.splitext(file+\"/\"+os.path.basename(file_2))[0]+\".\"+extension)\n all_data[str(key)] = value\n return all_data\n\ndef destCSVJsonToHashMap():\n all_data = {}\n for json_ob in json.load(open(dest+\"/\"+csv_folder+\"/metadata.json\",encoding='utf-8-sig')):\n key = str(hash_str(json.dumps(private_KMOrder(json_ob[\"attributes\"]))))\n value = hash_file(dest+\"/\"+json_ob[\"tokenId\"]+\".\"+extension)\n all_data[str(key)] = value\n return all_data\n\ndef destToHashMap():\n all_data = {}\n jsons = dest+\"/\"+metadata_folder\n for file_2 in glob.glob(jsons+\"/*.json\"):\n json_ob = json.load(open(file_2, encoding='utf-8-sig'))\n key = str(hash_str(json.dumps(private_KMOrder(json_ob[\"attributes\"]))))\n value = hash_file(os.path.splitext(dest+\"/\"+os.path.basename(file_2))[0]+\".\"+extension)\n all_data[str(key)] = value\n return all_data\n\ndef updateNFTPath():\n jsons = []\n for json_ob in json.load(open(dest+\"/\"+csv_folder+\"/metadata/metadata.json\",encoding='utf-8-sig')):\n id = json_ob[\"tokenId\"]\n del json_ob[\"tokenId\"]\n json_ob[\"image\"] = ipfs_url+\"/\"+str(id)+\".\"+extension\n jsons.append(json_ob)\n private_ob_file(dest+\"/\"+csv_folder+\"/metadata/\"+str(id)+\".json\",json_ob)\n private_ob_file(dest+\"/\"+csv_folder+\"/metadata/metadata.json\",jsons)\n\n\ndef parent_validation():\n attr_hash = []\n for file in glob.glob(root+\"/*\"):\n if os.path.isdir(file):\n jsons = file+\"/\"+metadata_folder\n if not os.path.isdir(jsons):\n sys.exit(\"Path not found: \"+jsons)\n for file_2 in glob.glob(jsons+\"/*.json\"):\n json_ob = json.load(open(file_2))\n tokenId = json_ob[\"tokenId\"]\n attr_hash.append(hash_str(json.dumps(private_KMOrder(json_ob[\"attributes\"]))))\n nft = file+\"/\"+str(tokenId)+\".\"+extension\n if not Path(file_2).stem == str(tokenId):\n sys.exit(\"Json file name does not match with the token Id \"+file_2)\n if not (os.path.exists(nft)):\n sys.exit(\"Please Fix, Path not found: \"+nft) \n if len(set([x for x in attr_hash if attr_hash.count(x) > 1])) > 0:\n sys.exit(\"ERROR - Two NFTS are Equal \")\n print(\"Parent Validation Success!!\")\n\ndef copy():\n private_empty(dest)\n private_create_folder_if_not(dest+\"/\"+metadata_folder)\n for file in glob.glob(root+\"/*\"):\n if os.path.isdir(file):\n jsons = file+\"/\"+metadata_folder\n if os.path.isdir(jsons):\n for file_2 in glob.glob(jsons+\"/*.json\"):\n json_ob = json.load(open(file_2))\n nft = file+\"/\"+str(json_ob[\"tokenId\"])+\".\"+extension\n uid = time.time()\n new_name = dest+\"/\"+str(uid)+\".\"+extension\n json_ob[\"tokenId\"] = uid\n shutil.copy(nft, new_name)\n private_ob_file(dest+\"/\"+metadata_folder+\"/\"+str(uid)+\".json\",json_ob)\n\n\ndef randomize():\n file_count = len(glob.glob1(dest+\"/\"+metadata_folder, \"*.json\"))\n arr = []\n for i in range(1, file_count+1):\n arr.append(i)\n random.shuffle(arr)\n for file_2 in glob.glob(dest+\"/\"+metadata_folder+\"/*.json\"):\n json_ob = json.load(open(file_2,encoding='utf-8-sig'))\n uid = arr.pop()\n nft = dest+\"/\"+str(json_ob[\"tokenId\"])+\".\"+extension\n new_name = dest+\"/\"+str(uid)+\".\"+extension\n os.rename(nft, new_name)\n json_ob[\"tokenId\"] = uid\n json_ob[\"name\"] = collection_name+\" #\" + \\\n str(json_ob[\"tokenId\"]) # remove if you have unique name\n private_ob_file(dest+\"/\"+metadata_folder+\"/\"+str(uid)+\".json\",json_ob)\n os.remove(file_2)\n\n\n\ndef is_all_good(normal = True):\n rH = rootToHashMap()\n 
private_ob_file(\"root.json\",rH)\n if normal:\n dH = destToHashMap()\n private_ob_file(\"dest.json\",dH)\n if not rH == dH:\n print(\"---------ERROR-----------\")\n print(\"Diff \"+str(len(set(private_concat(rH)) ^ set(private_concat(dH)))/2))\n sys.exit(\"Validation Failed, Destiantion Collection not match with Root Collection\")\n else:\n dCH = destCSVJsonToHashMap()\n private_ob_file(\"dest_CSV_json.json\",dCH)\n if not rH == dCH:\n print(\"---------ERROR-----------\")\n print(\"Diff \"+str(len(set(private_concat(rH)) ^ set(private_concat(dCH)))/2))\n sys.exit(\"Validation Failed, Destiantion Collection not match with Root Collection\")\n print(\"Yup, All good\")\n\ndef make_zip(chunk_count):\n arr = []\n for f in sorted(glob.glob(dest+\"/*.\"+extension), key=lambda el: int(Path(el).stem)):\n arr.append(f)\n chunks = private_divide_chunks(arr,chunk_count)\n for files in chunks:\n name = Path(files[0]).stem+\"-\"+Path(files[len(files)-1]).stem\n print(\"Creating... \"+name+\".zip\")\n with zipfile.ZipFile(dest+\"/\"+name+\".zip\", 'w',zipfile.ZIP_DEFLATED) as zipMe: \n for file in files:\n zipMe.write(file,arcname= \"./\"+name+\"/\"+os.path.basename(file))\n zipMe.close()\n for f1 in files:\n os.remove(f1)\n print(\"Done Making Zip\")\n\ndef private_strip(text):\n if not text:\n return \"\"\n return text.strip() \n\ndef private_divide_chunks(l, n):\n\tfor i in range(0, len(l), n):\n\t\tyield l[i:i + n]\n\ndef private_empty(path):\n shutil.rmtree(path)\n os.mkdir(path)\n\ndef private_KMOrder(attributes) : \n return sorted(attributes, key=lambda d: d['trait_type'])\n\ndef private_concat(list):\n arr = []\n for x,y in list.items():\n arr.append(x+\"\"+y)\n return arr\n\ndef private_ob_file(json_path,ob):\n with open(json_path, 'w+',encoding='utf-8-sig') as f:\n json.dump(ob, f, indent=4, ensure_ascii=False)\n\ndef private_create_folder_if_not(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n# print(\"Script Started!\")\n# parent_validation()\n# print(\"Coping to Destination\")\n# copy()\n# print(\"Copy Validation Started\")\n# is_all_good()\n# print(\"Randomization Started\")\n# randomize()\n# print(\"Validation Started After Randomization\")\n# is_all_good()\n# print(\"Creating CSV from Collection\")\n# create_csv()\n# print(\"CSV to Metadata Json\") #do this after editing the original CSV\n#csv_to_metadata(dest+\"/\"+csv_folder+\"/final_metadata_no_space.csv\")\n# print(\"Metadata Created\")\n#print(\"Now zipping collection for upload\")\n#make_zip(500)\n#updateNFTPath()\n","repo_name":"imalhasaranga/NFT-Metadata-Processing-Scripts","sub_path":"MDProcessor.py","file_name":"MDProcessor.py","file_ext":"py","file_size_in_byte":10777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"12049350319","text":"from .distribution import (\n MeasurementOutcomeDistribution,\n change_tuple_dict_keys_to_comma_separated_integers,\n compute_clipped_negative_log_likelihood,\n compute_jensen_shannon_divergence,\n compute_mmd,\n compute_multi_rbf_kernel,\n compute_rbf_kernel,\n create_bitstring_distribution_from_probability_distribution,\n evaluate_distribution_distance,\n is_measurement_outcome_distribution,\n is_normalized,\n load_measurement_outcome_distribution,\n load_measurement_outcome_distributions,\n normalize_measurement_outcome_distribution,\n preprocess_distibution_dict,\n save_measurement_outcome_distribution,\n save_measurement_outcome_distributions,\n)\n\n\nclass BitstringDistribution(MeasurementOutcomeDistribution):\n 
\"\"\"\n This is just an alias that might be removed in future version.\n It is preferred to use MeasurementOutcomeDistribution.\n \"\"\"\n\n def get_qubits_number(self) -> int:\n return self.get_number_of_subsystems()\n\n def __repr__(self) -> str:\n output = f\"BitstringDistribution(input={self.distribution_dict})\"\n return output\n","repo_name":"zapatacomputing/z-quantum-core","sub_path":"src/python/zquantum/core/bitstring_distribution.py","file_name":"bitstring_distribution.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"44"} +{"seq_id":"42193151377","text":"\"\"\"\nThe function of this module is:\n - Getting all setting, create necessaries variables\n - Execute all necessaries tasks before main module (@decorators, etc)\n - Execute main module\n\"\"\"\nfrom .GlobalDecorators import DecoratorClass\nfrom .GlobalSettings import GLOBAL_DECORATOR\n\nclass Configurator:\n def run(self, modules):\n \"\"\"\n Args:\n modules: List array with modules to be executed this time\n \"\"\"\n if GLOBAL_DECORATOR > 0:\n print(f\"GLOBAL_DECORATOR is {GLOBAL_DECORATOR}\")\n decorator_class = DecoratorClass()\n decorator_class.start_wrapper_decoration(modules=modules)\n else:\n print(\"GLOBAL_DECORATOR is not activated\")\n","repo_name":"Gabvaztor/TFBoost","sub_path":"src/config/Configurator.py","file_name":"Configurator.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"33298427059","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport os\n\npath = r'E:\\newdata\\dictatorships fx\\work data'\n\nlenth = 648\nIRR = pd.read_csv(path + os.sep + 'IRRUSD=X.csv')\nIRR_C = list(IRR['Close'])\n\n\nRUB = pd.read_csv(path + os.sep + 'RUBUSD=X.csv')\nRUB_C = list(RUB['Close'])\n\nPKR = pd.read_csv(path + os.sep + 'PKR=X.csv')\nPKR_Cr = list(PKR['Close'])\nPKR_C = [1 / i for i in PKR_Cr]\n\n# VES = pd.read_csv(path + os.sep + 'USD_VES历史数据.csv', thousands=',').head(lenth)\n# VES_Crr = list(VES['收盘'])\n# VES_Cr = list(reversed(VES_Crr))\n# VES_C = [1 / i for i in VES_Cr]\n\nIi, = plt.plot(IRR_C, label='IRR')\nIr, = plt.plot(RUB_C, label='RUB')\nIp, = plt.plot(PKR_C, label='PKR')\n# Iv, = plt.plot(VES_C, label='VES')\nplt.tight_layout()\nplt.grid()\nplt.legend()\nplt.show()","repo_name":"webclinic017/testbed2","sub_path":"DictatorshipsFxCompare.py","file_name":"DictatorshipsFxCompare.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41116325463","text":"#!/usr/bin/env python3\n\nimport omni_const\nimport omni_config\nimport omni_unpwd\n\nimport sys\nimport requests\nimport csv\n\nfrom omnissiah.db import OmniDB\nfrom omnissiah.omnissiah import OmniProgram\n\ninsert_raw_mac_sql = {'mariadb':'INSERT IGNORE INTO raw_mac (registry, assignment, organization, address) VALUES (%s, %s, %s, %s);',\n 'pgsql':'INSERT INTO raw_mac (registry, assignment, organization, address) VALUES (%s, %s, %s, %s) ON CONFLICT DO NOTHING;'}\n\n\ndef download_macs(urls):\n result = []\n for url in urls:\n response = requests.get(url, allow_redirects=True)\n if response.status_code == 200:\n response.encoding = 'utf-8'\n result.append(response.text)\n return result\n\ndef parse_csv(texts):\n result = []\n for text in texts:\n reader = csv.reader(text.strip().splitlines())\n next(reader, None)\n result.extend([tuple(row) for row in 
reader])\n return result\n\ndef save_macs(db, macs):\n cur = db.cursor()\n if macs:\n cur.executemany(insert_raw_mac_sql[omni_config.dbtype], macs)\n db.commit()\n cur.close()\n\n\ndef main():\n try:\n exitcode = 1\n program = OmniProgram(omni_config.log_path, omni_config.log_level, omni_config.log_format, omni_config.log_date_format)\n omnidb = OmniDB(omni_config.dbtype, omni_config.dbhost, omni_config.dbname,\n omni_unpwd.db_raw_user, omni_unpwd.db_raw_password, log=program.log, program=program.name, ssl=omni_config.dbssl)\n macs = parse_csv(download_macs(omni_const.ieee_oui_csv_url))\n omnidb.run_program_queries(stage=1)\n save_macs(omnidb, macs)\n omnidb.run_program_queries(stage=2)\n omnidb.close()\n exitcode = 0\n except:\n program.log.exception('Fatal error')\n finally:\n return exitcode\n\nif __name__ == \"__main__\":\n sys.exit(main())","repo_name":"DeusMechanicus/Omnissiah","sub_path":"code/raw_mac.py","file_name":"raw_mac.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"33947381488","text":"import psycopg2\r\nimport pymssql\r\nimport pymysql\r\nimport sys\r\n\r\ndef connect_to_database(db_config):\r\n try:\r\n db_type = db_config.get(\"db_type\").lower()\r\n if db_type == \"postgresql\":\r\n conn = psycopg2.connect(**db_config)\r\n elif db_type == \"mssql\":\r\n conn = pymssql.connect(**db_config)\r\n elif db_type == \"mariadb\":\r\n conn = pymysql.connect(**db_config)\r\n else:\r\n print(\"Error: Unsupported database type specified in config.\")\r\n sys.exit(1)\r\n return conn\r\n except Exception as e:\r\n print(f\"Error connecting to the database: {str(e)}\")\r\n sys.exit(1)\r\n\r\ndef create_log_table(conn):\r\n try:\r\n cursor = conn.cursor()\r\n cursor.execute(\"\"\"\r\n CREATE TABLE IF NOT EXISTS LogTable (\r\n id SERIAL PRIMARY KEY,\r\n log_entry TEXT,\r\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP\r\n )\r\n \"\"\")\r\n conn.commit()\r\n except Exception as e:\r\n print(f\"Error creating log table: {str(e)}\")\r\n conn.rollback()\r\n sys.exit(1)\r\n\r\ndef insert_log_entry(conn, log_entry):\r\n try:\r\n cursor = conn.cursor()\r\n cursor.execute(\"INSERT INTO LogTable (log_entry) VALUES (%s)\", (log_entry,))\r\n conn.commit()\r\n except Exception as e:\r\n print(f\"Error inserting log entry into the database: {str(e)}\")\r\n conn.rollback()\r\n sys.exit(1)\r\n\r\ndef get_logs_from_database(conn, start_time=None, end_time=None):\r\n try:\r\n cursor = conn.cursor()\r\n query = \"SELECT log_entry, timestamp FROM LogTable\"\r\n if start_time and end_time:\r\n query += \" WHERE timestamp BETWEEN %s AND %s\"\r\n cursor.execute(query, (start_time, end_time))\r\n else:\r\n cursor.execute(query)\r\n logs = cursor.fetchall()\r\n return logs\r\n except Exception as e:\r\n print(f\"Error retrieving logs from the database: {str(e)}\")\r\n conn.rollback()\r\n sys.exit(1)\r\n\r\ndef close_database_connection(conn):\r\n try:\r\n conn.close()\r\n except Exception as e:\r\n print(f\"Error closing the database connection: {str(e)}\")\r\n sys.exit(1)\r\n","repo_name":"shrivastava67/Collector-Beta","sub_path":"collector/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"72950703814","text":"'''\ntmain.py\nAuthor: Tim Attwell\nDate: 15/10/2020\n\nMain funtion of entity_connections. 
\n'''\nfrom argparse import ArgumentParser\nimport training\nimport nyt_query\nimport dminr_query\nimport torch\nfrom flask import Flask, request, jsonify\nimport relevance\nimport string\nimport time\nimport api\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--train\", type=bool, default=False, help=\"True = Train, *False = Don't Train\")\n parser.add_argument(\"--classify\", type=bool, default=True, help=\"True = do classification task, *False = Don't do classification task\")\n parser.add_argument(\"--model_size\", type=str, default='large', help=\"\"\"*large = BERT Large\n base = BERT base\"\"\")\n parser.add_argument(\"--epochs\", type=int, default=4, help=\"Define number of training epochs (*4)\")\n parser.add_argument(\"--bs\", type=int, default=32, help=\"Define batch size (*32)\")\n #parser.add_argument(\"--training_data\", type=str, default=\"./data/ner_dataset.csv\", help=\"\"\"Path to training data.\n # default=./.data/ner_dataset.csv\"\"\")\n parser.add_argument(\"--save_model\", type=bool, default=True, help=\"*True = Save model, False = Do not save model\")\n parser.add_argument(\"--nyt\", type=bool, default=False, help=\"Use the New York Times API\")\n parser.add_argument(\"--local\", type=bool, default=False, help=\"Run search in command line rather than setting up localhost API.\")\n \n args = parser.parse_args()\n\n # Include max sentence length and device in the \"args\" object \n # for use in other functions\n args.max_len = 128\n args.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n \n # If --train True flag is called, train the model from scratch.\n # If not, then load the model from pretrained\n if args.train == True:\n model, embeddings, tokenizer = training.build_model(args)\n else:\n model, embeddings, tokenizer = training.load_model(args)\n\n # If --classify flag is False, then end the program after model training \n # or loading is complete.\n if args.classify == True:\n # If --local is True, then run the search straight from the command line,\n # but if False, then set up the network accessible API.\n if args.local == True:\n task = dminr_query.SearchTask(args, model, embeddings, tokenizer)\n task.recurrant_search()\n else:\n api.run(args, model, embeddings, tokenizer, port_num=1414)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"timattwell/entity_connections","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"29475331633","text":"def lin():\r\n print(\"-=\" * 12)\r\n\r\n\r\nlin()\r\nprint(\" == MATRIZ EM PYTHON == \")\r\nlin()\r\n\r\nmatriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\r\n\r\nfor l in range(0,3):\r\n for c in range(0,3):\r\n matriz[l][c] = int(input(f\"Digite um valor para a coordenada [{l}, {c}] \"))\r\n\r\nlin()\r\nfor l in range(0,3):\r\n for c in range(0,3):\r\n print(f\"[{matriz[l][c]:^3}]\", end=\" \")\r\n print()\r\nlin()\r\n\r\n","repo_name":"esau-morais/init.py","sub_path":"Exercises/challenge086.py","file_name":"challenge086.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"71687961412","text":"class RelevancyCheck:\n @staticmethod\n def check_relevancy(content):\n relevant_keywords = [\"resign\", \"step down\", \"leave\", \"departure\"]\n \n # Check for the word \"superintendent\"\n if \"superintendent\" not 
in content.lower():\n            return False\n\n        # Check for any of the relevant keywords\n        if not any(keyword in content.lower() for keyword in relevant_keywords):\n            return False\n\n        return True\n","repo_name":"IsaacSmith2/DistrictFinder","sub_path":"relevancy_check.py","file_name":"relevancy_check.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"23145417306","text":"from django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.conf import settings\nfrom django.shortcuts import render, redirect, get_object_or_404\n\n\nfrom .models import Group, Post, User\n# NOTE: user_contact below also relies on a ContactForm, which is not imported here.\nfrom .forms import PostForm\n\n\n\ndef index(request):\n    posts = Post.objects.select_related('group')[:settings.LIMIT_POSTS]\n    post_list = Post.objects.all()\n    paginator = Paginator(post_list, settings.LIMIT_POSTS)\n    page_number = request.GET.get('page')\n    page_obj = paginator.get_page(page_number)\n    context = {\n        'posts': posts,\n        'page_obj': page_obj,\n    }\n    return render(request, 'posts/index.html', context)\n\n\ndef group_posts(request, slug):\n    group = get_object_or_404(Group, slug=slug)\n    posts = Post.objects.select_related('group')[:settings.LIMIT_POSTS]\n    # Paginate only this group's posts, not every post on the site.\n    post_list = Post.objects.filter(group=group)\n    paginator = Paginator(post_list, settings.LIMIT_POSTS)\n    page_number = request.GET.get('page')\n    page_obj = paginator.get_page(page_number)\n    context = {\n        'group': group,\n        'page_obj': page_obj,\n    }\n    return render(request, 'posts/group_list.html', context)\n\n\ndef user_contact(request):\n    if request.method == 'POST':\n        form = ContactForm(request.POST)\n        if form.is_valid():\n            name = form.cleaned_data['name']\n            email = form.cleaned_data['email']\n            subject = form.cleaned_data['subject']\n            message = form.cleaned_data['body']\n            form.save()\n            return redirect('/thank-you/')\n        return render(request, 'contact.html', {'form': form})\n    form = ContactForm()\n    return render(request, 'users/contact.html', {'form': form})\n\n\ndef profile(request, username):\n    author = get_object_or_404(User, username=username)\n    post_list = author.post.all()\n    posts = Post.objects.all()\n    paginator = Paginator(post_list, settings.LIMIT_POSTS)\n    page_number = request.GET.get('page')\n    page_obj = paginator.get_page(page_number)\n    post_number = post_list.count()\n    context = {\n        'author': author,\n        'posts': posts,\n        'page_obj': page_obj,\n    }\n    return render(request, 'posts/profile.html', context)\n\n\ndef post_detail(request, post_id):\n    post = get_object_or_404(Post, id=post_id)\n    author_posts = post.author.post.all()\n    context = {\n        'post': post,\n        'author_posts': author_posts.count(),\n    }\n    return render(request, 'posts/post_detail.html', context) \n\n@login_required\ndef post_create(request):\n    form = PostForm(request.POST or None)\n    if form.is_valid():\n        # Attach the author before saving the new post.\n        post = form.save(commit=False)\n        post.author = request.user\n        post.save()\n        return redirect('posts:profile', username=request.user)\n    return render(request, 'posts/post_create.html', {'form': form})\n\n@login_required\ndef post_edit(request, post_id):\n    post = get_object_or_404(Post, id=post_id)\n    if post.author != request.user:\n        return redirect('posts:post_detail', post_id=post_id)\n    # Bind the form to the existing post so a valid submit updates it in place.\n    form = PostForm(request.POST or None, instance=post)\n    if form.is_valid():\n        form.save()\n        return redirect('posts:post_detail', post_id=post_id)\n    return render(request, 'posts/post_create.html', {'form': form, 'post': 
post})\n","repo_name":"IrinaTsvetkova25041984/hw02_community","sub_path":"yatube/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"69885606533","text":"from typing import List\nfrom collections import defaultdict, Counter\nimport heapq\n\n\n# In hindsight, I could have just used BFS for this\n# Actually, since every edge weight is 1, this really does look like a BFS problem...?\ndef solution(n: int, e: List[List[int]]):\n    edges = defaultdict(list)\n    distances = defaultdict(lambda: n + 1)\n    for s, d in e:\n        edges[s].append(d)\n        edges[d].append(s)\n\n    start = 1\n    distances[start] = 0\n\n    queue = []\n    heapq.heappush(queue, [distances[start], start])\n\n    while queue:\n        p_distance, p_destination = heapq.heappop(queue)\n\n        if distances[p_destination] < p_distance:\n            continue\n\n        for n_destination in edges[p_destination]:\n            distance = p_distance + 1\n            if distance < distances[n_destination]:\n                distances[n_destination] = distance\n                heapq.heappush(queue, [distance, n_destination])\n\n    counter = Counter(distances)\n    m = max(counter.values())\n    return len(list(filter(lambda k: counter[k] == m, counter.keys())))\n\n\nif __name__ == \"__main__\":\n    n = 6\n    e = [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]\n    print(solution(n, e))\n","repo_name":"vincent-kk/Basic-Algorithm","sub_path":"programmers/lv3/49189.py","file_name":"49189.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"31997688313","text":"# -*- coding UTF-8 _*_\n# Check if countries and zones are sorted\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\ndef compareArrays (length,country_names,sorted_country_names): # Compare sorted and unsorted string arrays of the same length\n    for i in (range(length)):\n        if country_names[i] != sorted_country_names[i]:\n            print(\"Names are not sorted for \", country_names[i], \"(should be as sorted_names \",\n                  sorted_country_names[i])\n\n# 1. Creating object webdriver\ndriver = webdriver.Chrome()\n\n# 2. Open the page to log in to admin\ndriver.get(\"http://localhost/litecart/admin/\")\ndriver.implicitly_wait(20)\n# 2a. Set waiting time\ndriver.implicitly_wait(30)\n# 2b. Find element with name \"username\"\nusername = driver.find_element_by_name(\"username\")\n# 2c. Type username\nusername.clear()\nusername.send_keys(\"admin\")\n# 2d. Find element with the name \"password\"\npassword = driver.find_element_by_name(\"password\")\n# 2e. Type password\npassword.clear()\npassword.send_keys(\"admin\")\n# 2f. Find element Login\nlogin = driver.find_element_by_name(\"login\")\n# 2g. Click button Login\nlogin.click()\n# 2i. Set waiting time\ndriver.implicitly_wait(20)\n\n# 3. Check that Country Names are sorted on Country page\n# 3a. Find element - link for Countries\ncountries_menu = driver.find_element_by_css_selector(\"ul#box-apps-menu.list-vertical a[href*=countries]\")\ndriver.implicitly_wait(20)\n#print('menu item found')\n# 3b. Click Countries menu item to see page Countries\ncountries_menu.click()\n#print(\"menu clicked\")\n# 3c. Find page headline to be sure that page name is Countries\nheadline = driver.find_element_by_css_selector(\"h1\").text\n# print(\"Headline found = \",headline)\n\n# 4. 
Find links to all countries on Countries page\ncountry_links = driver.find_elements_by_css_selector(\"tr.row td a\") # Only odd elements are correct, even elements are not related\namount_of_countries = len(country_links)\n# print(\"Amount_of_countries = \", amount_of_countries)\n\n# 5. Find country names and put them in country_names (skipping even elements in country_links)\ncountry_names =[]\nfor i in range(0,amount_of_countries,2):\n country_names.append(country_links[i].text)\n# 5a. Sort country names\nsorted_country_names = sorted(country_names)\n\n# 6. Compare sorted and not sorted lists\ncountries_amount = len(country_names)\n#print(\"Countries_amount =\", countries_amount)\n#print('sorted_country_names =', sorted_country_names)\n#print(\"country names\",country_names)\ncompareArrays(countries_amount,country_names, sorted_country_names)\n\n# 7. Find countries ============ with non zero zones\n# 7a. Find elements countries and for their zones\ndriver.get(\"http://localhost/litecart/admin/?app=countries&doc=countries\")\ndriver.implicitly_wait(20)\n# 7b. Find list of all countries on Country page and amount of those countries\nall_countries = driver.find_elements_by_xpath(\"//*[contains(@class,'row')]/td[5]/a\")\namount_of_all_countries = len(all_countries)\n# print(\"Amount of countries\", amount_of_all_countries)\n# 7b. Find all elements related to country zones and amount of those elements\nall_contries_zones =driver.find_elements_by_xpath(\"//*[contains(@class,'row')]/td[6]\")\nall_contries_codes = driver.find_elements_by_xpath(\"//*[contains(@class,'row')]/td[4]\")\namount_zone_elements = len(all_contries_zones)\n# print(\"amount_zone_element\",amount_zone_elements )\n\n# 7c. Select countries with non-zero zone amount and put them into the list\nmultiple_zones_countries =[]\nmultiple_zones_contries_index =[]\nmultiple_zones_codes =[]\nfor i in range (amount_of_all_countries):\n if all_contries_zones[i].text != '0':\n multiple_zones_countries.append(all_countries[i].text)\n multiple_zones_contries_index.append(i)\n multiple_zones_codes.append(all_contries_codes[i].text)\nprint(\"multiple zones countries = \",multiple_zones_countries)\nprint(\"multiple_zones_contries_index = \", multiple_zones_contries_index)\nprint(\"multiple_zones_codes = \", multiple_zones_codes)\n\n# 8. Find particular ============= country code for the country with multiple zones\nmultiple_zones_codes_length = len(multiple_zones_codes)\n# print(\"multiple_zones_codes = \",multiple_zones_codes)\nfor i in range (multiple_zones_codes_length):\n country_code = multiple_zones_codes[i]\n # print (\" current country code = \",country_code)\n # 8a. Go to ================= the page with zones for particular country\n country_page_locator =\"http://localhost/litecart/admin/?app=countries&doc=edit_country&country_code=\" +country_code\n print(\"country page locator = \", country_page_locator)\n driver.get(country_page_locator)\n driver.implicitly_wait(20)\n # 8b. 
Find all zone names\n    zone_names = driver.find_elements_by_css_selector(\"table.dataTable tr td:nth-of-type(3) input\")\n    # print(\"zone names = \", zone_names)\n    current_country_zones_length = len(zone_names)- 1\n    # print(\"current_country_zones_length\", current_country_zones_length)\n    current_country_zones_names =[]\n    for i in range(current_country_zones_length):\n        # print(\"current zone name = \", zone_names[i].get_attribute(\"value\"))\n        current_country_zones_names.append(zone_names[i].get_attribute(\"value\"))\n    # print(\"current_country_zones_names = \", current_country_zones_names)\n    # print(\"length of current_country_zones_names = \",len(current_country_zones_names))\n\n    # 8c. Check zone names sorting for current country\n    sorted_current_country_zones_names = sorted(current_country_zones_names)\n    # print(\"length of sorted_current_country_zones_names = \",len(sorted_current_country_zones_names))\n    # print(\"sorted_current_country_zones_names\", sorted_current_country_zones_names)\n    compareArrays(current_country_zones_length,current_country_zones_names,sorted_current_country_zones_names)\n\n# 9. Find link to zones menu\ngeo_zones_menu = driver.find_element_by_css_selector(\"ul#box-apps-menu.list-vertical a[href*=geo_zones]\")\ndriver.implicitly_wait(20)\n# print('menu item found')\n\n# 9a. Click menu item to see page Geo Zones\ngeo_zones_menu.click()\n# print(\"menu clicked\")\n\n# 9b. Find page headline\nheadline = driver.find_element_by_css_selector(\"h1\").text\n# print(\"Headline found = \", headline)\n\n# 10 Find link to Canadian zones\ncanadian_zones = driver.find_element_by_link_text(\"Canada\")\n#print(\"canadian zones\", canadian_zones)\n# 10a. Go to the page with Canadian zones\ncanadian_zones.click()\n# 10b. Find page headline\nheadline = driver.find_element_by_css_selector(\"h1\").text\n# print(\"Headline found = \", headline)\n\n# 10c. Find list of Canadian zones\ncanadian_zones_list = driver.find_elements_by_css_selector(\"select[name*=zone_code] option[selected*=selected]\")\n#print(\"Canadian_zones_list\",canadian_zones_list)\n\n# 10d. Find amount of Canadian zones\ncanadian_zones_amount = len(canadian_zones_list)\n# print(\"Canadian_zones_amount\",canadian_zones_amount)\n\n# 11. Find Canadian zone names\ncanadian_zones_names =[]\nfor i in range(canadian_zones_amount):\n    canadian_zones_names.append(canadian_zones_list[i].text)\n# print(\"Canadian_zones_names\",canadian_zones_names)\n\n# 11a. Sort Canadian zones names\nsorted_canadian_zones_names = sorted(canadian_zones_names)\n# print(\"Sorted Canadian_zones_names\",sorted_canadian_zones_names)\n\n# 11b. Check that zone names are sorted\ncompareArrays(canadian_zones_amount,canadian_zones_names, sorted_canadian_zones_names)\n\n# 12 Checking if US zone names are sorted\n# 12a. Find link to geo-zones page\ngeo_zones_menu = driver.find_element_by_css_selector(\"ul#box-apps-menu.list-vertical a[href*=geo_zones]\")\ndriver.implicitly_wait(20)\n# 12b. Go to the geo-zones page\ngeo_zones_menu.click()\n\n# 12c. Find page headline\nheadline = driver.find_element_by_css_selector(\"h1\").text\n# print(\"Headline found = \", headline)\n\n# 13. Find a link to US zones\nus_zones = driver.find_elements_by_css_selector(\"table.dataTable tr.row a[href*=id]\")[2]\n# print (\"US link = \", us_zones.text)\n#print(\"us zones\", us_zones)\n\n# 13a. Go to the page with US zones\nus_zones.click()\n\n# 14. 
Find list of US zones\nus_zones_list = driver.find_elements_by_css_selector(\"select[name*=zone_code] option[selected*=selected]\")\n# print(\"US_zones_list\", us_zones_list)\n\n# 14a. Find amount of US zones\nus_zones_amount = len(us_zones_list)\n# print(\"US_zones_amount\", us_zones_amount)\n\n# 15. Find US zone names\nus_zones_names = []\nfor i in range(us_zones_amount):\n    us_zones_names.append(us_zones_list[i].text)\n# print(\"US_zones_names\", us_zones_names)\n\n# 15a. Sort US zone names\nsorted_us_zones_names = sorted(us_zones_names)\n# print(\"Sorted US_zones_names\", sorted_us_zones_names)\n\n# 16. Check that zone names are sorted\ncompareArrays(us_zones_amount, us_zones_names, sorted_us_zones_names)\n\n\n# 17. Close the page\n# driver.close()\n\n# 18. Close the browser\n# driver.quit()","repo_name":"Olga177/Hello2","sub_path":"sorting_9a.py","file_name":"sorting_9a.py","file_ext":"py","file_size_in_byte":8919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}
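[editorial note] The sorting_9a.py record above calls a compareArrays helper throughout, but the helper itself is defined earlier in that script and is not part of this excerpt. A minimal sketch of what such a helper could look like — the name and argument order are taken from the call sites, the body and messages are assumptions, not the original implementation:

def compareArrays(length, actual, expected):
    # Hypothetical comparison helper: report the first index where the
    # scraped order differs from the sorted order.
    for i in range(length):
        if actual[i] != expected[i]:
            print("Not sorted at index", i, ":", actual[i], "!=", expected[i])
            return False
    print("List of", length, "items is sorted correctly")
    return True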
+{"seq_id":"28842581847","text":"from simulation.beam_simulation.get_window import get_window\n\n\ndef calc_spatial_window(control,\n                        window,\n                        annular_transducer,\n                        num_points_x,\n                        num_points_y,\n                        resolution_x,\n                        resolution_y,\n                        step_size):\n    if window is None:\n        _window = control.simulation.num_windows\n    else:\n        _window = window\n    if isinstance(_window, int) and _window > 0:\n        _window = get_window((num_points_x, num_points_y),\n                             (resolution_x, resolution_y),\n                             _window * step_size,\n                             2 * step_size,\n                             annular_transducer)\n\n    return _window\n","repo_name":"normalform/abersimpy","sub_path":"src/simulation/beam_simulation/calc_spatial_window.py","file_name":"calc_spatial_window.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"10725620145","text":"import sys\n\nclass PageTable:\n\n    def __init__(self, frames, pra):\n        self.pageTable = []\n        self.pageSize = 256\n        self.frames = 256\n        self.alg = pra\n        self.initPageTable()\n\n    # Initializes the page table entries to 0.\n    def initPageTable(self):\n        # iterate through page size\n        for x in range(self.pageSize):\n            self.pageTable.insert(x, 0)\n\n    # Add frame number to the pageTable\n    # pageNum = index to add frame\n    # frame = data\n    def addToTable(self, frame, pageNum):\n        # adding frame into pageTable\n        self.pageTable[pageNum] = frame\n\n    def deleteFrame(self, frame):\n        idx = 0\n        # print(\"Trying to delete from page table\")\n        for val in self.pageTable:\n            if val is frame:\n                # print(\"Deleting at idx %d\" % idx)\n                self.pageTable[idx] = 0\n                break\n            idx += 1\n\n    def printTable(self):\n        idx = 0\n        for x in self.pageTable:\n            if x == 0:\n                print(\"Page Number: \", idx, \" Frame: \", 0)\n            else:\n                print(\"Page Number: \", idx, \" Frame: \", self.parsMyFrame(x))\n            idx += 1\n\n    # Function to check if page num exists in page table.\n    # Returns True if it exists in page table\n    # returns False if it doesn't exist in page table\n    def isInPageTable(self, pageNum):\n        # pull the frame from pageTable\n        frame = self.pageTable[pageNum]\n        # Check to see if frame is valid\n        if frame != 0:\n            # if valid, frame is in pageTable\n            return True\n        # else frame not in pageTable\n        return False\n\n    def frameInPageTable(self, frame):\n        for targetFrame in self.pageTable:\n            if targetFrame == frame:\n                return True\n        return False\n\n    def parsMyFrame(self, frame):\n        fullFrame = hex(int.from_bytes(frame, byteorder='big', signed=True))\n        # length of the frame string minus the leading '0x'\n        frameLen = len(fullFrame) - 2\n        # remove the 0x from the start\n        parsedFrame = fullFrame[2:]\n        # pad with leading 0's if the hex string is shorter than 512 characters\n        if frameLen < 512:\n            parsedFrame = '0' * (512 - frameLen) + parsedFrame\n        # Uppercase the letters.\n        return parsedFrame.upper()\n\n    def pageFaultHandler(self, tlb, memory, pageNum, pageOff):\n        # Open up the backing store .bin file\n        backingStore = open(\"BACKING_STORE.bin\", \"rb\")\n        # Seek to the position of the faulting page\n        backingStore.seek(self.frames * pageNum)\n        # Read and obtain the frame.\n        freeFrame = backingStore.read(self.frames)\n        test = self.parsMyFrame(freeFrame)\n        # Check to see if the free frame does not exist in memory\n        if not memory.frameInMemory(freeFrame):\n            # find a free idx if there is space in the memory\n            freeFrameIdx = memory.findFreeFrame()\n            # If a free frame is available, use it\n            if freeFrameIdx != -1:\n                # print(\"Frame not in memory\")\n                # add frame to memory\n                memory.addFrame(freeFrameIdx, freeFrame)\n            # if no free frame, use the page replacement algorithm\n            else:\n                if self.alg == \"LRU\":\n                    self.LRUPageReplacement(memory, tlb, freeFrame)\n                elif self.alg == \"OPT\":\n                    self.OPTPageReplacement(memory, tlb, freeFrame)\n                else:\n                    # memory.printMemory()\n                    self.FIFOPageReplacement(memory, tlb, freeFrame)\n\n            # print(\"\\n adding pageNum {} and free frame {}\\n\".format(pageNum, test))\n            # Add the new frame to the page table\n            self.addToTable(freeFrame, pageNum)\n\n            if tlb.isTLBFull():\n                tlb.freeTLBFrame()\n\n            tlb.addToTLB(pageNum, freeFrame)\n        backingStore.close()\n\n    def getFrameFromTable(self, pageNum):\n        return self.pageTable[pageNum]\n\n    def LRUPageReplacement(self, memory, tlb, freeFrame):\n        # evict the least recently used frame\n        val = memory.leastRecentlyUsed()\n        self.deleteFrame(val[1])\n        tlb.deleteFrameFromTLB(val[1])\n        memory.addFrame(val[0], freeFrame)\n\n    def OPTPageReplacement(self, memory, tlb, freeFrame):\n        idx, frame = self.optimalPop(memory)\n        # delete frame from pageTable\n        self.deleteFrame(frame)\n        tlb.deleteFrameFromTLB(frame)\n        # add frame to the freed memory block\n        memory.addFrame(idx, freeFrame)\n\n    def optimalPop(self, memory):\n        max_found = -1\n        toPop = -1\n        for i in range(memory.memorySize):\n            if memory.getFrame(i) != 0:\n                targetFrame = memory.getFrame(i)\n                if self.frameInPageTable(targetFrame):\n                    pageNumfound = i\n                    if pageNumfound > max_found:\n                        max_found = pageNumfound\n                        toPop = pageNumfound\n                else:\n                    memory.deleteFrame(i)\n                    return i, targetFrame\n\n        frame = memory.getFrame(toPop)\n        memory.deleteFrame(toPop)\n        return toPop, frame\n\n    def FIFOPageReplacement(self, memory, tlb, freeFrame):\n        # Remove the first (oldest) item of memory and shift everything over\n        frame = memory.shiftMemory()\n        # print(\"\\n Frame to be deleted {}\\n\".format(self.parsMyFrame(frame)))\n        # delete frame from pageTable\n        self.deleteFrame(frame)\n        tlb.deleteFrameFromTLB(frame)\n        # Get the free memory block\n        idx = memory.findFreeFrame()\n        # add frame to memory\n        memory.addFrame(idx, freeFrame)\n","repo_name":"festevez1995/CPE453","sub_path":"Program3/PageTable.py","file_name":"PageTable.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}
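[editorial note] The PageTable.py record above implements FIFO, LRU and OPT page replacement against a custom memory class defined elsewhere in that project. For comparison, a self-contained FIFO fault counter — all names here are illustrative and not part of the original program:

from collections import deque

def fifo_page_faults(reference_string, num_frames):
    # Count page faults under FIFO replacement: on a miss with memory full,
    # evict the page that has been resident the longest.
    frames = deque()      # oldest resident page sits at the left end
    resident = set()
    faults = 0
    for page in reference_string:
        if page in resident:
            continue      # hit: FIFO order is unaffected by accesses
        faults += 1
        if len(frames) == num_frames:
            resident.remove(frames.popleft())
        frames.append(page)
        resident.add(page)
    return faults

# e.g. fifo_page_faults([1, 2, 1, 3], 2) returns 3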
int(input(\"Please enter another number: \"))\r\nsum1 = (x + y)\r\n\r\nif sum1 < 10:\r\n print (sum1, \"is less than 10\")\r\nelif sum1 == 10:\r\n print (sum1, \"is equal to 10\")\r\nelse:\r\n print (sum1, \"is greater than 10\")","repo_name":"dlu270/Module-8","sub_path":"M8P2.py","file_name":"M8P2.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15307333147","text":"import daisy\nfrom daisy.persistence import MongoDbGraphProvider\nimport json\nimport logging\nimport numpy as np\nimport os\nimport sys\nimport time\nsys.path.append(\"../\")\nfrom micron.graph.daisy_check_functions import check_function, write_done\nfrom solver import Solver\nimport configparser\nfrom micron import read_solve_config, read_predict_config, read_data_config, read_worker_config, read_graph_config\nfrom funlib.run import run, run_singularity\n\nlogger = logging.getLogger(__name__)\n\ndef solve(\n predict_config,\n worker_config,\n data_config,\n graph_config,\n solve_config,\n num_block_workers,\n block_size,\n roi_offset,\n roi_size,\n context,\n solve_block,\n base_dir,\n experiment,\n train_number,\n predict_number,\n graph_number,\n solve_number,\n queue,\n singularity_container,\n mount_dirs,\n **kwargs):\n\n source_roi = daisy.Roi(daisy.Coordinate(roi_offset), daisy.Coordinate(roi_size))\n\n solve_setup_dir = os.path.join(os.path.join(base_dir, experiment), \"04_solve/setup_t{}_p{}_g{}_s{}\".format(train_number,\n predict_number,\n graph_number,\n solve_number))\n\n block_write_roi = daisy.Roi(\n (0, 0, 0),\n block_size)\n block_read_roi = block_write_roi.grow(\n context,\n context)\n total_roi = source_roi.grow(\n context,\n context)\n\n logger.info(\"Solving in %s\", total_roi)\n\n daisy.run_blockwise(\n total_roi,\n block_read_roi,\n block_write_roi,\n process_function=lambda: start_worker(predict_config,\n worker_config,\n data_config,\n graph_config,\n solve_config,\n queue,\n singularity_container,\n mount_dirs,\n solve_block,\n solve_setup_dir),\n num_workers=num_block_workers,\n fit='shrink')\n\n logger.info(\"Finished solving, parameters id is %s\", solve_number)\n\ndef start_worker(predict_config,\n worker_config,\n data_config,\n graph_config,\n solve_config,\n queue,\n singularity_container,\n mount_dirs,\n solve_block,\n solve_setup_dir):\n\n \n worker_id = daisy.Context.from_env().worker_id\n\n log_out = os.path.join(solve_setup_dir, '{}_worker.out'.format(worker_id))\n log_err = os.path.join(solve_setup_dir, '{}_worker.err'.format(worker_id))\n\n base_command = \"python -u {} {} {} {} {} {}\".format(solve_block,\n predict_config,\n worker_config,\n data_config,\n graph_config,\n solve_config)\n if queue == \"None\":\n logger.warning(\"Running block **locally**, no queue provided.\")\n if singularity_container == \"None\":\n logger.warning(\"Running block in current environment, no singularity image provided.\")\n cmd = [base_command]\n else:\n cmd = run_singularity(base_command,\n singularity_container,\n mount_dirs=mount_dirs,\n execute=False,\n expand=False)\n else:\n logger.info(\"Running block on queue {} and container {}\".format(queue,\n singularity_container))\n cmd = run(command=base_command,\n queue=queue,\n num_gpus=0,\n num_cpus=1,\n singularity_image=singularity_container,\n mount_dirs=mount_dirs,\n execute=False,\n expand=False,\n batch=True)\n\n daisy.call(cmd, log_out=log_out, log_err=log_err)\n\n logger.info('Solve worker finished')\n\nif __name__ == \"__main__\":\n logger 
= logging.getLogger(__name__)\n out_hdlr = logging.StreamHandler(sys.stdout)\n out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))\n out_hdlr.setLevel(logging.INFO)\n logger.addHandler(out_hdlr)\n logger.setLevel(logging.INFO)\n\n predict_config = sys.argv[1]\n worker_config = sys.argv[2]\n data_config = sys.argv[3]\n graph_config = sys.argv[4]\n solve_config = sys.argv[5]\n\n predict_config_dict = read_predict_config(predict_config)\n worker_config_dict = read_worker_config(worker_config)\n data_config_dict = read_data_config(data_config)\n graph_config_dict = read_graph_config(graph_config)\n solve_config_dict = read_solve_config(solve_config)\n\n print(\"solve_config_dict\", solve_config_dict)\n\n full_config = predict_config_dict\n full_config.update(worker_config_dict)\n full_config.update(data_config_dict)\n full_config.update(graph_config_dict)\n full_config.update(solve_config_dict)\n\n full_config[\"roi_offset\"] = full_config[\"in_offset\"]\n full_config[\"roi_size\"] = full_config[\"in_size\"]\n full_config[\"predict_config\"] = predict_config\n full_config[\"worker_config\"] = worker_config\n full_config[\"data_config\"] = data_config\n full_config[\"graph_config\"] = graph_config\n full_config[\"solve_config\"] = solve_config\n\n start_time = time.time()\n solve(**full_config)\n print(\"Solving took {} seconds\".format(time.time() - start_time))\n","repo_name":"nilsec/micron","sub_path":"micron/solve/daisy_solve.py","file_name":"daisy_solve.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"44"} +{"seq_id":"73665281731","text":"\"\"\"empty message\n\nRevision ID: c1d95cf2b31b\nRevises: 1515ecd33ef9\nCreate Date: 2022-11-04 05:19:14.502800\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c1d95cf2b31b'\ndown_revision = '1515ecd33ef9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('freeroutine_table',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('routine_name', sa.String(length=120), nullable=False),\n sa.Column('description', sa.String(length=500), nullable=True),\n sa.Column('burnt_calories', sa.String(length=120), nullable=False),\n sa.Column('routine_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['routine_id'], ['muscles_table.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('description'),\n sa.UniqueConstraint('routine_name')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('freeroutine_table')\n # ### end Alembic commands ###\n","repo_name":"d-sadovnik/Proyecto-Final-DHM2","sub_path":"migrations/versions/c1d95cf2b31b_.py","file_name":"c1d95cf2b31b_.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"44"} +{"seq_id":"12052180282","text":"\r\nimport json\r\nfrom src.holodex import Entry, HoloDex, ORGS\r\nfrom random import choice\r\nfrom collections import namedtuple\r\n\r\nclass VtuberEntry:\r\n def __init__(self, **data):\r\n self.name: str = None\r\n self.org: str = None\r\n\r\n self.photo: str = None\r\n\r\n self.attack: int = 0\r\n self.defense: int = 0\r\n self.speed: int = 0\r\n\r\n self.health: int = 0\r\n self.stamina: int = 0\r\n self.stamina_regen: int = 0\r\n self.mana: int = 0\r\n self.mana_regen: int = 0\r\n\r\n self.abilities = []\r\n self.card_id: int = 0\r\n\r\n for key in data.keys():\r\n setattr(self, key, data[key])\r\n\r\n def out(self):\r\n return {\r\n \"name\": self.name,\r\n \"org\": self.org,\r\n \"photo\": self.photo,\r\n \"attack\": self.attack,\r\n \"defense\": self.defense,\r\n \"health\": self.health,\r\n \"stamina\": self.stamina,\r\n \"stamina_regen\": self.stamina_regen,\r\n \"mana\": self.mana,\r\n \"mana_regen\": self.mana_regen,\r\n \"abilities\": self.abilities\r\n }\r\n\r\n\r\nCard = namedtuple(\"Card\", [\"owner\", \"original_id\", 'id'])\r\n\r\n\r\ndef update_database():\r\n dex = HoloDex()\r\n\r\n values = []\r\n\r\n for i in range(10):\r\n data = dex.get_channels(org=ORGS.ALL, offset=i*100)\r\n values += data\r\n\r\n with open('./holodex.db', 'a') as a:\r\n json.dump(values, a, indent=1)\r\n\r\n\r\ndef read():\r\n with open(\"./holodex.db\", 'r') as r:\r\n data = json.load(r)\r\n return data\r\n\r\ndef get_randoms(count:int=1) -> list[Entry]:\r\n values = read().copy()\r\n out = []\r\n for i in range(count):\r\n value = choice(values)\r\n values.remove(value)\r\n out.append(value)\r\n return [Entry(**x) for x in out]\r\n","repo_name":"EnokiUN/Simp","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"21118542663","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport dash_leaflet as dl\nimport dash_leaflet.express as dlx\nfrom dash import Dash, html\n\napp = Dash()\napp.layout = html.Div(\n [\n dl.Map(\n [\n dl.TileLayer(),\n # From in-memory geojson. 
All markers at same point forces spiderfy at any zoom level.\n dl.GeoJSON(\n data=dlx.dicts_to_geojson([dict(lat=-37.8, lon=175.6)] * 100), cluster=True\n ),\n # From hosted asset (best performance).\n dl.GeoJSON(\n url=\"assets/leaflet_50k.pbf\",\n format=\"geobuf\",\n cluster=True,\n id=\"sc\",\n zoomToBoundsOnClick=True,\n superClusterOptions={\"radius\": 100},\n ),\n ],\n center=(-37.75, 175.4),\n zoom=11,\n style={\"width\": \"100hw\", \"height\": \"97vh\", \"margin\": \"auto\", \"display\": \"block\"},\n ),\n ]\n)\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", debug=True)\n\n","repo_name":"rosaldo/Dash_POC","sub_path":"map_lea_v4.py","file_name":"map_lea_v4.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17870303188","text":"from queue import Queue\n\n\ndef interleave(s):\n q = Queue()\n cnt = 0\n for _ in range(len(s) - 1):\n q.put(s.pop())\n while True:\n if q.empty():\n break\n s.append(q.get())\n time = q.qsize()\n for _ in range(time):\n s.append(q.get())\n for _ in range(time):\n q.put(s.pop())\n return s\n\n\nif __name__ == '__main__':\n s = [i for i in range(1, 6)]\n print(interleave(s))\n s = [i for i in range(1, 5)]\n print(interleave(s))\n","repo_name":"KleinTong/Daily-Coding-Problem-Solution","sub_path":"interleave_stack/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"18928589449","text":"import sys,os,logging,glob,pickle,torch,joblib,random,string\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd \nimport transformers\nfrom sklearn.model_selection import train_test_split\n#from sklearn.datasets import fetch_20newsgroups\nfrom transformers import AutoTokenizer\nimport transformers\n\nPATH_SCRATCH_CACHE = \"/scratch/w/wluyliu/yananc/cache\"\ntokenizer_bert = AutoTokenizer.from_pretrained('bert-base-cased',cache_dir=PATH_SCRATCH_CACHE, local_files_only=True)\n\ndef truncate(sent, max_length):\n ids = tokenizer_bert.encode(sent, truncation=True, max_length=max_length)\n sent_ = tokenizer_bert.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n return sent_\n\ndef noisy_label(l, unique_labels):\n unique_labels_ = [j for j in unique_labels if j!=l]\n assert l not in unique_labels_\n return random.sample(unique_labels_, 1)[0]\n \ndef remove_str(sent):\n rml = ['(AP)', '(Reuters)', '(Canadian Press)', '<b>...</b>', '(AFP)', '(washingtonpost.com)', \\\n '(NewsFactor)', '(USATODAY.com)', '(Ziff Davis)', '#39;', '', '', '', \\\n '[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]', '\\n', '\\t' , 'p & gt ;', '& lt ;', '\\\\']\n for word in rml:\n sent = sent.replace(word,'')\n return sent.strip(string.punctuation).strip()\n \ncap = 600\n\ndef sample_stratify(df, samplecnt):\n if samplecnt < 0:\n return df \n ll = []\n for cate in df['label'].unique():\n dfs = df.loc[df['label']==cate].sample(samplecnt)\n ll.append(dfs)\n return pd.concat(ll).sample(frac=1)\n\nclass load_data():\n def __init__(self, samplecnt = -1, dataset='yahoo', samplecnt_test=10000, path='./torch_ds'):\n self.samplecnt = samplecnt\n self.dataset = dataset\n self.path = path\n self.samplecnt_test = samplecnt_test\n\n if self.dataset in ['ag','yahoo', 'agt', 'agp']:\n\n if self.dataset == 'agt':\n self.df_train = pd.read_csv('{}/ag_train_kaggle.csv'.format(self.path))\n self.df_test = 
pd.read_csv('{}/ag_test_kaggle.csv'.format(self.path)) \n del self.df_train['Description'], self.df_test['Description']\n self.df_train = self.df_train.rename(columns={'Title': 'content'}).rename(columns={'Class Index': 'label'}) \n self.df_test = self.df_test.rename(columns={'Title': 'content'}).rename(columns={'Class Index': 'label'}) \n \n elif self.dataset in ['ag', 'agp']:\n self.df_train = pd.read_csv('{}/{}_train.csv'.format(self.path, 'ag'))\n self.df_test = pd.read_csv('{}/{}_test.csv'.format(self.path, 'ag'))\n\n elif self.dataset == 'yahoo':\n self.df_train = pd.read_csv('{}/{}_train.csv'.format(self.path, 'yahoo'))\n self.df_test = pd.read_csv('{}/{}_test.csv'.format(self.path, 'yahoo')) \n \n if self.dataset in ['ag', 'agt']:\n world_replace = ' '.join(['Politics','War','Military','Terrorism','Election','Finance',\\\n 'Crime','Murder','Religion','Jurisdiction', 'Democracy'])\n ixl = {1:'World', 2:\"Sports\", 3:\"Business\", 4:\"science and technology\"} \n \n elif self.dataset == 'agp':\n ixl = {1:'Politics and Law', 2:\"Sports\", 3:\"Business\", 4:\"science and technology\"} \n\n elif self.dataset == 'yahoo':\n ixl = { 1: 'Society and Culture',\n 2: 'Science and Mathematics',\n 3: 'Health',\n 4: 'Education and Reference',\n 5: 'Computers and Internet',\n 6: 'Sports',\n 7: 'Business and Finance',\n 8: 'Entertainment and Music',\n 9: 'Family and Relationships',\n 10: 'Politics and Government'}\n self.df_train['label_name'] = self.df_train['label'].map(lambda x: ixl.get(x))\n self.df_test['label_name'] = self.df_test['label'].map(lambda x: ixl.get(x))\n self.df_train['label'] = self.df_train['label'] - 1\n self.df_test['label'] = self.df_test['label'] - 1\n\n\n\n elif self.dataset == 'stsa':\n self.df_train = pd.read_csv(\"{}/stsa/train.tsv\".format(self.path), sep='\\t', header=None, names=['label', 'content'])\n self.df_test = pd.read_csv(\"{}/stsa/test.tsv\".format(self.path), sep='\\t', header=None, names=['label', 'content'])\n self.df_dev = pd.read_csv(\"{}/stsa/dev.tsv\".format(self.path), sep='\\t', header=None, names=['label', 'content'])\n self.df_test = pd.concat([self.df_dev, self.df_test])\n\n self.df_train['label_name'] = self.df_train['label'].map(lambda x: x.lower())\n self.df_test['label_name'] = self.df_test['label'].map(lambda x: x.lower())\n\n self.df_train['label'] = self.df_train['label'].map({'Negative':0, 'Positive':1})\n self.df_test['label'] = self.df_test['label'].map({'Negative':0, 'Positive':1})\n \n elif self.dataset in ['yelp2','amazon2', 'yelp5', 'amazon5']:\n self.df_train = pd.read_csv('{}/{}_train.csv'.format(self.path, self.dataset))\n self.df_test = pd.read_csv('{}/{}_test.csv'.format(self.path, self.dataset))\n ixl = {1:'negative', 2:'positive'}\n self.df_train['label_name'] = self.df_train['label'].map(lambda x: ixl.get(x))\n self.df_test['label_name'] = self.df_test['label'].map(lambda x: ixl.get(x))\n self.df_train['label'] = self.df_train['label'] - 1\n self.df_test['label'] = self.df_test['label'] - 1\n\n elif self.dataset == 'imdb':\n self.df_train = pd.read_csv('{}/{}_train.csv'.format(self.path, self.dataset))\n self.df_test = pd.read_csv('{}/{}_test.csv'.format(self.path, self.dataset))\n ixl = {'neg':'negative', 'pos':'positive'} \n self.df_train['label_name'] = self.df_train['label'].map(lambda x: ixl.get(x))\n self.df_test['label_name'] = self.df_test['label'].map(lambda x: ixl.get(x))\n self.df_train['label'] = self.df_train['label'].map({'neg':0, 'pos':1})\n self.df_test['label'] = self.df_test['label'].map({'neg':0, 
'pos':1})\n\n elif self.dataset == 'uci':\n df = pd.read_csv(\"{}/uci-news-aggregator.csv\".format(self.path)) \n df = df[['CATEGORY','TITLE']]\n df.rename(\n columns={\"CATEGORY\": \"label\", \"TITLE\":\"content\"},\n inplace=True )\n ld = {'e':'entertainment', 'b':'business', 't':\"science and technology\", 'm':\"health\"}\n ixl = {'e':0, 'b':1, 't':2, 'm':3}\n df['label_name'] = df['label'].map(lambda x: ld[x])\n df['label'] = df['label'].map(lambda x: ixl[x])\n self.df_train, self.df_test = train_test_split(df, test_size=0.3, random_state=333)\n\n elif self.dataset == 'nyt':\n infos = []\n with open('{}/nyt/dataset.txt'.format(self.path),'r') as f:\n for line in f:\n infos.append(line.strip())\n\n labels = []\n with open('{}/nyt/labels.txt'.format(self.path),'r') as f:\n for line in f:\n labels.append(int(line.strip()))\n\n df = pd.DataFrame(zip(infos, labels), columns=['content','label'])\n\n names = []\n with open('{}/nyt/classes.txt'.format(self.path),'r') as f:\n for line in f:\n names.append(line.strip())\n ixl = {ix:l for ix, l in enumerate(names)}\n df['label_name'] = df['label'].map(lambda x: ixl[x])\n\n self.df_train, self.df_test = train_test_split(df, test_size=0.3, random_state=333)\n\n else:\n raise KeyError(\"dsn illegal!\") \n\n self.df_train = sample_stratify(self.df_train, self.samplecnt)\n if self.samplecnt_test > 0:\n self.df_test = self.df_test.sample(min(self.df_test.shape[0], self.samplecnt_test))\n\n\n\n'''\nnyt\nbusiness 7639\npolitics 7182\nsports 2048\nhealth 1656\neducation 1255\nestate 1135\narts 840\nscience 349\ntechnology 293\n'''\n\n \n\n\n\n\nimport datasets\ndef get_cc_news(s=1):\n cc_news = datasets.load_dataset('cc_news', split=\"train\", cache_dir='/scratch/w/wluyliu/yananc/cache')\n '''\n Dataset({\n features: ['date', 'description', 'domain', 'image_url', 'text', 'title', 'url'],\n num_rows: 708241\n })\n '''\n df = pd.DataFrame(zip(cc_news['title'], cc_news['text'], cc_news['description'] ))\n df.columns = ['title','content','description']\n\n df.drop_duplicates(['title','content'], inplace=True) # 708241\n\n return df.sample(frac=s) #615019 \n\n\n\n\ndef get_cc_text_double(ft_pattern, dsn, s=1):\n\n if dsn == 'cc':\n df_cc = get_cc_news(s)\n df_cc = df_cc.loc[(df_cc['title']!='') & (df_cc['content']!='') & (~df_cc['title'].isnull()) & (~df_cc['content'].isnull())]\n df_train, df_test = train_test_split(df_cc, test_size=0.05)\n\n elif dsn == 'c4':\n c4_news = datasets.load_dataset('c4', \"realnewslike\", cache_dir='/scratch/w/wluyliu/yananc/cache')\n df_train = pd.DataFrame(c4_news['train']['text'], columns=['content']) # 13799838\n df_test = pd.DataFrame(c4_news['validation']['text'], columns=['content'])\n\n if ft_pattern == 'tc' and dsn == 'cc':\n return df_train.rename(columns={'title': 'text1'}).rename(columns={'content': 'text2'})[['text1','text2']],\\\n df_test.rename(columns={'title': 'text1'}).rename(columns={'content': 'text2'})[['text1','text2']],\\\n \n elif ft_pattern == 'pp':\n rr_train = df_train['content'].map(lambda x: para_split2(x, False)).tolist()\n rr_test = df_test['content'].map(lambda x: para_split2(x, False)).tolist()\n\n elif ft_pattern == 'ss':\n rr_train = df_train['content'].map(lambda x: para_split2(x, True)).tolist()\n rr_test = df_test['content'].map(lambda x: para_split2(x, True)).tolist()\n\n df_text2text_train = pd.DataFrame([r for r in rr_train if r], columns=['text1', 'text2'])\n df_text2text_test = pd.DataFrame([r for r in rr_test if r], columns=['text1', 'text2'])\n\n return df_text2text_train, 
df_text2text_test\n\n\n\n\n\n\n'''\n def get_tweet(self):\n files = glob.glob(\"../datasets_aug/tweetraw/*.txt\")\n infos = []\n for f in files:\n label = f.split('/')[-1].replace('.txt','')\n with open(f,'r') as ff:\n for line in ff:\n infos.append((label, line.strip()))\n df = pd.DataFrame(infos, columns=['label','content'])\n df_train, df_test = train_test_split(df, test_size=0.2)\n\n\n\n\n # bbc \n def get_bbc_news(self):\n infos = []\n for cate in ['business', 'entertainment', 'politics', 'sport', 'tech']:\n files = glob.glob(\"../datasets_aug/bbc/{}/*.txt\".format(cate))\n for ff in files:\n with open(ff, 'r', errors='ignore') as f :\n content = f.read()\n infos.append((content, cate)) \n df = pd.DataFrame(infos, columns=['content', 'label'])\n df['label'] = df['label'].map(lambda x: 'technology' if x=='tech' else x)\n\n df_train, df_test = train_test_split(df, test_size=0.5)\n df_train = sample_stratify(df_train, self.samplecnt, self.seed)\n return df_train, df_test, df.sample(frac=1)\n\n # bbc sports\n def get_bbcsports_news(self):\n infos = []\n for cate in ['athletics', 'cricket', 'football', 'rugby', 'tennis']:\n files = glob.glob(\"../datasets_aug/bbcsport/{}/*.txt\".format(cate))\n for ff in files:\n with open(ff, 'r', errors='ignore') as f :\n content = f.read()\n infos.append((content, cate)) \n df = pd.DataFrame(infos, columns=['content', 'label'])\n df_train, df_test = train_test_split(df, test_size=0.5)\n df_train = sample_stratify(df_train, self.samplecnt, self.seed)\n return df_train, df_test, df.sample(frac=1)\n\n def get_pop_news(self):\n df_train = pd.read_csv(\"../datasets_aug/pop_news/train_file.csv\") \n df_test = pd.read_csv(\"../datasets_aug/pop_news/test_file.csv\")\n df_train = df_train[['Headline','Title','Topic']]\n df_test = df_test[['Headline','Title','Topic']]\n df_train['content'] = df_train['Headline'] + ' ' + df_train['Title']\n df_test['content'] = df_test['Headline'] + ' ' + df_test['Title'] \n del df_train['Headline'], df_train['Title'], df_test['Headline'],df_test['Title'] \n df_train.rename(\n columns={\"Topic\": \"label\"},\n inplace=True )\n df_test.rename(\n columns={\"Topic\": \"label\"},\n inplace=True ) \n df = pd.concat([df_train, df_test]).sample(frac=1)\n df_train = sample_stratify(df_train, self.samplecnt, self.seed) \n return df_train, df_test, df\n\n def get_20_news(self):\n label_name_map = {\n 'rec.autos':'autos automobile', \n 'comp.sys.mac.hardware':'computer system mac hardware', \n 'comp.graphics':'computer graphics', \n 'sci.space': 'science space',\n 'talk.politics.guns':'politics guns', \n 'sci.med':'science medicine illness disease', \n 'comp.sys.ibm.pc.hardware': 'computer system ibm pc hardware',\n 'comp.os.ms-windows.misc':'computer os microsoft windows', \n 'rec.motorcycles': 'motorcycles', \n 'talk.religion.misc':'religion',\n 'misc.forsale':'forsale for sale', \n 'alt.atheism':'atheism', \n 'sci.electronics':'science electronics', \n 'comp.windows.x':'computer windows x',\n 'rec.sport.hockey':'sport hockey', \n 'rec.sport.baseball':'sport baseball', \n 'soc.religion.christian':'religion christian',\n 'talk.politics.mideast':'politics middle east', \n 'talk.politics.misc':'politics', \n 'sci.crypt':'science encryption'\n }\n #data_train = fetch_20newsgroups(subset='train',shuffle=True)\n #joblib.dump(data_train, '20news_data_train')\n data_train = joblib.load('../datasets_aug/20newsgroups/20news_data_train')\n df_train = pd.DataFrame(zip(data_train['data'], list(data_train['target'])), columns=['content','label'])\n ixl 
= {ix:n for ix, n in enumerate(data_train['target_names'])}\n df_train['label'] = df_train['label'].map(lambda x: label_name_map[ixl[x]])\n\n #data_test = fetch_20newsgroups(subset='test',shuffle=True)\n #joblib.dump(data_test, '20news_data_test')\n data_test = joblib.load('../datasets_aug/20newsgroups/20news_data_test')\n df_test = pd.DataFrame(zip(data_test['data'], list(data_test['target'])), columns=['content','label'])\n ixl = {ix:n for ix, n in enumerate(data_test['target_names'])}\n df_test['label'] = df_test['label'].map(lambda x: label_name_map[ixl[x]])\n df = pd.concat([df_train, df_test]).sample(frac=1)\n df_train = sample_stratify(df_train, self.samplecnt, self.seed) \n return df_train, df_test, df \n \n\n \n\n'''\n\n\n\nimport datetime,csv\ndef record_log(file, record):\n #cur = datetime.datetime.strftime(datetime.datetime.now() + datetime.timedelta(hours=8), '%Y-%m-%d %H:%M:%S')\n cur = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')\n with open(file, 'a') as f:\n writer = csv.writer(f, delimiter=' ')\n writer.writerow([cur] + record)\n\ndef get_tokens_len(ds, cap3rd):\n lens = []\n for content in ds.df_train['content'].tolist():\n tokens = tokenizer_bert.tokenize(content)\n lens.append(len(tokens))\n return int(np.quantile(np.array(lens), cap3rd, axis=0))\n\n\n\ndef process_ds(ds, maxlen=256, truncate_testset=False):\n # ds.df_train['content'] = ds.df_train['content']\\\n # .map(lambda x: x.replace('
',' '))\n #if not transformers.__version__.startswith('2.'):\n ds.df_train['content'] = ds.df_train['content'].map(lambda x: truncate(x, maxlen))\n if truncate_testset:\n ds.df_test['content'] = ds.df_test['content'].map(lambda x: truncate(x, maxlen))\n proper_len = get_tokens_len(ds, 0.9)\n return ds, proper_len\n\n\n\nbase_nli ={\n 'politics':['Politics','War', 'Election','Constitution','Democracy','Conflict','Military',\\\n 'Terrorism', 'Government', 'Ideology', 'fascism', 'Socialism', 'Totalitarian', 'Religion'],\n 'law': ['Law', 'Legitimacy','Court','Crime','Murder','Jurisdiction'],\n 'science': ['Science','Aerospace','Physics','Chemistry','Biology','Scientist','Astronomy','Universe','Big Bang'],\n 'technology':['Technology','Biotech', 'IT','Computers','Internet','Algorithm','Space','Bitcoin','artificial Intelligence','Robot'],\n 'health': ['Health','Healthcare','Medicine','Clinics','Vaccine','Wellness','Nutrition','Dental','HIV','Disease'],\n 'business': ['Business','Finance','Oil price','Supply','Inflation','Dollars','Bank','Wall Street','Bitcoin',\n 'Federal Reserve','Accrual','Accountancy','Sluggishness','Consumerism','Trade','Quarterly earnings',\\\n 'Deposit','Revenue','Stocks','Recapitalization','Marketing','Futures'],\n 'sports': ['Sports','Athletics','Championships','Football','Olympic','Tournament','Chelsea','League','Golf',\n 'NFL','Super bowl','World Cup'],\n 'entertainment':['Entertainment','Pop music','Film','Music','Reality show','Drama','Concert','Rock music','Opera'],\n 'education': ['Education', 'Tertiary education', 'University','Curriculum','Lecture'],\n 'arts': ['Arts','Music','Painting','Art galleries','Classical music','Art Works','Stitchery'],\n 'estate': ['Estate','Estate tax','Real estate']\n}\n'''\nfor l, ll in base_nli.items():\n print(\"{}:{}\".format(l, ','.join(ll)))\n\n'''\n\n\nexpand_label_nli = {}\n\n# ag\nexpand_label_nli['World'] = base_nli['politics'] + base_nli['law']\nexpand_label_nli['Business'] = base_nli['business']\nexpand_label_nli['Sports'] = base_nli['sports']\n\n# uci\nexpand_label_nli['entertainment'] = base_nli['entertainment']\nexpand_label_nli['business'] = base_nli['business']\nexpand_label_nli['science and technology'] = base_nli['science'] + base_nli['technology']\nexpand_label_nli['health'] = base_nli['health']\n# nyt\nexpand_label_nli['education'] = base_nli['education']\nexpand_label_nli['arts'] = base_nli['arts']\nexpand_label_nli['politics'] = base_nli['politics']\nexpand_label_nli['sports'] = base_nli['sports']\nexpand_label_nli['estate'] = base_nli['estate']\nexpand_label_nli['science'] = base_nli['science'] \nexpand_label_nli['technology'] = base_nli['technology']\n\n\n\n\n\n\n\n","repo_name":"yananchen1989/PLMs_text_classification","sub_path":"utils/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":18968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"41908632857","text":"from abc import ABC, abstractmethod\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport copy\nfrom enum import Enum\nimport numpy as np\nimport pandas as pd\nimport os\nfrom rdkit import Chem\nimport os\nimport sys\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom rdkit.Chem import AllChem, Descriptors\n\nfrom feature.mol_featurizer import SUPPORTED_MOL_FEATURIZER, MolMultiModalFeaturizer\nfrom utils.kg_utils import SUPPORTED_KG, embed\nfrom utils.split_utils import random_split, scaffold_split\nfrom utils import Normalizer\n\nsys.path.insert(0, 
os.path.dirname(__file__))\n\ndef _load_bbbp_dataset(input_path):\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n\n preprocessed_rdkit_mol_objs_list = [m if m is not None else None\n for m in rdkit_mol_objs_list]\n preprocessed_smiles_list = [AllChem.MolToSmiles(m) if m is not None else None\n for m in preprocessed_rdkit_mol_objs_list]\n labels = input_df['p_np']\n # convert 0 to -1\n labels = labels.replace(0, -1)\n # there are no nans\n assert len(smiles_list) == len(preprocessed_rdkit_mol_objs_list)\n assert len(smiles_list) == len(preprocessed_smiles_list)\n assert len(smiles_list) == len(labels)\n return preprocessed_smiles_list, \\\n preprocessed_rdkit_mol_objs_list, labels.values\n\n\ndef _load_clintox_dataset(input_path):\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n\n preprocessed_rdkit_mol_objs_list = [m if m is not None else None\n for m in rdkit_mol_objs_list]\n preprocessed_smiles_list = [AllChem.MolToSmiles(m) if m is not None else None\n for m in preprocessed_rdkit_mol_objs_list]\n tasks = ['FDA_APPROVED', 'CT_TOX']\n labels = input_df[tasks]\n # convert 0 to -1\n labels = labels.replace(0, -1)\n # there are no nans\n assert len(smiles_list) == len(preprocessed_rdkit_mol_objs_list)\n assert len(smiles_list) == len(preprocessed_smiles_list)\n assert len(smiles_list) == len(labels)\n return preprocessed_smiles_list, \\\n preprocessed_rdkit_mol_objs_list, labels.values\n\n\n# input_path = 'dataset/clintox/raw/clintox.csv'\n# smiles_list, rdkit_mol_objs_list, labels = _load_clintox_dataset(input_path)\n\ndef _load_esol_dataset(input_path):\n # NB: some examples have multiple species\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n labels = input_df['measured log solubility in mols per litre']\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n return smiles_list, rdkit_mol_objs_list, labels.values\n\n\n# input_path = 'dataset/esol/raw/delaney-processed.csv'\n# smiles_list, rdkit_mol_objs_list, labels = _load_esol_dataset(input_path)\n\ndef _load_freesolv_dataset(input_path):\n\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n labels = input_df['expt']\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n return smiles_list, rdkit_mol_objs_list, labels.values\n\n\ndef _load_lipophilicity_dataset(input_path):\n\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n labels = input_df['exp']\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n return smiles_list, rdkit_mol_objs_list, labels.values\n\n\ndef _load_malaria_dataset(input_path):\n\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n labels = input_df['activity']\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n return smiles_list, rdkit_mol_objs_list, labels.values\n\n\ndef _load_cep_dataset(input_path):\n\n input_df = 
pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n labels = input_df['PCE']\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n return smiles_list, rdkit_mol_objs_list, labels.values\n\n\ndef _load_muv_dataset(input_path):\n\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n tasks = ['MUV-466', 'MUV-548', 'MUV-600', 'MUV-644', 'MUV-652', 'MUV-689',\n 'MUV-692', 'MUV-712', 'MUV-713', 'MUV-733', 'MUV-737', 'MUV-810',\n 'MUV-832', 'MUV-846', 'MUV-852', 'MUV-858', 'MUV-859']\n labels = input_df[tasks]\n # convert 0 to -1\n labels = labels.replace(0, -1)\n # convert nan to 0\n labels = labels.fillna(0)\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n return smiles_list, rdkit_mol_objs_list, labels.values\n\n\ndef check_columns(df, tasks, N):\n bad_tasks = []\n total_missing_count = 0\n for task in tasks:\n value_list = df[task]\n pos_count = sum(value_list == 1)\n neg_count = sum(value_list == -1)\n missing_count = sum(value_list == 0)\n total_missing_count += missing_count\n pos_ratio = 100. * pos_count / (pos_count + neg_count)\n missing_ratio = 100. * missing_count / N\n assert pos_count + neg_count + missing_count == N\n if missing_ratio >= 50:\n bad_tasks.append(task)\n print('task {}\\t\\tpos_ratio: {:.5f}\\tmissing ratio: {:.5f}'.format(task, pos_ratio, missing_ratio))\n print('total missing ratio: {:.5f}'.format(100. * total_missing_count / len(tasks) / N))\n return bad_tasks\n\n\ndef check_rows(labels, N):\n from collections import defaultdict\n p, n, m = defaultdict(int), defaultdict(int), defaultdict(int)\n bad_count = 0\n for i in range(N):\n value_list = labels[i]\n pos_count = sum(value_list == 1)\n neg_count = sum(value_list == -1)\n missing_count = sum(value_list == 0)\n p[pos_count] += 1\n n[neg_count] += 1\n m[missing_count] += 1\n if pos_count + neg_count == 0:\n bad_count += 1\n print('bad_count\\t', bad_count)\n \n print('pos\\t', p)\n print('neg\\t', n)\n print('missing\\t', m)\n return\n\n\ndef _load_pcba_dataset(input_path):\n input_df = pd.read_csv(input_path, sep=',')\n tasks = list(input_df.columns)[:-2]\n\n N = input_df.shape[0]\n temp_df = input_df[tasks]\n temp_df = temp_df.replace(0, -1)\n temp_df = temp_df.fillna(0)\n\n bad_tasks = check_columns(temp_df, tasks, N)\n for task in bad_tasks:\n tasks.remove(task)\n print('good tasks\\t', len(tasks))\n\n labels = input_df[tasks]\n labels = labels.replace(0, -1)\n labels = labels.fillna(0)\n labels = labels.values\n print(labels.shape) # 439863, 92\n check_rows(labels, N)\n\n input_df.dropna(subset=tasks, how='all', inplace=True)\n # convert 0 to -1\n input_df = input_df.replace(0, -1)\n # convert nan to 0\n input_df = input_df.fillna(0)\n labels = input_df[tasks].values\n print(input_df.shape) # 435685, 92\n N = input_df.shape[0]\n check_rows(labels, N)\n\n smiles_list = input_df['smiles'].tolist()\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n return smiles_list, rdkit_mol_objs_list, labels\n\n\ndef _load_sider_dataset(input_path):\n\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n tasks = ['Hepatobiliary 
disorders',\n 'Metabolism and nutrition disorders', 'Product issues', 'Eye disorders',\n 'Investigations', 'Musculoskeletal and connective tissue disorders',\n 'Gastrointestinal disorders', 'Social circumstances',\n 'Immune system disorders', 'Reproductive system and breast disorders',\n 'Neoplasms benign, malignant and unspecified (incl cysts and polyps)',\n 'General disorders and administration site conditions',\n 'Endocrine disorders', 'Surgical and medical procedures',\n 'Vascular disorders', 'Blood and lymphatic system disorders',\n 'Skin and subcutaneous tissue disorders',\n 'Congenital, familial and genetic disorders',\n 'Infections and infestations',\n 'Respiratory, thoracic and mediastinal disorders',\n 'Psychiatric disorders', 'Renal and urinary disorders',\n 'Pregnancy, puerperium and perinatal conditions',\n 'Ear and labyrinth disorders', 'Cardiac disorders',\n 'Nervous system disorders',\n 'Injury, poisoning and procedural complications']\n labels = input_df[tasks]\n # convert 0 to -1\n labels = labels.replace(0, -1)\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n return smiles_list, rdkit_mol_objs_list, labels.values\n\n\ndef _load_toxcast_dataset(input_path):\n\n # NB: some examples have multiple species, some example smiles are invalid\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n # Some smiles could not be successfully converted\n # to rdkit mol object so them to None\n preprocessed_rdkit_mol_objs_list = [m if m is not None else None\n for m in rdkit_mol_objs_list]\n preprocessed_smiles_list = [AllChem.MolToSmiles(m) if m is not None else None\n for m in preprocessed_rdkit_mol_objs_list]\n tasks = list(input_df.columns)[1:]\n labels = input_df[tasks]\n # convert 0 to -1\n labels = labels.replace(0, -1)\n # convert nan to 0\n labels = labels.fillna(0)\n assert len(smiles_list) == len(preprocessed_rdkit_mol_objs_list)\n assert len(smiles_list) == len(preprocessed_smiles_list)\n assert len(smiles_list) == len(labels)\n return preprocessed_smiles_list, \\\n preprocessed_rdkit_mol_objs_list, labels.values\n\n# root_path = 'dataset/chembl_with_labels'\ndef check_smiles_validity(smiles):\n try:\n m = Chem.MolFromSmiles(smiles)\n if m:\n return True\n else:\n return False\n except:\n return False\n\n\ndef split_rdkit_mol_obj(mol):\n \"\"\"\n Split rdkit mol object containing multiple species or one species into a\n list of mol objects or a list containing a single object respectively \"\"\"\n\n smiles = AllChem.MolToSmiles(mol, isomericSmiles=True)\n smiles_list = smiles.split('.')\n mol_species_list = []\n for s in smiles_list:\n if check_smiles_validity(s):\n mol_species_list.append(AllChem.MolFromSmiles(s))\n return mol_species_list\n\ndef get_largest_mol(mol_list):\n \"\"\"\n Given a list of rdkit mol objects, returns mol object containing the\n largest num of atoms. 
If multiple containing largest num of atoms,\n picks the first one \"\"\"\n\n num_atoms_list = [len(m.GetAtoms()) for m in mol_list]\n largest_mol_idx = num_atoms_list.index(max(num_atoms_list))\n return mol_list[largest_mol_idx]\n\ndef _load_tox21_dataset(input_path):\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n tasks = ['NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',\n 'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53']\n labels = input_df[tasks]\n # convert 0 to -1\n labels = labels.replace(0, -1)\n # convert nan to 0\n labels = labels.fillna(0)\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n return smiles_list, rdkit_mol_objs_list, labels.values\n\n\ndef _load_hiv_dataset(input_path):\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['smiles']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n labels = input_df['HIV_active']\n # convert 0 to -1\n labels = labels.replace(0, -1)\n # there are no nans\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n return smiles_list, rdkit_mol_objs_list, labels.values\n\ndef _load_bace_dataset(input_path):\n input_df = pd.read_csv(input_path, sep=',')\n smiles_list = input_df['mol']\n rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]\n labels = input_df['Class']\n # convert 0 to -1\n labels = labels.replace(0, -1)\n # there are no nans\n folds = input_df['Model']\n folds = folds.replace('Train', 0) # 0 -> train\n folds = folds.replace('Valid', 1) # 1 -> valid\n folds = folds.replace('Test', 2) # 2 -> test\n assert len(smiles_list) == len(rdkit_mol_objs_list)\n assert len(smiles_list) == len(labels)\n assert len(smiles_list) == len(folds)\n # return smiles_list, rdkit_mol_objs_list, folds.values, labels.values\n return smiles_list, rdkit_mol_objs_list, labels.values\n\ndatasetname2function = {\n \"bbbp\": _load_bbbp_dataset,\n \"clintox\": _load_clintox_dataset,\n \"tox21\": _load_tox21_dataset,\n \"toxcast\": _load_toxcast_dataset,\n \"sider\": _load_sider_dataset,\n \"hiv\": _load_hiv_dataset,\n \"bace\": _load_bace_dataset,\n \"muv\": _load_muv_dataset,\n \"freesolv\": _load_freesolv_dataset,\n \"esol\": _load_esol_dataset,\n \"lipophilicity\": _load_lipophilicity_dataset,\n}\n\nclass Task(Enum):\n CLASSFICATION = 0\n REGRESSION = 1\n\nclass DPDataset(Dataset, ABC):\n def __init__(self, path, config, in_memory=True):\n super(DPDataset, self).__init__()\n self.path = path\n self.config = config\n self.in_memory = in_memory\n self._load_data()\n # self._featurize()\n\n @abstractmethod\n def _load_data(self, path):\n raise NotImplementedError\n\n def _featurize(self):\n logger.info(\"Featurizing...\")\n # self.featurized_drugs: 如果是多模态就是一个dict, 如果是structure单模态就是list[Data()]\n self.featurized_drugs = [self.drug_featurizer(drug) for drug in self.drugs]\n self.labels = [torch.tensor(label) for label in self.labels]\n\n def _build(self, save_path=\"\"):\n if len(self.config[\"mol\"][\"modality\"]) > 1 and save_path:\n kg_config = self.config[\"mol\"][\"featurizer\"][\"kg\"]\n self.kg = SUPPORTED_KG[kg_config[\"kg_name\"]](kg_config[\"kg_path\"])\n self.drug2kg, self.drug2text, _, _ = self.kg.link(self)\n # TODO: dp use TransE, don't need filter_out?\n filter_out = []\n \"\"\"\n for i_drug in data_index:\n smi = self.smiles[i_drug]\n #if smi in 
self.drug2kg:\n # filter_out.append((self.drug2kg[smi], self.protein2kg[protein]))\n \"\"\"\n # embed once for consistency\n try:\n kge = embed(self.kg, 'ProNE', filter_out=filter_out, dim=kg_config[\"embed_dim\"], save=True, save_path=save_path)\n except Exception as e:\n kge = None\n self.config[\"mol\"][\"featurizer\"][\"kg\"][\"kge\"] = kge\n self._configure_featurizer()\n # featurize all data pairs in one pass for training efficency\n if self.in_memory:\n self._featurize()\n\n def _configure_featurizer(self):\n if len(self.config[\"mol\"][\"modality\"]) > 1:\n self.drug_featurizer = MolMultiModalFeaturizer(self.config[\"mol\"])\n self.drug_featurizer.set_drug2kgid_dict(self.drug2kg)\n self.drug_featurizer.set_drug2text_dict(self.drug2text)\n else:\n drug_feat_config = self.config[\"mol\"][\"featurizer\"][\"structure\"]\n self.drug_featurizer = SUPPORTED_MOL_FEATURIZER[drug_feat_config[\"name\"]](drug_feat_config)\n\n def index_select(self, indexes):\n new_dataset = copy.copy(self)\n new_dataset.drugs = [new_dataset.drugs[i] for i in indexes]\n new_dataset.labels = [new_dataset.labels[i] for i in indexes]\n return new_dataset\n\n def __getitem__(self, index):\n if not self.in_memory:\n drug, label = self.drugs[index], self.labels[index]\n return self.drug_featurizer(drug), label\n else:\n return self.featurized_drugs[index], self.labels[index]\n\n def __len__(self):\n return len(self.drugs)\n\nclass MoleculeNetDataset(DPDataset):\n name2target = {\n \"BBBP\": [\"p_np\"],\n \"Tox21\": [\"NR-AR\", \"NR-AR-LBD\", \"NR-AhR\", \"NR-Aromatase\", \"NR-ER\", \"NR-ER-LBD\", \n \"NR-PPAR-gamma\", \"SR-ARE\", \"SR-ATAD5\", \"SR-HSE\", \"SR-MMP\", \"SR-p53\"],\n \"ClinTox\": [\"CT_TOX\", \"FDA_APPROVED\"],\n \"HIV\": [\"HIV_active\"],\n \"Bace\": [\"class\"],\n \"SIDER\": [\"Hepatobiliary disorders\", \"Metabolism and nutrition disorders\", \"Product issues\", \n \"Eye disorders\", \"Investigations\", \"Musculoskeletal and connective tissue disorders\", \n \"Gastrointestinal disorders\", \"Social circumstances\", \"Immune system disorders\", \n \"Reproductive system and breast disorders\", \n \"Neoplasms benign, malignant and unspecified (incl cysts and polyps)\", \n \"General disorders and administration site conditions\", \"Endocrine disorders\", \n \"Surgical and medical procedures\", \"Vascular disorders\", \n \"Blood and lymphatic system disorders\", \"Skin and subcutaneous tissue disorders\", \n \"Congenital, familial and genetic disorders\", \"Infections and infestations\", \n \"Respiratory, thoracic and mediastinal disorders\", \"Psychiatric disorders\", \n \"Renal and urinary disorders\", \"Pregnancy, puerperium and perinatal conditions\", \n \"Ear and labyrinth disorders\", \"Cardiac disorders\", \n \"Nervous system disorders\", \"Injury, poisoning and procedural complications\"],\n \"MUV\": ['MUV-692', 'MUV-689', 'MUV-846', 'MUV-859', 'MUV-644', 'MUV-548', 'MUV-852',\n 'MUV-600', 'MUV-810', 'MUV-712', 'MUV-737', 'MUV-858', 'MUV-713', 'MUV-733',\n 'MUV-652', 'MUV-466', 'MUV-832'],\n \"Toxcast\": [\"\"], # 617\n \"FreeSolv\": [\"expt\"],\n \"ESOL\": [\"measured log solubility in mols per litre\"],\n \"Lipo\": [\"exp\"],\n \"qm7\": [\"u0_atom\"],\n \"qm8\": [\"E1-CC2\", \"E2-CC2\", \"f1-CC2\", \"f2-CC2\", \"E1-PBE0\", \"E2-PBE0\", \n \"f1-PBE0\", \"f2-PBE0\", \"E1-CAM\", \"E2-CAM\", \"f1-CAM\",\"f2-CAM\"],\n \"qm9\": ['mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'cv']\n }\n name2task = {\n \"BBBP\": Task.CLASSFICATION,\n \"Tox21\": Task.CLASSFICATION,\n \"ClinTox\": 
Task.CLASSFICATION,\n \"HIV\": Task.CLASSFICATION,\n \"Bace\": Task.CLASSFICATION,\n \"SIDER\": Task.CLASSFICATION,\n \"MUV\": Task.CLASSFICATION,\n \"Toxcast\": Task.CLASSFICATION,\n \"FreeSolv\": Task.REGRESSION,\n \"ESOL\": Task.REGRESSION,\n \"Lipo\": Task.REGRESSION,\n \"qm7\": Task.REGRESSION,\n \"qm8\": Task.REGRESSION,\n \"qm9\": Task.REGRESSION\n }\n\n def __init__(self, path, config, name=\"BBBP\", label_type=1):\n if name not in self.name2target:\n raise ValueError(\"%s is not a valid moleculenet task!\" % name)\n file_name = os.listdir(os.path.join(path, name.lower(), \"raw\"))[0]\n assert file_name[-4:] == \".csv\"\n path = os.path.join(path, name.lower(), \"raw\", file_name)\n self.name = name\n self.targets = self.name2target[name]\n # TODO: del: no use\n self.task = self.name2task[name]\n # TODO: del label_type\n self.label_type = label_type\n super(MoleculeNetDataset, self).__init__(path, config)\n self._train_test_split()\n self._normalize()\n \n \n def _load_data(self): \n smiles_list, rdkit_mol_objs, labels = datasetname2function[self.name.lower()](self.path)\n if labels.ndim == 1:\n labels = np.expand_dims(labels, axis=1)\n self.smiles, self.drugs, self.labels = [], [], []\n for i in range(len(smiles_list)):\n rdkit_mol = rdkit_mol_objs[i]\n if rdkit_mol is None:\n continue\n # TODO: drugs and smiles are all get from AllChem.MolFromSmiles()\n self.smiles.append(smiles_list[i])\n self.drugs.append(smiles_list[i])\n # self.drugs.append(rdkit_mol[i])\n self.labels.append(labels[i])\n \n def _train_test_split(self, strategy=\"scaffold\"):\n if strategy == \"random\":\n self.train_index, self.val_index, self.test_index = random_split(len(self), 0.1, 0.1)\n elif strategy == \"scaffold\":\n self.train_index, self.val_index, self.test_index = scaffold_split(self, 0.1, 0.1, is_standard=True)\n\n def _normalize(self):\n if self.name in [\"qm7\", \"qm9\"]:\n self.normalizer = []\n for i in range(len(self.targets)):\n self.normalizer.append(Normalizer(self.labels[:, i]))\n self.labels[:, i] = self.normalizer[i].norm(self.labels[:, i])\n else:\n # TODO:\n self.normalizer = [None] * len(self.targets)\n\nSUPPORTED_DP_DATASETS = {\n \"MoleculeNet\": MoleculeNetDataset\n}","repo_name":"PharMolix/OpenBioMed","sub_path":"open_biomed/datasets/dp_dataset.py","file_name":"dp_dataset.py","file_ext":"py","file_size_in_byte":22025,"program_lang":"python","lang":"en","doc_type":"code","stars":476,"dataset":"github-code","pt":"44"} +{"seq_id":"72510142213","text":"playing = True\nwhile playing:\n print(\"1. left\\n2. right\\n0. 
quit\")\n try:\n choice = int(input(\"Which direction would you like to go?\\n\"))\n except ValueError:\n print(\"You must select 0,1 or 2.\")\n choice = None\n \n if choice == 1:\n print(\"You chose left.\")\n elif choice == 2:\n print(\"You chose right.\")\n elif choice == 0:\n print(\"Buh bye\")\n playing = False \n ","repo_name":"cefleet/PiBGo","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"13402476894","text":"import logging\nfrom threading import Thread\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\n\nemail_html_template = get_template('email_order.html')\nemail_text_template = get_template('order_email.txt')\nemail_admin_html_template = get_template('email_order_admin.html')\nemail_admin_text_template = get_template('email_order_admin.txt')\n\nlogger = logging.getLogger('order.email')\n\n\nclass EmailThread(Thread):\n def __init__(self, subject, context, recipient_list, send_client=True):\n self.subject = subject\n self.recipient_list = recipient_list\n self.context = context\n self.send_client = send_client\n Thread.__init__(self)\n\n def run(self):\n try:\n html_email_content = email_html_template.render(self.context)\n text_email_content = email_text_template.render(self.context)\n html_email_admin_content = email_admin_html_template.render(self.context)\n text_email_admin_content = email_admin_text_template.render(self.context)\n except Exception as ex:\n logger.error(f'Error while render email template:\\n{ex}')\n raise ex\n\n email = EmailMultiAlternatives('Заказ на Formatlogistic', text_email_content,\n from_email=f'Formatlogistic <{settings.DEFAULT_FROM_EMAIL}>')\n email.attach_alternative(html_email_content, \"text/html\")\n email.to = self.recipient_list\n\n email_admin = EmailMultiAlternatives('Новый заказ на Formatlogistic', text_email_admin_content,\n from_email=f'Formatlogistic <{settings.DEFAULT_FROM_EMAIL}>')\n email_admin.attach_alternative(html_email_admin_content, 'text/html')\n email_admin.to = [f'{settings.DEFAULT_ADMIN_EMAIL_RECIPIENT}']\n\n try:\n email_admin.send()\n logger.info(f\"Email send success {settings.DEFAULT_ADMIN_EMAIL_RECIPIENT}\")\n except Exception as ex:\n logger.error(f'Error while sending email to {settings.DEFAULT_ADMIN_EMAIL_RECIPIENT}:\\n{ex}')\n raise ex\n\n if self.send_client:\n try:\n email.send()\n logger.info(f\"Email send success {self.recipient_list}\")\n except Exception as ex:\n logger.error(f'Error while sending email to {self.recipient_list}:\\n{ex}')\n raise ex\n\n\ndef send_order_email(subject, context, recipient_list, send_client=True):\n EmailThread(subject, context, recipient_list, send_client).start()\n","repo_name":"mnogoruk/fastcustoms","sub_path":"order/email/multithread.py","file_name":"multithread.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35297712895","text":"#!/usr/bin/python3\n\nimport logging\nimport multiprocessing\nfrom rdflib.namespace import RDF, RDFS\nfrom models.rule_base import IRule, Rule\n\n\n\"\"\"\nMulticore implementation of SWARM Semantic Rule Mining algorithm [Barati2016].\n\nAlterations:\n * Limit on exactly two SI sets per CBS is lifted. This allows conjunctions in the consequent, e.g. 
A -> B /\\ C.\n    * Added local coverage of a class type wrt the union of multiple ES in a CBS to prevent overgeneralization.\n    * LLC with broadest coverage is preferred.\n    * Rules with more-than-one LLC are split.\n    * Added all permutations of a CBS to form a rule, e.g. A -> B /\\ C, B -> A /\\ C, and C -> A /\\ B.\n\n@Inbook{Barati2016,\n    author=\"Barati, Molood and Bai, Quan and Liu, Qing\",\n    editor=\"Booth, Richard and Zhang, Min-Ling\",\n    title=\"SWARM: An Approach for Mining Semantic Association Rules from Semantic Web Data\",\n    bookTitle=\"PRICAI 2016: Trends in Artificial Intelligence: 14th Pacific Rim International Conference on Artificial Intelligence, Phuket, Thailand, August 22-26, 2016, Proceedings\",\n    year=\"2016\",\n    publisher=\"Springer International Publishing\",\n    address=\"Cham\",\n    pages=\"30--43\",\n    isbn=\"978-3-319-42911-3\",\n    doi=\"10.1007/978-3-319-42911-3_3\",\n}\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\ndef generate_semantic_association_rules(instance_graph, ontology_graph, cbs_sets, queue, rules, minimal_local_support=1.0):\n    \"\"\" Generate semantic association rules from CBS\n\n    :param instance_graph: a knowledge graph instance\n    :param ontology_graph: a knowledge graph instance\n    :param cbs_sets: shared list of (CBS, similarity) tuples\n    :param queue: shared queue with slices from cbs_sets\n    :param rules: shared list of rules as tuples (class type, antecedent, consequent [with conjunctions])\n    :param minimal_local_support: skip rules that do not meet the minimal local support\n\n    :returns: None\n    \"\"\"\n\n    pid = multiprocessing.current_process()\n    logger.info(\"{} - Generating Semantic Association Rules (LS >= {})\".format(pid, minimal_local_support))\n    while True:\n        work = queue.get()\n        if work is None:\n            break\n\n        for cbs, _ in cbs_sets[work]:\n            for ctype, (coverage, local_support) in _lowest_level_class(instance_graph, ontology_graph, cbs).items():\n                if local_support < minimal_local_support:\n                    continue\n\n                cbs_list = list(cbs)\n                for i in range(len(cbs_list)):\n                    rules.append(Rule(ctype, cbs_list[i][1], [pa for _, pa in cbs_list[:i]+cbs_list[i+1:]]))\n\n    logger.info(\"{} - Generated {} Semantic Association Rules\".format(pid, len(rules)))\n\ndef generate_semantic_item_sets(instance_graph):\n    \"\"\" Generate semantic item sets from a knowledge graph\n\n    :param instance_graph: shared knowledge graph instance as list\n\n    :returns: dictionary with (p, o) pairs as keys and set of matching s as value\n    \"\"\"\n\n    pid = multiprocessing.current_process()\n    logger.info(\"{} - Generating Semantic Item Set\".format(pid))\n    d = {}\n    for s, p, o in instance_graph:\n        k = (p, o)\n\n        if k in d.keys():\n            d[k].add(s)\n            continue\n\n        d[k] = {s}\n\n    logger.info(\"{} - Generated {} Semantic Item Sets\".format(pid, len(d)))\n    return d\n\ndef generate_common_behaviour_sets(item_sets, cb_sets, queue, similarity_threshold=.75):\n    \"\"\" Generate Common Behaviour Sets (CBS) from Semantic Item Sets\n\n    :param item_sets: shared dictionary with (p, o)-pairs as key and item sets as value\n    :param cb_sets: shared list of tuples (CBS, s), with s being the similarity\n    :param queue: shared queue with slices from item_sets\n    :param similarity_threshold: only generalize if the similarity exceeds this value\n\n    :returns: None\n    \"\"\"\n\n    pid = multiprocessing.current_process()\n    logger.info(\"{} - Generating Common Behaviour Sets (sim >= {})\".format(pid, similarity_threshold))\n\n    while True:\n        work = queue.get()\n        if work is None:\n            break\n\n        # if similarity_threshold <= 0, then 
(n*(n-1))/2 cb sets are generated\n        keys = list(item_sets.keys())\n        n = len(keys)\n        for i in work:\n            if i >= n:\n                break\n\n            pa_0 = keys[i]\n            es_0 = frozenset(item_sets[pa_0])\n            for pa_1 in keys[i+1:]:\n                es_1 = frozenset(item_sets[pa_1])\n                similarity = _similarity_of(es_0, es_1)\n                if similarity < similarity_threshold:\n                    continue\n\n                cb_sets.append((frozenset({\n                    (es_0, pa_0),\n                    (es_1, pa_1)}),\n                    similarity))\n\n\n    logger.info(\"{} - Generated {} Common Behaviour Sets\".format(pid, len(cb_sets)))\n\ndef extend_common_behaviour_sets(cbs_list, similarity_threshold=.75, work=None):\n    \"\"\" Recursively extend Common Behaviour Sets (CBS)\n\n    :param cbs_list: shared list of tuples (CBS, s), with s being the similarity\n    :param similarity_threshold: only generalize if the similarity exceeds this value\n    :param work: range of cbs_list to focus on\n\n    :returns: list of additional CB sets\n    \"\"\"\n\n    if len(cbs_list) <= 1:\n        return []\n\n    pid = multiprocessing.current_process()\n    logger.info(\"{} - Extending Common Behaviour Sets (sim >= {})\".format(pid, similarity_threshold))\n    extended_cbs_list = []\n    n = len(cbs_list)\n    for i in work:\n        if i >= n:\n            break\n\n        cbs_0, _ = cbs_list[i]\n        es_0 = frozenset.union(*[es for es, _ in cbs_0])\n        for cbs_1, _ in cbs_list[i+1:]:\n            es_1 = frozenset.union(*[es for es, _ in cbs_1])\n            similarity = _similarity_of(es_0, es_1)\n            if similarity < similarity_threshold:\n                continue\n\n            extended_cbs_list.append((frozenset.union(cbs_0, cbs_1), similarity))\n\n    logger.info(\"{} - Extended with {} Common Behaviour Sets\".format(pid, len(extended_cbs_list)))\n    return extended_cbs_list\n\ndef _similarity_of(*list_of_element_sets):\n    \"\"\" Calculate similarity between element sets\n\n    :param list_of_element_sets: list of element sets\n\n    :returns: similarity value 0.0 <= v <= 1.0\n    \"\"\"\n\n    if len(frozenset.union(*list_of_element_sets)) > 0:\n        return (len(frozenset.intersection(*list_of_element_sets)) /\n                len(frozenset.union(*list_of_element_sets)))\n    else:\n        return 0.0\n\ndef _class_hierarchy_branches(instance_graph, ontology_graph, cbs):\n    \"\"\" Generate class hierarchy branch of all ESs in a CBS\n\n    :param instance_graph: a knowledge graph instance\n    :param ontology_graph: a knowledge graph instance\n    :param cbs: a CBS as a set\n\n    :returns: a dictionary with elements as keys and branches as (nested) lists\n    \"\"\"\n    pid = multiprocessing.current_process()\n    logger.debug(\"{} - Determining class hierarchy branches\".format(pid))\n    element_branches = {}\n    for es, _ in cbs:\n        for e in es:\n            if e in element_branches.keys():\n                continue\n\n            branch = []\n            for t in instance_graph.graph.objects(e, RDF.type):\n                subbranch = [t]\n                _branch_traversal(ontology_graph, t, subbranch)\n\n                branch.append(subbranch)\n\n            element_branches[e] = branch\n\n    return element_branches\n\ndef _coverage_per_class(instance_graph, element_branches):\n    \"\"\" Determine instance coverage of class types\n\n    :param instance_graph: a knowledge graph instance\n    :param element_branches: a dictionary with elements as keys and branches as (nested) lists\n\n    :returns: a dictionary with class types as keys and (instances, local coverage) tuples as items\n    \"\"\"\n    pid = multiprocessing.current_process()\n    logger.debug(\"{} - Determining coverage per class\".format(pid))\n    coverage = {}\n    for e in element_branches.keys():\n        for ctype in instance_graph.graph.objects(e, RDF.type):\n            if ctype in coverage.keys():\n                continue\n            coverage[ctype] = ({e}, 0.0)\n            for f in element_branches.keys():\n                if e is f:\n                    continue\n                for branch 
in element_branches[f]:\n                    if ctype in branch: # ignore multiple inheritance\n                        coverage[ctype][0].add(f)\n                        break\n\n    return coverage\n\ndef _lowest_level_class(instance_graph=None, ontology_graph=None, cbs=frozenset()):\n    \"\"\" Determine the Lowest Level Classes of the ESs in a CBS\n\n    :param instance_graph: a knowledge graph instance\n    :param ontology_graph: a knowledge graph instance\n    :param cbs: a CBS (set of (es, pa) sets)\n\n    :returns: a dictionary holding class:({covered elements}, coverage support) items\n    \"\"\"\n    # determine class hierarchy for elements in cbs\n    element_branches = _class_hierarchy_branches(instance_graph, ontology_graph, cbs)\n    number_of_elements = len(element_branches)\n\n    # determine coverage per class\n    coverage = _coverage_per_class(instance_graph, element_branches)\n\n    pid = multiprocessing.current_process()\n    logger.debug(\"{} - Filtering LLC coverage\".format(pid))\n    sorted_keys = sorted(coverage, key=lambda k: len(coverage[k][0]), reverse=True)\n    for i in range(len(sorted_keys)):\n        # prefer broader coverage\n        coverage[sorted_keys[i]][0].difference_update(*[coverage[sorted_keys[j]][0] for j in range(i)])\n        if len(coverage[sorted_keys[i]][0]) <= 0:\n            del coverage[sorted_keys[i]]\n            continue\n\n        # support within this CBS\n        if number_of_elements > 0:\n            coverage[sorted_keys[i]] = (coverage[sorted_keys[i]][0], len(coverage[sorted_keys[i]][0]) / number_of_elements)\n        else:\n            coverage[sorted_keys[i]] = (coverage[sorted_keys[i]][0], 0.0)\n\n    return coverage\n\ndef _branch_traversal(ontology_graph, ctype, branch):\n    \"\"\" Determine the upward class hierarchy branch for each class\n\n    :param ontology_graph: a knowledge graph instance\n    :param ctype: the class type to start from\n    :param branch: a list representing the branch, multiple inheritance results in nested lists\n\n    :updates: branch\n    :returns: none\n    \"\"\"\n    sclasses = list(ontology_graph.graph.objects(ctype, RDFS.subClassOf))\n    if len(sclasses) == 1:\n        sclass = sclasses[0]\n        branch.append(sclass)\n        _branch_traversal(ontology_graph, sclass, branch)\n    elif len(sclasses) >= 2:\n        for sclass in sclasses:\n            subbranch = [sclass]\n            _branch_traversal(ontology_graph, sclass, subbranch)\n            branch.append(subbranch)\n\ndef evaluate_rules(instance_graph, rules, queue, final_rule_set, minimal_support, minimal_confidence):\n    \"\"\" Evaluate suggested rule r given knowledge graph G on support and confidence\n\n    :param instance_graph: a knowledge graph instance\n    :param rules: shared list of semantic association rules as tuple (type, antecedent, consequent(s))\n    :param queue: slice of rules to focus on\n    :param final_rule_set: shared list of accepted rules\n    :param minimal_support: only accept rules with a higher support\n    :param minimal_confidence: only accept rules with a higher confidence\n\n    :returns: none\n    \"\"\"\n    pid = multiprocessing.current_process()\n    logger.info(\"{} - Starting rule evaluation (sup >= {}, conf >= {})\".format(pid, \n                                                                              minimal_support,\n                                                                              minimal_confidence))\n    while True:\n        work = queue.get()\n        if work is None:\n            break\n\n        for rule in rules[work]:\n            support = support_of(instance_graph, rule)\n            confidence = confidence_of(instance_graph, rule)\n\n            if support.value >= minimal_support and\\\n               confidence.value >= minimal_confidence:\n                final_rule_set.append(IRule(rule, support, confidence))\n\ndef support_of(instance_graph, rule):\n    \"\"\" Calculate the support for rule r given knowledge graph G\n\n    :param instance_graph: a knowledge graph instance\n    :param rule: a semantic association rule as tuple (type, antecedent, 
consequent(s))\n\n :returns: support value between 0 and 1\n \"\"\"\n p, o = rule.antecedent # antecedent\n\n pid = multiprocessing.current_process()\n logger.debug(\"{} - Calculating support\".format(pid))\n number_of_supporting_facts = 0\n elements_of_type = frozenset(instance_graph.graph.subjects(RDF.type, rule.ctype))\n for s in elements_of_type:\n if (s, p, o) in instance_graph.graph:\n number_of_supporting_facts += 1\n\n if len(elements_of_type) > 0:\n support = number_of_supporting_facts / len(elements_of_type)\n else:\n support = 0.0\n\n return IRule.Measure(support, number_of_supporting_facts, len(elements_of_type))\n\ndef confidence_of(instance_graph, rule):\n \"\"\" Calculate the confidence for rule r given knowledge graph G\n\n :param instance_graph: a knowledge graph instance\n :param rule: a semantic association rule as tuple (type, antecedent, consequent(s))\n\n :returns: confidence value between 0 and 1\n \"\"\"\n p_0, o_0 = rule.antecedent # antecedent\n\n pid = multiprocessing.current_process()\n logger.debug(\"{} - Calculating confidence\".format(pid))\n number_of_antecedent_supporting_facts = 0\n number_of_rule_supporting_facts = 0\n elements_of_type = frozenset(instance_graph.graph.subjects(RDF.type, rule.ctype))\n for s in elements_of_type:\n if (s, p_0, o_0) not in instance_graph.graph:\n continue\n\n number_of_antecedent_supporting_facts += 1\n for p_1, o_1 in rule.consequent: # consequent\n if (s, p_1, o_1) not in instance_graph.graph:\n break\n\n else:\n number_of_rule_supporting_facts += 1\n\n if number_of_antecedent_supporting_facts > 0:\n confidence = number_of_rule_supporting_facts / number_of_antecedent_supporting_facts\n else:\n confidence = 0.0\n\n return IRule.Measure(confidence, number_of_rule_supporting_facts, number_of_antecedent_supporting_facts)\n\nif __name__ == \"__main__\":\n print(\"Functions for Semantic Rule Learning\")\n","repo_name":"wxwilcke/MINOS","sub_path":"algorithms/semantic_rule_learning_mp.py","file_name":"semantic_rule_learning_mp.py","file_ext":"py","file_size_in_byte":14231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"34903049629","text":"import base64\nimport datetime\nimport os\n\nimport pandas as pd\nfrom bokeh.embed import file_html\nfrom bokeh.io import save\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import Div, NumeralTickFormatter, Panel, Tabs, Title\nfrom bokeh.models import Range1d\nfrom bokeh.palettes import d3\nfrom bokeh.plotting import figure\nfrom bokeh.resources import CDN\nfrom tornado import ioloop, web\n\nfrom .src.cluster import Cluster\nfrom .src.trace_io import TraceReport\n\n\ndef average(data):\n return sum(data) / len(data)\n\n\ndef resample(data, time, period=\"1S\"):\n data.index = time\n data = data.resample(period).first()\n return data.interpolate()\n\n\ndef get_sorted_nodes(cluster, keys):\n nodes = []\n\n for key in keys:\n key_nodes = cluster.get_processes_by_key(key)\n for (node, _) in key_nodes:\n if node not in nodes:\n nodes.append(node)\n return nodes\n\n\ndef process_name(process):\n return process.get(\"name\", process.get(\"key\")) or \"Unknown process\"\n\n\ndef plot_time_chart(data, draw_fn, min_datetime, max_datetime, generate_rows=None):\n if not generate_rows:\n def generate_rows(node, frame):\n return [[(node, frame)]]\n\n result = []\n\n for (node, frame) in data:\n rows = generate_rows(node, frame)\n for row in rows:\n columns = []\n for col in row:\n f = figure(plot_width=1000, plot_height=250,\n 
x_range=[min_datetime, max_datetime],\n                       x_axis_type='datetime')\n                draw_fn(col, f)\n                columns.append(f)\n\n            result.append(columns)\n    return gridplot(result)\n\n\ndef plot_resources_usage(report):\n    monitor = report.monitor\n    nodes = get_sorted_nodes(report.cluster, [\"monitor\"])\n    data = [(node, monitor[node]) for node in nodes if node in monitor]\n\n    if not data:\n        return Div(text=\"(no data)\")\n\n    datetimes = pd.concat(frame[\"datetime\"] for (_, frame) in data)\n\n    min_datetime = datetimes.min() - datetime.timedelta(seconds=1)\n    max_datetime = datetimes.max()\n\n    def generate_rows(node, frame):\n        return [\n            [\n                (node, frame, \"cpu\"),\n                (node, frame, \"network\")\n            ],\n            [\n                (node, frame, \"mem\"),\n                (node, frame, \"net-connections\")\n            ]\n        ]\n\n    def draw(args, figure):\n        node, frame, method = args\n        time = frame[\"datetime\"]\n        resources = frame[\"resources\"]\n\n        # all panel types currently share the same dimensions\n        figure.plot_width = 950\n        figure.plot_height = 400\n\n        figure.min_border_right = 20\n\n        if method in (\"cpu\", \"mem\"):\n            figure.y_range = Range1d(0, 1)\n\n        if len(resources) == 0:\n            return\n\n        def draw_bytes(title, read_col, write_col):\n            if resources.iloc[0].get(read_col) is None or resources.iloc[0].get(write_col) is None:\n                return\n\n            def accumulate(column):\n                values = resources.apply(lambda res: res[column])\n                values = values - values.min()\n                return resample(values, time)\n\n            read = accumulate(read_col)\n            write = accumulate(write_col)\n\n            figure.yaxis[0].formatter = NumeralTickFormatter(format=\"0.0b\")\n            figure.line(read.index, read, color=\"blue\", legend_label=\"{} RX\".format(title))\n            figure.line(write.index, write, color=\"red\", legend_label=\"{} TX\".format(title))\n\n        if method == \"cpu\":\n            cpu_count = len(resources.iloc[0][\"cpu\"])\n            cpus = [resample(resources.apply(lambda res: res[\"cpu\"][i]), time) for i in range(cpu_count)]\n            cpu_mean = resample(resources.apply(lambda res: average(res[\"cpu\"])), time)\n\n            processes = (process_name(p) for p in report.cluster.nodes[node] if process_name(p))\n            figure.title = Title(text=\"{}: {}\".format(node, \",\".join(processes)))\n\n            figure.yaxis[0].formatter = NumeralTickFormatter(format=\"0 %\")\n\n            palette = d3[\"Category20\"][20]\n            for (i, cpu) in enumerate(cpus):\n                color = palette[i % 20]\n                figure.line(cpu.index, cpu / 100.0, color=color, legend_label=f\"CPU #{i}\")\n            figure.line(cpu_mean.index, cpu_mean / 100.0, color=\"red\", legend_label=\"Average CPU\", line_dash=\"dashed\",\n                        line_width=5)\n        elif method == \"mem\":\n            mem = resample(resources.apply(lambda res: res[\"mem\"]), time)\n            figure.yaxis[0].formatter = NumeralTickFormatter(format=\"0 %\")\n            figure.line(mem.index, mem / 100.0, color=\"red\", legend_label=\"Memory\")\n        elif method == \"network\":\n            draw_bytes(\"Net\", \"net-read\", \"net-write\")\n        elif method == \"net-connections\":\n            connections = resample(resources.apply(lambda res: res[\"connections\"]), time)\n            figure.line(connections.index, connections, legend_label=\"Network connections\")\n        elif method == \"io\":\n            draw_bytes(\"Disk\", \"disk-read\", \"disk-write\")\n\n    return plot_time_chart(data, draw, min_datetime=min_datetime, max_datetime=max_datetime,\n                           generate_rows=generate_rows)\n\n\ndef plot_profile(flamegraph):\n    with open(flamegraph, \"rb\") as f:\n        data = f.read()\n    base64_content = base64.b64encode(data).decode()\n    # embed the flamegraph SVG as an inline data URI\n    content = f\"\"\"<img src=\"data:image/svg+xml;base64,{base64_content}\">\"\"\"\n    return Div(text=content)\n\n\ndef plot_output(report):\n    nodes = [k for k in report.outputs.keys() if not 
k.startswith(\"monitor\")]\n tabs = []\n\n for node in nodes:\n output_tabs = []\n for output in (\"out\", \"err\"):\n content = report.outputs[node].get(output, \"\")\n if content:\n content = content.replace(\"\\n\", \"
\")\n output_tabs.append(Panel(child=Div(text=content, sizing_mode='stretch_both'), title=output))\n\n if output_tabs:\n tabs.append(Panel(child=Tabs(tabs=output_tabs, sizing_mode='stretch_both'), title=node))\n\n return Tabs(tabs=tabs, sizing_mode='stretch_both')\n\n\ndef create_page(report, directory):\n structure = [\n (\"Resources\", plot_resources_usage),\n (\"Output\", plot_output)\n ]\n\n flamegraph = os.path.join(directory, \"scheduler.svg\")\n if os.path.isfile(flamegraph):\n structure.append((\"Scheduler profile\", lambda _: plot_profile(flamegraph)))\n\n tabs = []\n for name, fn in structure:\n f = fn(report)\n tabs.append(Panel(child=f, title=name))\n\n return Tabs(tabs=tabs)\n\n\ndef load_report(cluster_file):\n with open(cluster_file) as file:\n cluster = Cluster.deserialize(file)\n if not os.path.isdir(cluster.workdir):\n new_workdir = os.path.abspath(os.path.dirname(cluster_file))\n print(f\"Cluster workdir {cluster.workdir} not found, setting to {new_workdir}\")\n cluster.workdir = new_workdir\n return TraceReport.load(cluster)\n\n\ndef generate(cluster_file, output):\n report = load_report(cluster_file)\n directory = os.path.dirname(cluster_file)\n page = create_page(report, directory)\n save(page, output, title=\"Cluster monitor\", resources=CDN)\n\n\ndef serve(cluster_file, port):\n directory = os.path.dirname(cluster_file)\n\n class Handler(web.RequestHandler):\n def get(self):\n report = load_report(cluster_file)\n page = create_page(report, directory)\n self.write(file_html(page, CDN, \"Cluster report\"))\n\n app = web.Application([\n (r\"/\", Handler),\n ])\n app.listen(port)\n\n print(f\"Serving report at http://0.0.0.0:{port}\")\n ioloop.IOLoop.current().start()\n","repo_name":"It4innovations/rsds","sub_path":"scripts/monitor/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"44"} +{"seq_id":"72212709572","text":"#!/usr/bin/env python\n\nimport plotly.plotly as py\nimport plotly.io as pio\n\nclass FlowAnalyzer:\n def __init__(self):\n pass\n\n def make_chart(self, file_name='chart.png'):\n data = dict(\n type='sankey',\n node = dict(\n pad = 15,\n thickness = 20,\n line = dict(\n color = \"black\",\n width = 0.5\n ),\n label = [\"A1\", \"A2\", \"B1\", \"B2\", \"C1\", \"C2\"],\n color = [\"blue\", \"blue\", \"blue\", \"blue\", \"blue\", \"blue\"]\n ),\n link = dict(\n source = [0,1,0,2,3,3],\n target = [2,3,3,4,4,5],\n value = [8,4,2,8,4,2]\n ))\n\n layout = dict(\n title = \"Basic Sankey Diagram\",\n font = dict(\n size = 10\n )\n )\n\n fig = dict(data=[data], layout=layout)\n pio.write_image(fig, file_name)\n \n\ndef main():\n fa = FlowAnalyzer()\n fa.make_chart()\n \nif __name__ == '__main__':\n main()\n","repo_name":"daniele-sartiano/DeepNetwork","sub_path":"src/utils/flows_analyzer.py","file_name":"flows_analyzer.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"35739610032","text":"from datetime import datetime\nfrom django.http import HttpResponse\nfrom django.utils.datastructures import SortedDict\nfrom django.shortcuts import render_to_response, redirect, get_object_or_404\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nimport simplejson\nfrom apps.coastalmodels.forms import SWANInputForm, ModelInputForm, ModelInputDataForm\nfrom 
apps.coastalmodels.models import SWANInput, ModelInput, ModelInputData, CoastalModel\nfrom settings import settings\n\nfrom settings.settings import MEDIA_ROOT\nfrom utils.system import mkdir_p\nimport os\n\n@login_required\ndef modelinput_add(request):\n    # sticks in a POST or renders empty form\n    form = ModelInputForm(request.user, request.POST or None)\n    if form.is_valid():\n        cmodel = form.save(commit=False)\n        cmodel.time_created = datetime.now()\n        cmodel.last_modified = datetime.now()\n        cmodel.user = request.user\n        cmodel.save()\n\n        modelinput = get_object_or_404(ModelInput, pk=cmodel.id)\n        cmodel.input_dir = os.path.abspath(\"/users/U%02d/modelinput/M%02d\" % (cmodel.user.id, modelinput.id))\n        mkdir_p(os.path.abspath(\"%s%s\" % (MEDIA_ROOT, cmodel.input_dir)))\n        cmodel.name = \"%s-%s-M%02d\" % (cmodel.project.name, cmodel.model.name, cmodel.id)\n        cmodel.save()\n\n        #TODO refactor out the hardcoded model reference here.\n        if not cmodel.upload_parfile:\n            if cmodel.model.name == \"SWAN\":\n                return redirect(reverse(\"coastalmodels_swaninputadd\"))\n        else:\n            #TODO implement the file uploader...\n            return redirect(reverse(\"coastalmodels_modelinputlist\"))\n\n    return render_to_response('coastalmodels/modelinput_add.html',\n                              {'modelinput_form': form},\n                              context_instance=RequestContext(request))\n\n# rewrite the modelinput interface to follow \"DRY\".\n@login_required\ndef modelinput_view(request, modelinput_id):\n    object = get_object_or_404(ModelInput, pk=modelinput_id)\n    if request.user == object.user or request.user.groups.filter(name=object.group):\n        return render_to_response('coastalmodels/modelinput_detail.html',\n                                  {'object': object},\n                                  context_instance=RequestContext(request))\n    else:\n        return redirect(reverse(\"coastalmodels_modelinputlist\"))\n\n\n@login_required\ndef modelinput_edit(request, modelinput_id):\n    modelinput = get_object_or_404(ModelInput, pk=modelinput_id)\n\n    if request.user != modelinput.user:\n        return redirect(reverse(\"coastalmodels_modelinputlist\"))\n\n    form = ModelInputForm(request.user, request.POST or None, instance=modelinput)\n\n    if form.is_valid():\n        mimodel = form.save(commit=False)\n        #add extra attributes that users shall not add\n        mimodel.last_modified = datetime.now()\n\n        #mimodel.name = 'test1'\n        mimodel.save()\n\n        # for illegal request we simply ignore.\n        return redirect(reverse(\"coastalmodels_modelinputlist\"))\n\n    else:\n        return render_to_response('coastalmodels/modelinput_edit.html',\n                                  {'modelinput_form': form, 'modelinput_id': modelinput_id},\n                                  context_instance=RequestContext(request))\n\n\n@login_required\ndef modelinput_delete(request, modelinput_id):\n    modelinput = get_object_or_404(ModelInput, pk=modelinput_id)\n    if request.user == modelinput.user:\n        modelinput.delete()\n\n    return redirect(reverse(\"coastalmodels_modelinputlist\"))\n\n\n@login_required\ndef modelinputdata_add(request, modelinput_id):\n    modelinput = get_object_or_404(ModelInput, pk=modelinput_id)\n\n    if request.user == modelinput.user or request.user.groups.filter(name=modelinput.group):\n        # sticks in a POST or renders empty form\n        form = ModelInputDataForm(request.user, request.POST or None)\n\n        if form.is_valid():\n            imodel = form.save(commit=False)\n            imodel.model_input = modelinput\n            imodel.user = request.user\n\n            imodel.project = modelinput.project\n            imodel.group = modelinput.group\n            imodel.model = modelinput.model\n            imodel.time_created = datetime.now()\n            imodel.last_modified = datetime.now()\n            imodel.input_dir = modelinput.input_dir\n            imodel.save()\n            imodel.name = 
\"%s-ID%02d\" % (modelinput.name, imodel.id)\n imodel.save()\n\n datafile = request.FILES.get('file')\n dataurl = settings.MEDIA_URL + imodel.input_dir + datafile.name.replace(\" \", \"_\")\n\n data = [{'name': datafile.name, 'url': dataurl, 'thumbnail_url': dataurl,\n 'delete_url': reverse('coastalmodels_modelinputdatadelete', args=[imodel.id]),\n 'delete_type': \"DELETE\"}]\n\n response = JSONResponse(data, {}, response_mimetype(request))\n response['Content-Disposition'] = 'inline; filename=files.json'\n return response\n\n return render_to_response('coastalmodels/modelinputdata_add.html',\n {'modelinputdata_form': form, 'modelinput_id': modelinput_id},\n context_instance=RequestContext(request))\n\n return redirect(reverse(\"coastalmodels_modelinputlist\"))\n\n@login_required\ndef response_mimetype(request):\n if \"application/json\" in request.META['HTTP_ACCEPT']:\n return \"application/json\"\n else:\n return \"text/plain\"\n\n@login_required\ndef modelinputdata_delete(request, modelinputdata_id):\n modelinputdata = get_object_or_404(ModelInputData, pk=modelinputdata_id)\n\n if request.user == modelinputdata.user:\n modelinputdata.delete()\n if request.is_ajax():\n response = JSONResponse(True, {}, response_mimetype(request))\n response['Content-Disposition'] = 'inline; filename=files.json'\n return response\n\n return redirect(reverse(\"coastalmodels_modelinputlist\"))\n\n\n@login_required\ndef modelinput_export(request, modelinput_id):\n modelinput = get_object_or_404(ModelInput, pk=modelinput_id)\n try:\n # if modelinput.model.name == \"SWAN\" and modelinput.swaninput:\n return swaninput_export(request, modelinput.swaninput.id)\n # if modelinput.model.name == \"Delft3D\":\n # return delft3d_export(request, modelinput.delft3d_input.id)\n except:\n return redirect(reverse(\"coastalmodels_modelinputlist\"))\n\n\n@login_required\ndef swaninput_add(request):\n # sticks in a POST or renders empty form\n form = SWANInputForm(request.user, request.POST or None)\n if form.is_valid():\n swmodel = form.save(commit=False)\n swmodel.user = request.user\n swmodel.time_created = datetime.now()\n swmodel.last_modified = datetime.now()\n swmodel.project = swmodel.model_input.project\n swmodel.model = get_object_or_404(CoastalModel, name='SWAN')\n swmodel.save()\n swmodel.input_dir = swmodel.model_input.input_dir\n swmodel.name = \"%s-I%02d\" % (swmodel.model_input.name, swmodel.id)\n swmodel.save()\n modelinput = get_object_or_404(ModelInput, pk=swmodel.model_input.id)\n modelinput.parfile_ready = True\n modelinput.upload_parfile = False\n modelinput.save()\n # export to txt file for job submission\n swaninput_exporttxt(swmodel.id)\n\n return redirect(reverse(\"coastalmodels_swaninputlist\"))\n\n return render_to_response('coastalmodels/swan/swaninput_add.html',\n {'swaninput_form': form},\n context_instance=RequestContext(request))\n\n\n@login_required\ndef swaninput_view(request, swaninput_id):\n # sticks in a POST or renders empty form\n object = get_object_or_404(SWANInput, pk=swaninput_id)\n if request.user == object.user or request.user.groups.filter(name=object.group):\n return render_to_response('coastalmodels/swan/swaninput_detail.html',\n {'object': object},\n context_instance=RequestContext(request))\n else:\n return redirect(reverse(\"coastalmodels_swaninputlist\"))\n\n\n@login_required\ndef swaninput_edit(request, swaninput_id):\n # sticks in a POST or renders empty form\n swaninput = get_object_or_404(SWANInput, pk=swaninput_id)\n\n if request.user != swaninput.user:\n return 
redirect(reverse(\"coastalmodels_modelinputlist\"))\n\n form = SWANInputForm(request.user, request.POST or None, instance=swaninput)\n\n if form.is_valid():\n swmodel = form.save(commit=False)\n #add extra attributes that users shall not add\n swmodel.last_modified = datetime.now()\n swmodel.name = \"%s-I%02d\" % (swmodel.model_input.name, swmodel.id)\n swaninput_exporttxt(swaninput_id)\n swmodel.save()\n\n # for illegal request we simply ignore.\n return redirect(reverse(\"coastalmodels_swaninputlist\"))\n\n else:\n return render_to_response('coastalmodels/swan/swaninput_edit.html',\n {'swaninput_form': form, 'swaninput_id': swaninput_id},\n context_instance=RequestContext(request))\n\n\n@login_required\ndef swaninput_delete(request, swaninput_id):\n swaninput = get_object_or_404(SWANInput, pk=swaninput_id)\n\n if request.user == swaninput.user:\n swaninput.delete()\n\n # for illegal request we simple ignore.\n return redirect(reverse(\"coastalmodels_swaninputlist\"))\n\n# TODO: move swaninput_modeltodict and swaninput_exporttxt in the model definition\ndef swaninput_modeltodict(swaninput_id):\n swaninput = get_object_or_404(SWANInput, pk=swaninput_id)\n swaninputdict = SortedDict(swaninput.get_fields())\n # swaninputdict = SortedDict(model_to_dict(SWANInput.objects.get(pk=swaninput_id),\n # exclude=['id','user', 'group', 'model_type', 'time_created', 'last_modified']))\n del swaninputdict['ID']\n del swaninputdict['name']\n del swaninputdict['project']\n del swaninputdict['model input']\n del swaninputdict['user']\n del swaninputdict['group']\n del swaninputdict['input dir']\n del swaninputdict['model']\n del swaninputdict['time created']\n del swaninputdict['last modified']\n # reconstruct COMPUTE line before deleting\n swaninputdict['compute'] = \"nonst %s %s hr %s\" % (\n swaninputdict['time start'], swaninputdict['time interval'], swaninputdict['time end'])\n del swaninputdict['time start']\n del swaninputdict['time end']\n del swaninputdict['time interval']\n\n return swaninputdict\n\n\ndef swaninput_exporttxt(swaninput_id):\n swaninput = SWANInput.objects.get(pk=swaninput_id)\n swaninputdict = swaninput_modeltodict(swaninput_id)\n swaninput_dir = MEDIA_ROOT + swaninput.input_dir\n mkdir_p(swaninput_dir)\n swaninput_file = os.path.abspath('%s/%s' % (swaninput_dir, swaninput.name))\n\n if os.path.exists(swaninput_file):\n os.remove(swaninput_file)\n\n f = open(swaninput_file, 'w')\n\n for name, value in swaninputdict.items():\n f.write(\"%s %s\\n\" % (name, value))\n f.write('stop')\n f.close()\n\n\ndef swaninput_export(request, swaninput_id):\n swaninputdict = swaninput_modeltodict(swaninput_id)\n # we save this\n if swaninputdict:\n response = render_to_response(\"coastalmodels/swan/swaninput_export.html\",\n {'data': swaninputdict}, context_instance=RequestContext(request), mimetype='text/txt')\n response[\"Content-Disposition\"] = \"attachment; filename=SWAN_INPUT.txt\"\n return response\n return redirect(reverse(\"coastalmodels_modelinputlist\"))\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"JSON response class.\"\"\"\n def __init__(self,obj='',json_opts={},mimetype=\"application/json\",*args,**kwargs):\n content = simplejson.dumps(obj,**json_opts)\n super(JSONResponse,self).__init__(content,mimetype,*args,**kwargs)\n","repo_name":"mvrk/TEAKWOOD","sub_path":"utils/recycled/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"43561917523","text":"# 
Example 1\nfor name in ['Jock', 'Nele', 'Alan']:\n    print(\"Hello \" + name)\n\n# Example 2\nmy_list = [\"Hello\", \"how\", \"are\", \"you\"]\nfor word in my_list:\n    print(word.upper())\n\n# Example 3\nfor num in [1, 2, 3, 4]:\n    if num % 2 == 0:\n        print(num)\n\n# Example 4\nnames = ['Jock', 'Nele', 'QUIT', 'Alan']\nfor name in names:\n    if name == 'QUIT':\n        break # break breaks the loop\n    print(name)\n\n# Example 5\nnames = ['Jock', 'Nele', 'QUIT', 'Alan']\nfor name in names:\n    if name == 'QUIT':\n        continue # continue skips a step\n    print(name)\n","repo_name":"jock-dalby/pythonTreehouse","sub_path":"python_basics/for_loop.py","file_name":"for_loop.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"}
{"seq_id":"70812592454","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensor2tensor.layers import common_layers\nimport tensorflow as tf\nimport numpy as np\nimport re\n\nflags = tf.app.flags\nflags.DEFINE_bool('use_decay', False,\n                  'Whether to use decay or not in the update equations.')\nflags.DEFINE_bool('use_sigmoid', False,\n                  'Whether to use a sigmoid transformation over the sum; can '\\\n                   'be replaced by any bijective non-linearity.')\nflags.DEFINE_enum('function_type', 'sigmoid', ['sigmoid', 'tanh'],\n                  'Non-linearity to be applied to the system.')\nflags.DEFINE_bool('dampen_grad', False,\n                  'Whether to dampen the gradients passing on to the variables')\nFLAGS = flags.FLAGS\n\nLAYER_RE = re.compile(\".*rev_mp/layer_([0-9]*)/([fgb])/.*\")\n\ndef _print_tensors(*args):\n  \"\"\"Print tensor shapes during graph construction.\"\"\"\n  print ('Printing Tensors =====================')\n  for idxs, arg in enumerate(args):\n    if isinstance(arg, list):\n      for idx, val in enumerate(arg):\n        print (val)\n    else:\n      print (arg)\n  print ('End Printing Tensors ==================')\n\n\ndef _get_variable_util(f, g, variables, num_layers):\n  \"\"\"Get back the variable list from scopes.\n  Adapted from the tensor2tensor revnet block.\"\"\"\n  f_vars = []\n  g_vars = []\n  vars_in_scope = variables\n  var_name_dict = dict()\n  id_cntr = 0\n\n  # Assumes that parameters across layers are shared\n  for i, var in enumerate(vars_in_scope):\n    print (var.name)\n    regex = LAYER_RE.match(var.name)\n    layer_num = int(regex.group(1))\n    fn_name = regex.group(2)\n\n    if var.name not in var_name_dict:\n      var_name_dict[var.name] = id_cntr\n      id_cntr += 1\n\n    if fn_name == 'f' or fn_name == 'b':\n      f_vars.append(var)\n    if fn_name == 'g' or fn_name == 'b':\n      g_vars.append(var)\n\n  return f_vars, g_vars, var_name_dict\n\ndef message_passing_step(inputs,\n                         msg_fn,\n                         agg_fn,\n                         state_fn,\n                         *args):\n  \"\"\"Vanilla message passing step implementing f/g.\"\"\"\n  num_nodes = args[0]\n  adj_mat = args[1]\n  dims_for_state_fn = args[2]\n  incr_node_rep = inputs\n\n  for i in range(1):\n    # Message passing code here\n    init_shape = tf.shape(incr_node_rep)\n    temp_t = tf.tile(tf.expand_dims(incr_node_rep,2),\n                     [1, 1, init_shape[1], 1])\n\n    m_t = msg_fn(temp_t)\n    incoming_node = agg_fn(m_t*tf.expand_dims(adj_mat, 3))\n    incr_node_rep = state_fn(incr_node_rep, incoming_node,\n                             {'dim': dims_for_state_fn})\n\n  return incr_node_rep\n\n\ndef _mp_forward(xs, f, g, parity, lower_triangular_op=False,\n                lambda_t=0):\n  \"\"\"One layer of message passing.\"\"\"\n  h0, h1 = xs\n  non_lin_fn = None\n  if FLAGS.function_type == \"sigmoid\":\n    non_lin_fn = tf.nn.sigmoid\n  elif FLAGS.function_type == \"tanh\":\n    non_lin_fn = 
tf.nn.tanh\n  if parity:\n    if FLAGS.use_decay:\n      h0_next = f(h1) + h0*lambda_t\n    elif FLAGS.use_sigmoid:\n      h0_next = non_lin_fn(f(h1) + h0)\n    else:\n      h0_next = f(h1) + h0\n    h1_next = h1\n  else:\n    h0_next = h0\n    if FLAGS.use_decay:\n      h1_next = g(h0) + h1*lambda_t\n    elif FLAGS.use_sigmoid:\n      h1_next = non_lin_fn(g(h0) + h1)\n    else:\n      h1_next = h1 + g(h0)\n\n  return (h0_next, h1_next)\n\ndef _mp_backward(ys, grad_ys, f, g, parity, f_vars, g_vars, lambda_t=0):\n  \"\"\"Backprop operation for one layer.\"\"\"\n  y0, y1 = ys\n  grad_y0, grad_y1 = grad_ys\n\n  # Reconstruct the activations of the previous step\n  if parity:\n    x1 = y1\n    y1_stop = tf.stop_gradient(y1)\n    x1_stop = tf.stop_gradient(x1)\n    fy1 = f(y1_stop)\n    if FLAGS.use_decay:\n      x0 = (y0 - fy1)/(lambda_t)\n    elif FLAGS.use_sigmoid:\n      y0_stop = tf.stop_gradient(y0)\n      if FLAGS.function_type == \"tanh\":\n        x0 = 0.5*(tf.log(1 + y0_stop) - tf.log(1 - y0_stop)) - fy1\n      elif FLAGS.function_type == \"sigmoid\":\n        x0 = tf.log(y0_stop + 1e-10) - tf.log(1 - y0_stop + 1e-10) - fy1\n    else:\n      x0 = y0 - fy1\n  else:\n    x0 = y0\n    y0_stop = tf.stop_gradient(y0)\n    x0_stop = tf.stop_gradient(x0)\n    gy0 = g(y0_stop)\n    if FLAGS.use_decay:\n      x1 = (y1 - gy0)/(lambda_t)\n    elif FLAGS.use_sigmoid:\n      y1_stop = tf.stop_gradient(y1)\n      if FLAGS.function_type == \"tanh\":\n        x1 = 0.5*(tf.log(1 + y1_stop) - tf.log(1 - y1_stop)) - gy0\n      else:\n        x1 = tf.log(y1_stop + 1e-10) - tf.log(1 - y1_stop + 1e-10) - gy0\n    else:\n      x1 = y1 - gy0\n\n  # Compute the gradients with respect to x0, x1, ws\n  retval = [None]\n  non_lin_fn = None\n  if FLAGS.function_type == \"sigmoid\":\n    non_lin_fn = tf.nn.sigmoid\n  elif FLAGS.function_type == \"tanh\":\n    non_lin_fn = tf.nn.tanh\n\n  if parity:\n    grad_fy1 = tf.gradients(fy1, y1_stop, grad_y0)[0]\n    if FLAGS.use_decay:\n      grad_x0 = lambda_t*grad_y0\n      grad_x1 = grad_y1 + grad_fy1\n      grad_w_f = [gr for gr in tf.gradients(fy1, f_vars, grad_y0)]\n    elif FLAGS.use_sigmoid:\n      print ('Y0 stop: ', y0_stop, grad_y0)\n      temp_y0 = tf.stop_gradient(x0) + tf.stop_gradient(fy1)\n      grad_x0 = tf.gradients(non_lin_fn(temp_y0), temp_y0, grad_y0)[0]\n      grad_x1 = grad_y1 + tf.gradients(fy1, y1_stop, grad_x0)[0]\n      grad_w_f = tf.gradients(fy1, f_vars, grad_x0)\n    else:\n      grad_x0 = grad_y0\n      grad_x1 = grad_y1 + grad_fy1\n      grad_w_f = tf.gradients(fy1, f_vars, grad_y0)\n    retval = [(x0, x1), (grad_x0, grad_x1), grad_w_f]\n  else:\n    grad_gy0 = tf.gradients(gy0, y0_stop, grad_y1)[0]\n    if FLAGS.use_decay:\n      grad_x1 = lambda_t*grad_y1\n      grad_x0 = grad_y0 + grad_gy0\n      grad_w_g = [gr for gr in tf.gradients(gy0, g_vars, grad_y1)]\n    elif FLAGS.use_sigmoid:\n      temp_y1 = tf.stop_gradient(x1) + tf.stop_gradient(gy0)\n      grad_x1 = tf.gradients(non_lin_fn(temp_y1), temp_y1, grad_y1)[0]\n      grad_x0 = grad_y0 + tf.gradients(gy0, y0_stop, grad_x1)[0]\n      grad_w_g = tf.gradients(gy0, g_vars, grad_x1)\n    else:\n      grad_x1 = grad_y1\n      grad_x0 = grad_y0 + grad_gy0\n      grad_w_g = tf.gradients(gy0, g_vars, grad_y1)\n    retval = [(x0, x1), (grad_x0, grad_x1), grad_w_g]\n\n  retval_t = tf.tuple(tf.contrib.framework.nest.flatten(retval))\n  retval_tupled = tf.contrib.framework.nest.pack_sequence_as(retval, retval_t)\n  return retval\n\ndef _rev_mp_block_forward(x0, x1, f, g, num_layers=1):\n  \"\"\"Forward computation for a series of layers.\"\"\"\n  out = (x0, x1)\n  # Perform f step once and g step once (this comprises one message passing\n  # step in the system).\n  lambda_series = [1.0/np.sqrt(cnt+1) for cnt in range(2*num_layers)]\n  for i in range(num_layers):\n    prev_out = out\n    out = _mp_forward(out, f[i], g[i], 1, 
lambda_t=lambda_series[2*i])\n    out = _mp_forward(out, f[i], g[i], 0, lambda_t=lambda_series[2*i+1])\n\n  y0, y1 = out\n  return y0, y1\n\n\nclass RevMessagePassingBlock(object):\n  \"\"\"Block to perform reversible message passing.\"\"\"\n\n  def __init__(self,\n               f,\n               g,\n               num_layers=1,\n               is_training=True,\n               use_efficient_backprop=True):\n\n    if isinstance(f, list):\n      assert len(f) == num_layers\n    else:\n      f = [f]* num_layers\n\n    if isinstance(g, list):\n      assert len(g) == num_layers\n    else:\n      g = [g]*num_layers\n\n    scope_prefix = \"rev_mp/layer_%d/\"\n    f_scope = scope_prefix + \"f\"\n    g_scope = scope_prefix + \"g\"\n\n    self.f = f\n    self.g = g\n\n    self.num_layers = num_layers\n    self.is_training = is_training\n\n    self._use_efficient_backprop = use_efficient_backprop\n\n  def _efficient_grad_fn(self, inputs,\n                         variables,\n                         ys,\n                         grad_ys):\n    \"\"\"Computes gradient for a block of rev GNN layers.\"\"\"\n    f_vars, g_vars, var_names = _get_variable_util(self.f,\n        self.g, variables, self.num_layers)\n\n    _print_tensors(grad_ys)\n    f_var_grads = []\n    g_var_grads = []\n\n    # Reversing is essential during gradient computation\n    f = list(self.f)\n    g = list(self.g)\n    f.reverse()\n    g.reverse()\n\n    lambda_series = [1.0/(np.sqrt(cnt+1)) for cnt in range(2*self.num_layers)]\n    lambda_series.reverse()\n    for i in range(self.num_layers):\n      ys, grad_ys, grad_w_g = _mp_backward(ys, grad_ys, f[i], g[i],\n          0, f_vars, g_vars, lambda_t=lambda_series[2*i])\n      ys, grad_ys, grad_w_f = _mp_backward(ys, grad_ys, f[i], g[i],\n          1, f_vars, g_vars, lambda_t=lambda_series[2*i+1])\n      g_var_grads.append(grad_w_g)\n      f_var_grads.append(grad_w_f)\n\n    # Reverse variable grads: the variable utility outputs them in reverse order\n    f_var_grads.reverse()\n    g_var_grads.reverse()\n    variable_grads = [None]*len(variables)\n    tmp_cntr = 0\n    variable_mappings = dict()\n    for idx, v in enumerate(variables):\n      variable_mappings[v.name] = idx\n\n    # grad_w_g = [variables]\n    # Assumption: all variables are present at all time steps\n    num_vars_f = len(f_var_grads[0])\n    assert num_vars_f == len(f_var_grads[1]), \"Number of variables in f\"\n    num_vars_g = len(g_var_grads[0])\n    assert num_vars_g == len(g_var_grads[1]), \"Number of variables in g\"\n\n    for idxs, values in enumerate(f_var_grads):\n      for var_t, grad in list(zip(f_vars, values)):\n        indx = variable_mappings[var_t.name]\n        if isinstance(grad, tf.IndexedSlices):\n          grad = tf.convert_to_tensor(grad)\n        variable_grads[indx] = (variable_grads[indx] + grad\n            if variable_grads[indx] is not None else grad if\n            grad is not None else variable_grads[indx])\n\n    for idxs, values in enumerate(g_var_grads):\n      for var_t, grad in list(zip(g_vars, values)):\n        indx = variable_mappings[var_t.name]\n        if isinstance(grad, tf.IndexedSlices):\n          grad = tf.convert_to_tensor(grad)\n        variable_grads[indx] = (variable_grads[indx] + grad\n            if variable_grads[indx] is not None else grad)\n\n    grad_x0, grad_x1 = grad_ys\n    # _print_tensors(grad_x0, grad_x1, variable_grads)\n    return [grad_x0, grad_x1], variable_grads\n\n  def forward(self, x0, x1):\n    custom_grad_fn = (self._efficient_grad_fn if self._use_efficient_backprop\n                      else None)\n\n    # @common_layers.fn_with_custom_grad(custom_grad_fn)\n    def _forward(x0, x1):\n      return _rev_mp_block_forward(x0, x1, self.f, self.g,\n                                   self.num_layers)\n\n    return _forward(x0, x1)\n\n  def backward(self, y0, y1):\n    f = list(self.f)\n    g = list(self.g)\n    f.reverse()\n    g.reverse()\n    f_vars = [v for v in tf.trainable_variables() if 'rev_mp/layer_0' in v.name]\n\n    print (y0, y1)\n    for i in range(self.num_layers):\n      x0_i = y0\n      if 
FLAGS.use_sigmoid:\n if FLAGS.function_type == \"sigmoid\":\n x1_i = tf.log(y1 + 1e-10) - tf.log(1 - y1 + 1e-10) - g[i](y0)\n else:\n x1_i = 0.5*(tf.log(1 + y1 + 1e-10) - tf.log(1 - y1 + 1e-10)) - g[i](x0_i)\n else:\n x1_i = y1 - g[i](x0_i)\n x1 = x1_i\n if FLAGS.use_sigmoid:\n if FLAGS.function_type == \"sigmoid\":\n x0 = tf.log(x0_i + 1e-10) - tf.log(1 - x0_i + 1e-10) - f[i](x1)\n else:\n x0 = 0.5*(tf.log(1 + x0_i + 1e-10) - tf.log(1 - x0_i + 1e-10)) - f[i](x1)\n else:\n x0 = x0_i - f[i](x1)\n y1, y0 = x1, x0\n # y1 = tf.Print(y1, f_vars, summarize=10, message='var')\n\n f.reverse()\n g.reverse()\n return y0, y1\n\n\ndef rev_mp_block(x0, x1, f, g, num_layers=1, is_training=True):\n \"\"\"Block of reversible message passing.\"\"\"\n print ('INFO: Number of layers for message passing is ', num_layers)\n tf.logging.debug('Number of message passing layers: %d', num_layers)\n rev_mp_unit = RevMessagePassingBlock(f, g, num_layers, is_training)\n return rev_mp_unit.forward(x0, x1)\n\ndef rev_mp_block_backward(y0, y1, f, g, num_layers=1, is_training=True):\n \"\"\"Block of reversible message passing.\"\"\"\n print ('INFO: Number of layers for message passing is ', num_layers)\n tf.logging.debug('Number of message passing layers: %d', num_layers)\n rev_mp_unit = RevMessagePassingBlock(f, g, num_layers, is_training)\n return rev_mp_unit.backward(y0, y1)\n\n","repo_name":"google/graph-gen","sub_path":"research/graph_gen/rev_GNN.py","file_name":"rev_GNN.py","file_ext":"py","file_size_in_byte":12249,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"44"} +{"seq_id":"17496377043","text":"import textwrap\nfrom traitlets.config import LoggingConfigurable\nfrom traitlets import Unicode, Set, List, Dict, Tuple, default\nimport jinja2\nimport tarfile\nimport io\nimport os\nimport stat\nimport re\nimport docker\n\nTEMPLATE = r\"\"\"\nFROM buildpack-deps:zesty\n\n# Set up locales properly\nRUN apt-get update && \\\n apt-get install --yes --no-install-recommends locales && \\\n apt-get purge && \\\n apt-get clean && \\\n rm -rf /var/lib/apt/lists/*\n\nRUN echo \"en_US.UTF-8 UTF-8\" > /etc/locale.gen && \\\n locale-gen\n\nENV LC_ALL en_US.UTF-8\nENV LANG en_US.UTF-8\nENV LANGUAGE en_US.UTF-8\n\n# Use bash as default shell, rather than sh\nENV SHELL /bin/bash\n\n# Set up user\nENV NB_USER jovyan\nENV NB_UID 1000\nENV HOME /home/${NB_USER}\n\nRUN adduser --disabled-password \\\n --gecos \"Default user\" \\\n --uid ${NB_UID} \\\n ${NB_USER}\nWORKDIR ${HOME}\n\nRUN apt-get update && \\\n apt-get install --yes --no-install-recommends \\\n {% for package in base_packages -%}\n {{ package }} \\\n {% endfor -%}\n && apt-get purge && \\\n apt-get clean && \\\n rm -rf /var/lib/apt/lists/*\n\n{% if packages -%}\nRUN apt-get update && \\\n apt-get install --yes \\\n {% for package in packages -%}\n {{ package }} \\\n {% endfor -%}\n && apt-get purge && \\\n apt-get clean && \\\n rm -rf /var/lib/apt/lists/*\n{% endif -%}\n\nEXPOSE 8888\n\n{% if env -%}\n# Almost all environment variables\n{% for item in env -%}\nENV {{item[0]}} {{item[1]}}\n{% endfor -%}\n{% endif -%}\n\n{% if path -%}\n# Special case PATH\nENV PATH {{ ':'.join(path) }}:${PATH}\n{% endif -%}\n\n{% if build_script_files -%}\n# If scripts required during build are present, copy them\n{% for src, dst in build_script_files.items() %}\nCOPY {{ src }} {{ dst }}\n{% endfor -%}\n{% endif -%}\n\n{% for sd in build_script_directives -%}\n{{sd}}\n{% endfor %}\n\n# Copy and chown stuff. 
This doubles the size of the repo, because\n# you can't actually copy as USER, only as root! Thanks, Docker!\nUSER root\nCOPY src/ ${HOME}\nRUN chown -R ${NB_USER}:${NB_USER} ${HOME}\n\n# Run assemble scripts! These will actually build the specification\n# in the repository into the image.\n{% for sd in assemble_script_directives -%}\n{{ sd }}\n{% endfor %}\n\n# Container image Labels!\n# Put these at the end, since we don't want to rebuild everything\n# when these change! Did I mention I hate Dockerfile cache semantics?\n{% for k, v in labels.items() -%}\nLABEL {{k}}={{v}}\n{%- endfor %}\n\n# We always want containers to run as non-root\nUSER ${NB_USER}\n\n{% if post_build_scripts -%}\n{% for s in post_build_scripts -%}\nRUN ./{{ s }}\n{% endfor %}\n{% endif -%}\n\"\"\"\n\nDOC_URL = \"http://repo2docker.readthedocs.io/en/latest/samples.html\"\n\nclass BuildPack(LoggingConfigurable):\n    \"\"\"\n    A composable BuildPack.\n\n    Specifically used for creating Dockerfiles for use with repo2docker only.\n\n    Things that are kept constant:\n     - base image\n     - some environment variables (such as locale)\n     - user creation & ownership of home directory\n     - working directory\n\n    Everything that is configurable is additive & deduplicative,\n    and there are *some* general guarantees of ordering.\n\n    \"\"\"\n    packages = Set(\n        help=\"\"\"\n        List of packages that are installed in this BuildPack by default.\n\n        Versions are not specified, and ordering is not guaranteed. These\n        are usually installed as apt packages.\n        \"\"\"\n    )\n\n    base_packages = Set(\n        {\n            # Utils!\n            \"less\",\n\n            # FIXME: Use npm from nodesource!\n            # Everything seems to depend on npm these days, unfortunately.\n            \"npm\",\n            \"nodejs-legacy\"\n        },\n        help=\"\"\"\n        Base set of apt packages that are installed for all images.\n\n        These contain useful packages that are commonly used by a lot of images,\n        where it would be useful to share a base docker image layer that contains\n        them.\n\n        These would be installed with a --no-install-recommends option.\n        \"\"\"\n    )\n\n    env = List(\n        [],\n        help=\"\"\"\n        Ordered list of environment variables to be set for this image.\n\n        Ordered so that environment variables can use other environment\n        variables in their values.\n\n        Expects tuples, with the first item being the environment variable\n        name and the second item being the value.\n        \"\"\"\n    )\n\n    path = List(\n        [],\n        help=\"\"\"\n        Ordered list of file system paths to look for executables in.\n\n        Just sets the PATH environment variable. Separated out since\n        it is very commonly set by various buildpacks.\n        \"\"\"\n    )\n\n    labels = Dict(\n        {},\n        help=\"\"\"\n        Docker labels to set on the built image.\n        \"\"\"\n    )\n\n    build_script_files = Dict(\n        {},\n        help=\"\"\"\n        List of files to be copied to the container image for use in building.\n\n        This is copied before the `build_scripts` & `assemble_scripts` are\n        run, so can be executed from either of them.\n\n        It's a dictionary where the key is the source file path in the host\n        system, and the value is the destination file path inside the\n        container image.\n        \"\"\"\n    )\n\n    build_scripts = List(\n        [],\n        help=\"\"\"\n        Ordered list of shell script snippets to build the base image.\n\n        A list of tuples, where the first item is a username & the\n        second is a single logical line of a bash script that should\n        be RUN as that user.\n\n        These are run before the source of the repository is copied\n        into the container image, and hence cannot reference stuff\n        from the repository. 
When the build scripts are done, the\n        container image should be in a state where it is generically\n        re-usable for building various other repositories with\n        similar environments.\n\n        You can use environment variable substitutions in both the\n        username and the execution script.\n        \"\"\"\n    )\n\n    assemble_scripts = List(\n        [],\n        help=\"\"\"\n        Ordered list of shell script snippets to build the repo into the image.\n\n        A list of tuples, where the first item is a username & the\n        second is a single logical line of a bash script that should\n        be RUN as that user.\n\n        These are run after the source of the repository is copied into\n        the container image (into the current directory). These should be\n        the scripts that actually build the repository into the container\n        image.\n\n        If this needs to be dynamically determined (based on the presence\n        or absence of certain files, for example), you can create any\n        method and decorate it with `traitlets.default('assemble_scripts')`\n        and the return value of this method is used as the value of\n        assemble_scripts. You can expect that the script is running in\n        the current directory of the repository being built when doing\n        dynamic detection.\n\n        You can use environment variable substitutions in both the\n        username and the execution script.\n        \"\"\"\n    )\n\n    post_build_scripts = List(\n        [],\n        help=\"\"\"\n        An ordered list of executable scripts that should be executed after build.\n\n        These are run as a non-root user, and must be executable. Used for doing\n        things that are currently not supported by other means!\n\n        The scripts should be as deterministic as possible - running them twice\n        should not produce different results!\n        \"\"\"\n    )\n\n    name = Unicode(\n        help=\"\"\"\n        Name of the BuildPack!\n        \"\"\"\n    )\n\n    components = Tuple(())\n\n    def compose_with(self, other):\n        \"\"\"\n        Compose this BuildPack with another, returning a new one\n\n        Ordering does matter - the properties of the current BuildPack take\n        precedence (wherever that matters) over the properties of other\n        BuildPack. 
If there are any conflicts, this method is responsible\n for resolving them.\n \"\"\"\n result = BuildPack(parent=self)\n labels = {}\n labels.update(self.labels)\n labels.update(other.labels)\n result.labels = labels\n result.packages = self.packages.union(other.packages)\n result.base_packages = self.base_packages.union(other.base_packages)\n result.path = self.path + other.path\n # FIXME: Deduplicate Env\n result.env = self.env + other.env\n result.build_scripts = self.build_scripts + other.build_scripts\n result.assemble_scripts = self.assemble_scripts + other.assemble_scripts\n result.post_build_scripts = self.post_build_scripts + other.post_build_scripts\n\n build_script_files = {}\n build_script_files.update(self.build_script_files)\n build_script_files.update(other.build_script_files)\n result.build_script_files = build_script_files\n\n result.name = \"{}-{}\".format(self.name, other.name)\n\n result.components = (self, ) + self.components + (other, ) + other.components\n return result\n\n def binder_path(self, path):\n \"\"\"Locate a file\"\"\"\n if os.path.exists('binder'):\n return os.path.join('binder', path)\n else:\n return path\n\n def detect(self):\n return all([p.detect() for p in self.components])\n\n def render(self):\n \"\"\"\n Render BuildPack into Dockerfile\n \"\"\"\n t = jinja2.Template(TEMPLATE)\n\n build_script_directives = []\n last_user = 'root'\n for user, script in self.build_scripts:\n if last_user != user:\n build_script_directives.append(\"USER {}\".format(user))\n last_user = user\n build_script_directives.append(\"RUN {}\".format(\n textwrap.dedent(script.strip('\\n'))\n ))\n\n assemble_script_directives = []\n last_user = 'root'\n for user, script in self.assemble_scripts:\n if last_user != user:\n assemble_script_directives.append(\"USER {}\".format(user))\n last_user = user\n assemble_script_directives.append(\"RUN {}\".format(\n textwrap.dedent(script.strip('\\n'))\n ))\n\n return t.render(\n packages=sorted(self.packages),\n path=self.path,\n env=self.env,\n labels=self.labels,\n build_script_directives=build_script_directives,\n assemble_script_directives=assemble_script_directives,\n build_script_files=self.build_script_files,\n base_packages=sorted(self.base_packages),\n post_build_scripts=self.post_build_scripts,\n )\n\n def build(self, image_spec):\n tarf = io.BytesIO()\n tar = tarfile.open(fileobj=tarf, mode='w')\n dockerfile_tarinfo = tarfile.TarInfo(\"Dockerfile\")\n dockerfile = self.render().encode('utf-8')\n dockerfile_tarinfo.size = len(dockerfile)\n\n tar.addfile(\n dockerfile_tarinfo,\n io.BytesIO(dockerfile)\n )\n\n def _filter_tar(tar):\n # We need to unset these for build_script_files we copy into tar\n # Otherwise they seem to vary each time, preventing effective use\n # of the cache!\n # https://github.com/docker/docker-py/pull/1582 is related\n tar.uname = ''\n tar.gname = ''\n tar.uid = 1000\n tar.gid = 1000\n return tar\n\n for src in sorted(self.build_script_files):\n src_parts = src.split('/')\n src_path = os.path.join(os.path.dirname(__file__), *src_parts)\n tar.add(src_path, src, filter=_filter_tar)\n\n tar.add('.', 'src/', filter=_filter_tar)\n\n tar.close()\n tarf.seek(0)\n\n client = docker.APIClient(version='auto', **docker.utils.kwargs_from_env())\n for line in client.build(\n fileobj=tarf,\n tag=image_spec,\n custom_context=True,\n buildargs={},\n decode=True,\n forcerm=True,\n rm=True\n ):\n yield line\n\n\nclass BaseImage(BuildPack):\n name = \"repo2docker\"\n version = \"0.1\"\n\n env = [\n (\"APP_BASE\", 
\"/srv\")\n ]\n\n def detect(self):\n return True\n\n @default('assemble_scripts')\n def setup_assembly(self):\n assemble_scripts = []\n try:\n with open(self.binder_path('apt.txt')) as f:\n extra_apt_packages = [l.strip() for l in f]\n # Validate that this is, indeed, just a list of packages\n # We're doing shell injection around here, gotta be careful.\n # FIXME: Add support for specifying version numbers\n for p in extra_apt_packages:\n if not re.match(r\"^[a-z0-9.+-]+\", p):\n raise ValueError(\"Found invalid package name {} in apt.txt\".format(p))\n\n assemble_scripts.append((\n 'root',\n r\"\"\"\n apt-get update && \\\n apt-get install --yes --no-install-recommends {} && \\\n apt-get purge && \\\n apt-get clean && \\\n rm -rf /var/lib/apt/lists/*\n \"\"\".format(' '.join(extra_apt_packages))\n ))\n except FileNotFoundError:\n pass\n return assemble_scripts\n\n @default('post_build_scripts')\n def setup_post_build_scripts(self):\n post_build = self.binder_path('postBuild')\n if os.path.exists(post_build):\n if not stat.S_IXUSR & os.stat(post_build).st_mode:\n raise ValueError(\"%s is not executable, see %s for help.\" % (\n post_build, DOC_URL+'#system-post-build-scripts'))\n return [post_build]\n return []\n","repo_name":"17CodingNet/repo2docker","sub_path":"repo2docker/buildpacks/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":13834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"44"} +{"seq_id":"28093718214","text":"from abc import ABC, abstractmethod\nfrom typing import Optional\nfrom domain.user.user_repsotory import UserRepository\nfrom usecase.user.user_query_model import UserReadModel\nfrom usecase.user.user_query_service import UserQueryService\n\n\nclass UserQueryUseCase(ABC):\n \"\"\"UserQueryUseCase defines a command usecase inteface related User entity.\"\"\"\n\n @abstractmethod\n def fetch_user_by_id(self, id: str) -> Optional[UserReadModel]:\n raise NotImplementedError\n\n @abstractmethod\n def fetch_users(self) -> Optional[list[UserReadModel]]:\n raise NotImplementedError\n\n @abstractmethod\n def fetch_user_by_email(self, email: str) -> Optional[UserReadModel]:\n raise NotImplementedError\n\n\nclass UserQueryUseCaseImpl(UserQueryUseCase):\n \"\"\"UserQueryUseCaseImpl implements a command usecases related User entity.\"\"\"\n\n def __init__(self, user_query_service: UserQueryService):\n self.user_query_service: UserQueryService = user_query_service\n\n def fetch_user_by_id(self, id: str) -> Optional[UserReadModel]:\n try:\n user = self.user_query_service.find_by_id(id)\n if user is None:\n raise ValueError(\"User not found\")\n\n except:\n raise ValueError(\"User not found\")\n\n return user\n\n def fetch_users(self) -> Optional[list[UserReadModel]]:\n try:\n users = self.user_query_service.find_all()\n\n except:\n raise ValueError(\"Users not found\")\n\n return users\n\n def fetch_user_by_email(self, email: str) -> Optional[UserReadModel]:\n try:\n user = self.user_query_service.find_by_email(email)\n if user is None:\n raise ValueError(\"User not found\")\n\n except:\n raise ValueError(\"User not found\")\n\n return user\n","repo_name":"rikuto125/python-jwt-fastapi-ddd","sub_path":"usecase/user/user_query_usecase.py","file_name":"user_query_usecase.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"15102677771","text":"\"\"\"RDF processing module.\n\nThis is tailored for the MutopiaProject but has a few 
generic\nmechanisms that can be applied to other projects using RDF.\n\n\"\"\"\n\n__docformat__ = 'reStructuredText'\n\nimport xml.etree.ElementTree as ET\n\n\nRDF_NS = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'\nMP_NS = 'http://www.mutopiaproject.org/piece-data/0.1/'\n\n# A simple class to simplify namespace usage.\n# From effbot (Fredrik Lundh)\nclass NS:\n def __init__(self, uri):\n self.uri = '{'+uri+'}'\n def __getattr__(self, tag):\n return self.uri + tag\n def __call__(self, path):\n return \"/\".join(getattr(self, tag) for tag in path.split(\"/\"))\n\n\n# Convenient namespace definitions for this module\n_MP = NS(MP_NS)\n_RDF = NS(RDF_NS)\n\n# This list sets the order for the RDF output. This is not strictly\n# necessary --- RDF parsers really don't care about order --- but it\n# is more human friendly to have a consistent order.\n_MU_KEYS = ['title', 'composer', 'opus',\n 'lyricist', 'for', 'date', 'style',\n 'metre', 'arranger', 'source',\n 'licence',\n 'lyFile', 'midFile',\n 'psFileA4', 'pdfFileA4',\n 'psFileLet', 'pdfFileLet',\n 'pngFile', 'pngHeight', 'pngWidth',\n 'id',\n 'maintainer', 'maintainerEmail', 'maintainerWeb',\n 'moreInfo', 'lilypondVersion',\n]\n\n\nclass MuRDF:\n def __init__(self):\n # Start an RDF document\n self.top = ET.Element(_RDF('RDF'))\n\n # The RDF document tree has a description as its sole element.\n self.description = ET.SubElement(self.top,\n _RDF('Description'),\n {_RDF('about'): '.'})\n # Generate the RDF with all elements in the order we want.\n # Ordering is mostly cosmetic but we still want to create a\n # blank-filled RDF of our expected elements.\n for key in _MU_KEYS:\n ET.SubElement(self.description, _MP(key))\n\n\n def update_description(self, name, value):\n \"\"\"Update a description element in the RDF.\n\n This is an update not an insert so it expects to find `name`\n in the description of the RDF.\n\n :param str name: The name of an existing node in \n description.\n :param str value: The new value of the named node.\n :returns: True if node is found and updated.\n :rtype: boolean\n\n \"\"\"\n node = self.description.find(_MP(name))\n if node is None:\n return False\n node.text = value\n return True\n\n\n @classmethod\n def indent(cls, elem, level=0):\n \"\"\"Indent xml tree in place.\n\n :param xml.etree.ElementTree elem: RDF to indent.\n :param int level: level for indent (recursive routine).\n\n \"\"\"\n # also from effbot (Fredrik Lundh)\n\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n cls.indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n\n def write_xml(self, path):\n \"\"\"Write the ElementTree to RDF/XML.\n\n :param path: Name of output file.\n\n \"\"\"\n # Registration of the namespace allows the output to define\n # xmlns attributes in the RDF header.\n ET.register_namespace('rdf', RDF_NS)\n ET.register_namespace('mp', MP_NS)\n MuRDF.indent(self.top)\n root = ET.ElementTree(element=self.top)\n with open(path, 'wb') as rdfout:\n root.write(rdfout, encoding='UTF-8', xml_declaration=True)\n","repo_name":"MutopiaProject/mupub","sub_path":"mupub/rdfu.py","file_name":"rdfu.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"25555614448","text":"import ctypes\n\nlib 
= ctypes.CDLL('./mylib.dll')\n\n# int get_new_mas(int *src, int src_len, int *dst, int *dst_len)\n_get_new_mas = lib.get_new_mas\n_get_new_mas.argtypes = (ctypes.POINTER(ctypes.c_int), ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))\n_get_new_mas.restype = ctypes.c_int\n\ndef get_new_mas(nums):\n n = len(nums)\n arr = (ctypes.c_int * n)(*nums)\n\n n_res = ctypes.c_int(0)\n\n rc = _get_new_mas(arr, n, None, n_res)\n if rc:\n res = (ctypes.c_int * n_res.value)()\n\n rc = _get_new_mas(arr, n, res, n_res)\n\n return rc, list(res)\n else:\n return rc, list()\n\ndef get_new_mas2(nums):\n n = len(nums)\n arr = (ctypes.c_int * n)(*nums)\n\n m = n + 1\n n_res = ctypes.c_int(m)\n\n res = (ctypes.c_int * m)()\n\n rc = _get_new_mas(arr, n, res, n_res)\n real_res = list()\n for i in range(n_res.value):\n real_res.append(res[i])\n return rc, list(real_res)\n\n# void sdvig(int* mas, int n, int k);\n_sdvig = lib.sdvig\n_sdvig.argtypes = (ctypes.POINTER(ctypes.c_int), ctypes.c_int, ctypes.c_int)\n_sdvig.restype = None\n\ndef sdvig(nums, k):\n n = len(nums)\n arr = (ctypes.c_int * n)(*nums)\n\n _sdvig(arr, n, k)\n return list(arr)\n\nfrom tkinter import *\n\ndef print_new_mas(event):\n try:\n nums = list(map(int,ent1.get().split()))\n print(nums)\n #ЗДЕСЬ МЕНЯТЬ СПОСБО ВЫДЕЛЕНИЯ ПАМЯТИ:\n x, y = get_new_mas2(nums)\n if (x == 0):\n print(y)\n B_label.config(text = y)\n except:\n B_label.config(text = \"Ошибка ввода\")\n \n\ndef print_sdvig(event):\n try:\n nums = list(map(int,ent1.get().split()))\n print(nums)\n k = int(ent2.get())\n print(k)\n nums = sdvig(nums, k)\n print(nums)\n D_label.config(text = nums)\n except:\n D_label.config(text = \"Ошибка ввода\")\n\n#Creating window\nwindow = Tk()\nwindow.title(\"Lab_12_1_2\") \nwindow.geometry(\"400x260+800+400\")\n\nA_label = Label(window, text = 'Исходный массив')\nA_label.place(x = 60, y = 2, width = 280, height = 20)\n\nent1 = Entry(window, width=20, bd=2)\nent1.insert(0, '0')\nent1.place(x = 60, y = 26, width = 280, height = 20)\n\nB_label = Label(window, text = 'Результат')\nB_label.place(x = 60, y = 78, width = 280, height = 20)\n\ncreate_btn = Button(window, width = 10, text = \"Get_new_mas\")\ncreate_btn.bind(\"\", print_new_mas)\ncreate_btn.place(x = 100, y = 52, width = 200, height = 25)\n\nC_label = Label(window, text = 'Сдвиг влево на: ')\nC_label.place(x = 60, y = 104, width = 280, height = 20)\n\nent2 = Entry(window, width=280,bd=2)\nent2.insert(0, '0')\nent2.place(x = 60, y = 130, width = 280, height = 20)\n\ncreate_btn = Button(window, width = 10, text = \"Sdvig\")\ncreate_btn.bind(\"\", print_sdvig)\ncreate_btn.place(x = 100, y = 156, width = 200, height = 25)\n\nD_label = Label(window, text = 'Результат')\nD_label.place(x = 60, y = 182, width = 280, height = 20)\n\nwindow.mainloop()\n","repo_name":"Inlucker/University","sub_path":"ProgC/Lab_12_1_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"18326172022","text":"from kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.screenmanager import ScreenManager, Screen\n\nclass MainScreen(Screen):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n layout = BoxLayout(orientation='vertical')\n self.info_label = Label(text=\"Программа тренировок на неделю!\")\n layout.add_widget(self.info_label)\n self.buttons_info = {'День 1': 
'1aawdadaw1',\n 'День 2': 'awdawwfawd',\n 'День 3': 'Information for button 3',\n 'День 4': 'Information for button 4',\n 'День 5': 'Information for button 5',\n 'День 6': '12312513',\n 'День 7': 'awdafgeaw',}\n\n for i in range(7):\n btn = Button(text=\"Button \" + str(i + 1))\n btn.bind(on_press=self.open_info)\n layout.add_widget(btn)\n\n self.add_widget(layout)\n\n def open_info(self, instance):\n self.manager.current = 'info'\n self.manager.get_screen('info').update_info(instance.text)\n\nclass InfoScreen(Screen):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n layout = BoxLayout(orientation='vertical')\n self.info_label = Label(text=\"\")\n layout.add_widget(self.info_label)\n\n back_button = Button(text=\"Back\")\n back_button.bind(on_press=self.go_back)\n layout.add_widget(back_button)\n\n self.add_widget(layout)\n\n def update_info(self, button_text):\n self.info_label.text = \"You clicked on \" + button_text\n\n def go_back(self, instance):\n self.manager.current = 'main'\n\nclass MyApp(App):\n def build(self):\n sm = ScreenManager()\n sm.add_widget(MainScreen(name='main'))\n sm.add_widget(InfoScreen(name='info'))\n return sm\n\nif __name__ == \"__main__\":\n MyApp().run()\n\n","repo_name":"ArturMeijers/kivyprograma","sub_path":"awdawdawdafa.py","file_name":"awdawdawdafa.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"40622551924","text":"# import ca.nengo.model.StructuralException;\n# import ca.nengo.model.impl.NetworkImpl;\n# import ca.nengo.ui.models.nodes.UINetwork;\n\nfrom . import run_example\n\n\ndef processNetwork(network):\n network.closeViewer()\n network.createBrainViewer()\n\nrun_example(NetworkImpl(), processNetwork)\n","repo_name":"tbekolay/nengo_java_gui","sub_path":"nengo/gui/test/brainview.py","file_name":"brainview.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"44"} +{"seq_id":"72374881092","text":"from decimal import Decimal\n\nfrom .ema import Ema\n\n\n# Klinger Volume Oscillator\nclass Kvo:\n value: Decimal = Decimal(\"0.0\")\n\n _short_ema: Ema\n _long_ema: Ema\n _prev_hlc: Decimal = Decimal(\"0.0\")\n _prev_dm: Decimal = Decimal(\"0.0\")\n _cm: Decimal = Decimal(\"0.0\")\n _trend: int = 0\n _t: int = 0\n _t1: int = 2\n\n def __init__(self, short_period: int, long_period: int) -> None:\n if short_period < 1:\n raise ValueError(f\"Invalid short period ({short_period})\")\n if long_period < short_period:\n raise ValueError(\n f\"Long period ({long_period}) cannot be shorter than short period ({short_period})\"\n )\n\n self._short_ema = Ema(short_period)\n self._long_ema = Ema(long_period)\n\n @property\n def maturity(self) -> int:\n return self._t1\n\n @property\n def mature(self) -> bool:\n return self._t >= self._t1\n\n def update(self, high: Decimal, low: Decimal, close: Decimal, volume: Decimal) -> Decimal:\n self._t = min(self._t + 1, self._t1)\n\n hlc = high + low + close\n dm = high - low\n\n if self._t > 1:\n if hlc > self._prev_hlc and self._trend != 1:\n self._trend = 1\n self._cm = self._prev_dm\n elif hlc < self._prev_hlc and self._trend != -1:\n self._trend = -1\n self._cm = self._prev_dm\n self._cm += dm\n\n vf = volume * abs(dm / self._cm * 2 - 1) * 100 * self._trend\n\n self._short_ema.update(vf)\n self._long_ema.update(vf)\n\n self.value = self._short_ema.value - self._long_ema.value\n\n self._prev_dm = dm\n self._prev_hlc = hlc\n 
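# The KVO reading is simply the spread between the short- and long-period EMAs of the signed volume force computed above.\n 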
return self.value\n","repo_name":"discosultan/juno","sub_path":"juno/indicators/kvo.py","file_name":"kvo.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"44"} +{"seq_id":"71911877893","text":"import json, os, math, time\nimport KratosMultiphysics\nfrom KratosMultiphysics import Parameters, Logger\nimport KratosMultiphysics.CompressiblePotentialFlowApplication as KCPFApp\nfrom KratosMultiphysics.response_functions.response_function_interface import ResponseFunctionInterface\nimport KratosMultiphysics.CompressiblePotentialFlowApplication.potential_flow_analysis as potential_flow_analysis\nimport KratosMultiphysics.MappingApplication\n\n# Import Kratos, XMC, PyCOMPSs API\nimport KratosMultiphysics.MultilevelMonteCarloApplication\nimport xmc\nimport xmc.methodDefs_momentEstimator.computeCentralMoments as mdccm\nfrom exaqute import get_value_from_remote\n\ndef _GetModelPart(model, solver_settings):\n model_part_name = solver_settings[\"model_part_name\"].GetString()\n if not model.HasModelPart(model_part_name):\n model_part = model.CreateModelPart(model_part_name, 2)\n domain_size = solver_settings[\"domain_size\"].GetInt()\n if domain_size < 0:\n raise Exception('Please specify a \"domain_size\" >= 0!')\n model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, domain_size)\n else:\n model_part = model.GetModelPart(model_part_name)\n\n return model_part\n\nclass AdjointResponseFunction(ResponseFunctionInterface):\n\n def __init__(self, identifier, response_settings, model):\n default_parameters = KratosMultiphysics.Parameters( \"\"\"\n {\n \"response_type\": \"stochastic_adjoint_lift_potential_jump\",\n \"risk_measure\": \"expected_value\",\n \"primal_settings\": \"\",\n \"adjoint_settings\": \"\",\n \"xmc_settings\": \"\",\n \"design_surface_sub_model_part_name\": \"\",\n \"auxiliary_mdpa_path\": \"auxiliary_mdpa\",\n \"primal_data_transfer_with_python\": true,\n \"output_dict_results_file_name\": \"\",\n \"output_pressure_file_path\": \"\"\n } \"\"\" )\n response_settings.ValidateAndAssignDefaults(default_parameters)\n\n self.identifier = identifier\n self.response_settings = response_settings\n\n if not response_settings[\"primal_settings\"].GetString() == \"\":\n self.primal_settings = response_settings[\"primal_settings\"].GetString()\n else:\n raise Exception(\"Please set the path to the primal parameters in \\\"primal_settings\\\"\")\n\n if not response_settings[\"adjoint_settings\"].GetString() == \"\":\n self.adjoint_settings = response_settings[\"adjoint_settings\"].GetString()\n else:\n raise Exception(\"Please set the path to the adjoint parameters in \\\"adjoint_settings\\\"\")\n\n if not response_settings[\"xmc_settings\"].GetString() == \"\":\n self.xmc_settings_path = response_settings[\"xmc_settings\"].GetString()\n else:\n raise Exception(\"Please set the path to the XMC parameters in \\\"xmc_settings\\\"\")\n\n if not response_settings[\"design_surface_sub_model_part_name\"].GetString() == \"\":\n self.design_surface_sub_model_part_name = response_settings[\"design_surface_sub_model_part_name\"].GetString()\n else:\n raise Exception(\"Please set the name of the design surface submodelpart in \\\"design_surface_sub_model_part_name\\\"\")\n\n self.auxiliary_mdpa_path = response_settings[\"auxiliary_mdpa_path\"].GetString()\n self.risk_measure = response_settings[\"risk_measure\"].GetString()\n\n if response_settings.Has(\"output_dict_results_file_name\"):\n 
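# When a results file name is supplied, per-step statistics are accumulated in self.results_dict and dumped to JSON in Finalize().\n 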
self.output_dict_results_file_name = response_settings[\"output_dict_results_file_name\"].GetString()\n self.results_dict = {}\n else:\n self.output_dict_results_file_name = \"\"\n\n if response_settings.Has(\"output_pressure_file_path\"):\n self.output_pressure_file_path = response_settings[\"output_pressure_file_path\"].GetString()\n else:\n self.output_pressure_file_path = \"\"\n # Create the primal solver\n with open(self.response_settings[\"primal_settings\"].GetString(),'r') as parameter_file:\n primal_parameters = Parameters( parameter_file.read() )\n\n primal_parameters = _CheckParameters(primal_parameters)\n if primal_parameters.Has(\"adjoint_parameters_path\"):\n primal_parameters[\"adjoint_parameters_path\"].SetString(self.response_settings[\"adjoint_settings\"].GetString())\n else:\n primal_parameters.AddString(\"adjoint_parameters_path\", self.response_settings[\"adjoint_settings\"].GetString())\n if primal_parameters.Has(\"design_surface_sub_model_part_name\"):\n primal_parameters[\"design_surface_sub_model_part_name\"].SetString(self.design_surface_sub_model_part_name)\n else:\n primal_parameters.AddString(\"design_surface_sub_model_part_name\", self.design_surface_sub_model_part_name)\n if primal_parameters.Has(\"auxiliary_mdpa_path\"):\n primal_parameters[\"auxiliary_mdpa_path\"].SetString(self.auxiliary_mdpa_path)\n else:\n primal_parameters.AddString(\"auxiliary_mdpa_path\", self.auxiliary_mdpa_path)\n open(self.response_settings[\"primal_settings\"].GetString(), 'w').write(primal_parameters.PrettyPrintJsonString())\n\n # Store current design\n self.current_model_part = _GetModelPart(model, primal_parameters[\"solver_settings\"])\n\n def Initialize(self):\n\n if not self.output_pressure_file_path == \"\" and not os.path.exists(self.output_pressure_file_path):\n os.makedirs(self.output_pressure_file_path)\n\n def InitializeSolutionStep(self):\n self.current_model_part.RemoveSubModelPart(\"fluid_computational_model_part\")\n self.step = self.current_model_part.ProcessInfo[KratosMultiphysics.STEP]\n KratosMultiphysics.ModelPartIO(self.auxiliary_mdpa_path, KratosMultiphysics.IO.WRITE | KratosMultiphysics.IO.MESH_ONLY).WriteModelPart( self.current_model_part)\n\n initial_time = time.time()\n self._RunXMC()\n elapsed_time = time.time() - initial_time\n\n if self.risk_measure == \"expected_value\":\n order = 1 ; is_central = False\n elif self.risk_measure == \"variance\":\n order = 2 ; is_central = True\n\n if not self.output_dict_results_file_name == \"\":\n self.results_dict[self.step] = {}\n\n # save lift coefficient\n qoi_counter = 0\n estimator_container = [] # here we append the estimator for each index/level\n error_container = [] # here we append the variance of the estimator for each index/level\n n_samples_container = []\n for index in range (len(self.xmc_analysis.monteCarloSampler.indices)):\n self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter] = get_value_from_remote(self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter])\n estimator_container.append(float(get_value_from_remote(self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter].value(order=order, isCentral=is_central))))\n error_container.append(float(get_value_from_remote(self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter].value(order=order, isCentral=is_central, isErrorEstimationRequested=True)[1])))\n 
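# Besides each level's moment estimate and its variance, keep the per-level sample count; the statistical error below is sqrt(sum of per-level estimator variances), cf. the MLMC references cited in this method.\n 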
n_samples_container.append(int(get_value_from_remote(self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter]._sampleCounter)))\n qoi_counter += 1\n # linearly sum estimators: this summation operation is valid for expected value and central moments\n # we refer to equation 4 of Krumscheid, S., Nobile, F., & Pisaroni, M. (2020). Quantifying uncertain system outputs via the multilevel Monte Carlo method — Part I: Central moment estimation. Journal of Computational Physics. https://doi.org/10.1016/j.jcp.2020.109466\n self._value = sum(estimator_container)\n # compute statistical error as in section 2.2 of\n # Pisaroni, M., Nobile, F., & Leyland, P. (2017). A Continuation Multi Level Monte Carlo (C-MLMC) method for uncertainty quantification in compressible inviscid aerodynamics. Computer Methods in Applied Mechanics and Engineering, 326, 20–50. https://doi.org/10.1016/j.cma.2017.07.030\n statistical_error = math.sqrt(sum(error_container))\n\n if not self.output_dict_results_file_name == \"\":\n self.results_dict[self.step][\"run_time\"]=elapsed_time\n self.results_dict[self.step][\"number_of_samples\"]=n_samples_container\n self.results_dict[self.step][\"lift_coefficient\"]={}\n self.results_dict[self.step][\"lift_coefficient\"][\"risk_measure\"]=self.risk_measure\n self.results_dict[self.step][\"lift_coefficient\"][\"value\"]=self._value\n self.results_dict[self.step][\"lift_coefficient\"][\"statistical_error\"]=statistical_error\n\n # save pressure coefficient\n pressure_dict = {}\n member = 0\n for node in self.current_model_part.GetSubModelPart(self.design_surface_sub_model_part_name).Nodes:\n estimator_container = [] # here we append contribution for each index/level\n variance_container = [] # here we append contribution for each index/level\n for index in range (len(self.xmc_analysis.monteCarloSampler.indices)):\n self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter] = get_value_from_remote(self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter])\n estimator_container.append(float(get_value_from_remote(self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter].multiValue(order=order, component = member, isCentral=is_central))))\n variance_container.append(float(get_value_from_remote(self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter].multiValue(order=2, component = member, isCentral=True))))\n pressure_coefficient = sum(estimator_container) # sum raw/central moment estimations on different indeces/levels\n variance_pressure_coefficient = sum(variance_container) # sum raw/central moment estimations on different indeces/levels\n member += 1\n pressure_dict[node.Id] = {}\n pressure_dict[node.Id][\"coordinates\"] = [node.X, node.Y, node.Z]\n pressure_dict[node.Id][\"pressure_coefficient\"] = pressure_coefficient\n pressure_dict[node.Id][\"variance_pressure_coefficient\"] = variance_pressure_coefficient\n node.SetValue(KratosMultiphysics.PRESSURE_COEFFICIENT, pressure_coefficient)\n qoi_counter += 1\n if not self.output_pressure_file_path == \"\":\n with open(self.output_pressure_file_path+\"/pressure_\"+str(self.step)+\".json\", 'w') as fp:\n json.dump(pressure_dict, fp,indent=4, sort_keys=True)\n\n # save shape sensitivity\n member = 0\n for node in self.current_model_part.GetSubModelPart(self.design_surface_sub_model_part_name).Nodes:\n shape_sensitivity = KratosMultiphysics.Vector(3, 0.0)\n for idim in range(3):\n estimator_container = [] # here we append contribution for each 
index/level\n for index in range (len(self.xmc_analysis.monteCarloSampler.indices)):\n self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter] = get_value_from_remote(self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter])\n estimator_container.append(float(get_value_from_remote(self.xmc_analysis.monteCarloSampler.indices[index].qoiEstimator[qoi_counter].multiValue(order=order, component = member, isCentral=is_central))))\n shape_sensitivity[idim] = sum(estimator_container) # sum raw/central moment estimations on different indeces/levels\n member += 1\n\n node.SetValue(KratosMultiphysics.SHAPE_SENSITIVITY, shape_sensitivity)\n\n def CalculateValue(self):\n pass\n\n def CalculateGradient(self):\n pass\n\n def GetValue(self):\n return self._value\n\n def GetNodalGradient(self, variable):\n if variable != KratosMultiphysics.SHAPE_SENSITIVITY:\n raise RuntimeError(\"GetNodalGradient: No gradient for {}!\".format(variable.Name))\n\n gradient = {node.Id : node.GetValue(variable) for node in self.current_model_part.Nodes}\n\n return gradient\n\n def Finalize(self):\n if not self.output_dict_results_file_name == \"\":\n with open(self.output_dict_results_file_name, 'w') as fp:\n json.dump(self.results_dict, fp,indent=4, sort_keys=True)\n\n def _GetLabel(self):\n type_labels = {\n \"stochastic_adjoint_lift_potential_jump\" : \"StochasticLiftPotentialJump\"\n }\n response_type = self.response_settings[\"response_type\"].GetString()\n return \"Adjoint\" + type_labels[response_type] +\"Response\"\n\n def _RunXMC(self):\n # read parameters\n with open(self.xmc_settings_path,'r') as parameter_file:\n parameters = json.load(parameter_file)\n\n # SolverWrapper\n parameters[\"solverWrapperInputDictionary\"][\"qoiEstimator\"] = parameters[\"monteCarloIndexInputDictionary\"][\"qoiEstimator\"]\n\n # SampleGenerator\n samplerInputDictionary = parameters[\"samplerInputDictionary\"]\n samplerInputDictionary['randomGeneratorInputDictionary'] = parameters[\"randomGeneratorInputDictionary\"]\n samplerInputDictionary['solverWrapperInputDictionary'] = parameters[\"solverWrapperInputDictionary\"]\n\n # MonteCarloIndex\n monteCarloIndexInputDictionary = parameters[\"monteCarloIndexInputDictionary\"]\n monteCarloIndexInputDictionary[\"samplerInputDictionary\"] = samplerInputDictionary\n\n # MonoCriterion\n criteriaArray = []\n criteriaInputs = []\n for monoCriterion in (parameters[\"monoCriteriaInpuctDictionary\"]):\n criteriaArray.append(xmc.monoCriterion.MonoCriterion(\\\n parameters[\"monoCriteriaInpuctDictionary\"][monoCriterion][\"criteria\"],\\\n parameters[\"monoCriteriaInpuctDictionary\"][monoCriterion][\"tolerance\"]))\n criteriaInputs.append([parameters[\"monoCriteriaInpuctDictionary\"][monoCriterion][\"input\"]])\n\n # MultiCriterion\n multiCriterionInputDictionary=parameters[\"multiCriterionInputDictionary\"]\n multiCriterionInputDictionary[\"criteria\"] = criteriaArray\n multiCriterionInputDictionary[\"inputsForCriterion\"] = criteriaInputs\n criterion = xmc.multiCriterion.MultiCriterion(**multiCriterionInputDictionary)\n\n # ErrorEstimator\n errorEstimator = xmc.errorEstimator.ErrorEstimator(**parameters[\"errorEstimatorInputDictionary\"])\n\n # HierarchyOptimiser\n hierarchyCostOptimiser = xmc.hierarchyOptimiser.HierarchyOptimiser(**parameters[\"hierarchyOptimiserInputDictionary\"])\n\n # EstimationAssembler\n assemblers = []\n if \"expectationAssembler\" in parameters[\"estimationAssemblerInputDictionary\"].keys():\n expectationAssembler = 
xmc.estimationAssembler.EstimationAssembler(**parameters[\"estimationAssemblerInputDictionary\"][\"expectationAssembler\"])\n assemblers.append(expectationAssembler)\n if \"discretizationErrorAssembler\" in parameters[\"estimationAssemblerInputDictionary\"].keys():\n discretizationErrorAssembler = xmc.estimationAssembler.EstimationAssembler(**parameters[\"estimationAssemblerInputDictionary\"][\"discretizationErrorAssembler\"])\n assemblers.append(discretizationErrorAssembler)\n if \"varianceAssembler\" in parameters[\"estimationAssemblerInputDictionary\"].keys():\n varianceAssembler = xmc.estimationAssembler.EstimationAssembler(**parameters[\"estimationAssemblerInputDictionary\"][\"varianceAssembler\"])\n assemblers.append(varianceAssembler)\n\n # MonteCarloSampler\n monteCarloSamplerInputDictionary = parameters[\"monteCarloSamplerInputDictionary\"]\n monteCarloSamplerInputDictionary[\"indexConstructorDictionary\"] = monteCarloIndexInputDictionary\n monteCarloSamplerInputDictionary[\"assemblers\"] = assemblers\n monteCarloSamplerInputDictionary[\"errorEstimators\"] = [errorEstimator]\n mcSampler = xmc.monteCarloSampler.MonteCarloSampler(**monteCarloSamplerInputDictionary)\n\n # XMCAlgorithm\n XMCAlgorithmInputDictionary = parameters[\"XMCAlgorithmInputDictionary\"]\n XMCAlgorithmInputDictionary[\"monteCarloSampler\"] = mcSampler\n XMCAlgorithmInputDictionary[\"hierarchyOptimiser\"] = hierarchyCostOptimiser\n XMCAlgorithmInputDictionary[\"stoppingCriterion\"] = criterion\n\n self.xmc_analysis = xmc.XMCAlgorithm(**XMCAlgorithmInputDictionary)\n\n if (parameters[\"solverWrapperInputDictionary\"][\"asynchronous\"] is True):\n self.xmc_analysis.runAsynchronousXMC()\n else:\n self.xmc_analysis.runXMC()\n\n def _GetAdjointParameters(self):\n with open(self.response_settings[\"adjoint_settings\"].GetString(),'r') as parameter_file:\n adjoint_parameters = Parameters( parameter_file.read() )\n\n return adjoint_parameters\n\nclass SimulationScenario(potential_flow_analysis.PotentialFlowAnalysis):\n def __init__(self,input_model,input_parameters,sample):\n self.sample = sample\n self.mapping = False\n self.adjoint_parameters_path =input_parameters[\"adjoint_parameters_path\"].GetString()\n self.design_surface_sub_model_part_name = input_parameters[\"design_surface_sub_model_part_name\"].GetString()\n self.main_model_part_name = input_parameters[\"solver_settings\"][\"model_part_name\"].GetString()\n self.auxiliary_mdpa_path = input_parameters[\"auxiliary_mdpa_path\"].GetString()\n\n super().__init__(input_model,input_parameters)\n\n def Finalize(self):\n\n super().Finalize()\n aoa = self.project_parameters[\"processes\"][\"boundary_conditions_process_list\"][0][\"Parameters\"][\"angle_of_attack\"].GetDouble()\n mach = self.project_parameters[\"processes\"][\"boundary_conditions_process_list\"][0][\"Parameters\"][\"mach_infinity\"].GetDouble()\n self.primal_model_part = self._GetSolver().main_model_part\n nodal_velocity_process = KCPFApp.ComputeNodalValueProcess(self.primal_model_part, [\"VELOCITY\"])\n nodal_velocity_process.Execute()\n\n # Store mesh to solve with adjoint after remeshing\n self.primal_model_part.RemoveSubModelPart(\"fluid_computational_model_part\")\n self.primal_model_part.RemoveSubModelPart(\"wake_sub_model_part\")\n KratosMultiphysics.ModelPartIO(self.auxiliary_mdpa_path+\"_\"+str(self.sample[0]), KratosMultiphysics.IO.WRITE | KratosMultiphysics.IO.MESH_ONLY).WriteModelPart(self.primal_model_part)\n\n with open(self.adjoint_parameters_path,'r') as parameter_file:\n 
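# Re-read the adjoint settings for every sample so each adjoint run starts from a fresh parameter set before the sampled Mach number and angle of attack are injected.\n 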
adjoint_parameters = KratosMultiphysics.Parameters( parameter_file.read() )\n # Create the adjoint solver\n adjoint_parameters = _CheckParameters(adjoint_parameters)\n adjoint_model = KratosMultiphysics.Model()\n\n adjoint_parameters[\"processes\"][\"boundary_conditions_process_list\"][0][\"Parameters\"][\"mach_infinity\"].SetDouble(mach)\n adjoint_parameters[\"processes\"][\"boundary_conditions_process_list\"][0][\"Parameters\"][\"angle_of_attack\"].SetDouble(aoa)\n adjoint_parameters[\"solver_settings\"][\"model_import_settings\"][\"input_filename\"].SetString(self.auxiliary_mdpa_path+\"_\"+str(self.sample[0]))\n self.adjoint_analysis = potential_flow_analysis.PotentialFlowAnalysis(adjoint_model, adjoint_parameters)\n\n self.primal_state_variables = [KCPFApp.VELOCITY_POTENTIAL, KCPFApp.AUXILIARY_VELOCITY_POTENTIAL]\n\n self.adjoint_analysis.Initialize()\n self.adjoint_model_part = self.adjoint_analysis._GetSolver().main_model_part\n\n # synchronize the modelparts\n self._SynchronizeAdjointFromPrimal()\n\n self.adjoint_analysis.RunSolutionLoop()\n self.adjoint_analysis.Finalize()\n self.response_function = self.adjoint_analysis._GetSolver()._GetResponseFunction()\n\n def ModifyInitialProperties(self):\n \"\"\"\n Method introducing the stochasticity in the right hand side. Mach number and angle of attack are random varaibles.\n \"\"\"\n mach = abs(self.sample[1])\n alpha = self.sample[2]\n self.project_parameters[\"processes\"][\"boundary_conditions_process_list\"][0][\"Parameters\"][\"mach_infinity\"].SetDouble(mach)\n self.project_parameters[\"processes\"][\"boundary_conditions_process_list\"][0][\"Parameters\"][\"angle_of_attack\"].SetDouble(alpha)\n super().ModifyInitialProperties()\n\n\n def EvaluateQuantityOfInterest(self):\n \"\"\"\n Method evaluating the QoI of the problem: lift coefficient.\n \"\"\"\n qoi_list = [self.response_function.CalculateValue(self.primal_model_part)]\n Logger.PrintInfo(\"StochasticAdjointResponse\", \" Lift Coefficient: \",qoi_list[0])\n\n pressure_coefficient = []\n nodal_value_process = KCPFApp.ComputeNodalValueProcess(self.adjoint_analysis._GetSolver().main_model_part, [\"PRESSURE_COEFFICIENT\"])\n nodal_value_process.Execute()\n if (self.mapping is not True):\n for node in self.adjoint_analysis._GetSolver().main_model_part.GetSubModelPart(self.design_surface_sub_model_part_name).Nodes:\n this_pressure = node.GetValue(KratosMultiphysics.PRESSURE_COEFFICIENT)\n pressure_coefficient.append(this_pressure)\n\n elif (self.mapping is True):\n for node in self.mapping_reference_model.GetModelPart(self.main_model_part_name).GetSubModelPart(self.design_surface_sub_model_part_name).Nodes:\n this_pressure = node.GetValue(KratosMultiphysics.PRESSURE_COEFFICIENT)\n pressure_coefficient.append(this_pressure)\n # Fill the rest of the list to match SHAPE_SENSITIVITY data structure length\n pressure_coefficient.extend([0.0]*self.mapping_reference_model.GetModelPart(self.main_model_part_name).GetSubModelPart(self.design_surface_sub_model_part_name).NumberOfNodes()*2)\n qoi_list.append(pressure_coefficient)\n\n shape_sensitivity = []\n if (self.mapping is not True):\n for node in self.adjoint_analysis._GetSolver().main_model_part.GetSubModelPart(self.design_surface_sub_model_part_name).Nodes:\n this_shape = node.GetSolutionStepValue(KratosMultiphysics.SHAPE_SENSITIVITY)\n shape_sensitivity.extend(this_shape)\n\n elif (self.mapping is True):\n for node in 
self.mapping_reference_model.GetModelPart(self.main_model_part_name).GetSubModelPart(self.design_surface_sub_model_part_name).Nodes:\n this_shape = node.GetValue(KratosMultiphysics.SHAPE_SENSITIVITY)\n shape_sensitivity.extend(this_shape)\n qoi_list.append(shape_sensitivity)\n Logger.PrintInfo(\"StochasticAdjointResponse\", \"Total number of QoI:\",len(qoi_list))\n return qoi_list\n\n def MappingAndEvaluateQuantityOfInterest(self):\n\n nodal_value_process = KCPFApp.ComputeNodalValueProcess(self.adjoint_analysis._GetSolver().main_model_part, [\"PRESSURE_COEFFICIENT\"])\n nodal_value_process.Execute()\n\n KratosMultiphysics.VariableUtils().SetNonHistoricalVariableToZero(KratosMultiphysics.PRESSURE_COEFFICIENT, self.mapping_reference_model.GetModelPart(self.main_model_part_name).Nodes)\n KratosMultiphysics.VariableUtils().SetNonHistoricalVariableToZero(KratosMultiphysics.SHAPE_SENSITIVITY, self.mapping_reference_model.GetModelPart(self.main_model_part_name).Nodes)\n\n # map from current model part of interest to reference model part\n mapping_parameters = KratosMultiphysics.Parameters(\"\"\"{\n \"mapper_type\": \"nearest_element\",\n \"echo_level\" : 0\n }\"\"\")\n mapping_parameters.AddString(\"interface_submodel_part_origin\", self.design_surface_sub_model_part_name)\n mapping_parameters.AddString(\"interface_submodel_part_destination\", self.design_surface_sub_model_part_name)\n mapper = KratosMultiphysics.MappingApplication.MapperFactory.CreateMapper(self.adjoint_analysis._GetSolver().main_model_part,self.mapping_reference_model.GetModelPart(self.main_model_part_name),mapping_parameters)\n mapper.Map(KratosMultiphysics.PRESSURE_COEFFICIENT, \\\n KratosMultiphysics.PRESSURE_COEFFICIENT, \\\n KratosMultiphysics.MappingApplication.Mapper.FROM_NON_HISTORICAL | \\\n KratosMultiphysics.MappingApplication.Mapper.TO_NON_HISTORICAL)\n mapper.Map(KratosMultiphysics.SHAPE_SENSITIVITY, \\\n KratosMultiphysics.SHAPE_SENSITIVITY,\n KratosMultiphysics.MappingApplication.Mapper.TO_NON_HISTORICAL)\n # evaluate qoi\n qoi_list = self.EvaluateQuantityOfInterest()\n return qoi_list\n\n def _SynchronizeAdjointFromPrimal(self):\n\n if len(self.primal_model_part.Nodes) != len(self.adjoint_model_part.Nodes):\n raise RuntimeError(\"_SynchronizeAdjointFromPrimal: Model parts have a different number of nodes!\")\n\n # TODO this should happen automatically\n for primal_node, adjoint_node in zip(self.primal_model_part.Nodes, self.adjoint_model_part.Nodes):\n adjoint_node.X0 = primal_node.X0\n adjoint_node.Y0 = primal_node.Y0\n adjoint_node.Z0 = primal_node.Z0\n adjoint_node.X = primal_node.X\n adjoint_node.Y = primal_node.Y\n adjoint_node.Z = primal_node.Z\n\n variable_utils = KratosMultiphysics.VariableUtils()\n for variable in self.primal_state_variables:\n variable_utils.CopyModelPartNodalVar(variable, self.primal_model_part, self.adjoint_model_part, 0)\n\ndef _CheckParameters(parameters):\n if not parameters[\"solver_settings\"].Has(\"reform_dofs_at_each_step\") or not parameters[\"solver_settings\"][\"reform_dofs_at_each_step\"].GetBool():\n if not parameters[\"solver_settings\"].Has(\"reform_dofs_at_each_step\"):\n parameters[\"solver_settings\"].AddEmptyValue(\"reform_dofs_at_each_step\")\n parameters[\"solver_settings\"][\"reform_dofs_at_each_step\"].SetBool(True)\n wrn_msg = 'This solver requires the setting reform the dofs at each step in optimization.'\n wrn_msg += 'The solver setting has been set to True'\n for subproc_keys, subproc_values in parameters[\"processes\"].items():\n for process in 
subproc_values.values():\n if \"wake\" in process[\"python_module\"].GetString():\n if not process[\"Parameters\"].Has(\"compute_wake_at_each_step\") or not process[\"Parameters\"][\"compute_wake_at_each_step\"].GetBool():\n if not process[\"Parameters\"].Has(\"compute_wake_at_each_step\"):\n process[\"Parameters\"].AddEmptyValue(\"compute_wake_at_each_step\")\n process[\"Parameters\"][\"compute_wake_at_each_step\"].SetBool(True)\n return parameters\n","repo_name":"KratosMultiphysics/Kratos","sub_path":"applications/CompressiblePotentialFlowApplication/python_scripts/stochastic_potential_flow_response.py","file_name":"stochastic_potential_flow_response.py","file_ext":"py","file_size_in_byte":26928,"program_lang":"python","lang":"en","doc_type":"code","stars":906,"dataset":"github-code","pt":"44"} +{"seq_id":"73663305411","text":"\"\"\"Service Endpoint.\n\nThe service endpoint reveals information about this beacon useful for the Beacon Network.\n\n.. note:: See ``beacon_api`` root folder ``__init__.py`` for changing values used here.\n\nGET /services\n Lists services known by this service.\n Returns an array of ServiceInfo.\nGET /services?serviceType={serviceType}\n Returns an array of ServiceInfo filtered by type\nGET /services?model={model}\n Returns an array of ServiceInfo in a specific model, i.e.: \"Beacon-v1\" or \"GA4GH-ServiceInfo-v0.1\"\nGET /services?listFormat='full|short' \n full: (default) Returns an array of ServiceInfo\n short: returns just the id, name, serviceURL, ServiceType and open.\nGET /services?apiVersion={version}\n Returns an array of ServiceInfo filtered by Service API version supported\nGET /services/{id}\n Lists a service's details.\n Returns the ServiceInfo of the node\nPOST /services\n Requires HTTPS.\n Including Beacon info (/datasets too ??)\nPUT /services/{id}\n Requires HTTPS.\nDELETE /services/{id}\n Requires HTTPS.\n\"\"\"\n\nimport logging\nfrom aiocache import cached\n# from aiocache.serializers import JsonSerializer\n\nfrom .exceptions import BeaconServicesBadRequest, BeaconServerError # BeaconServerError is raised by the DB queries below; assumed to be defined alongside BeaconServicesBadRequest\n\nfrom ..utils.models import Beacon_v1, GA4GH_ServiceInfo_v01, organization\n\n\nLOG = logging.getLogger(__name__)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# FORMATTING\n# ----------------------------------------------------------------------------------------------------------------------\ndef transform_services(record, short=False):\n \"\"\"\n Transform the services record to a dict ready to be shown as a response. 
\n If the short parameter is set to True, it will create a dict based on this shortened format.\n \"\"\"\n response = dict(record)\n\n if not short: \n # create a dict for the organization info\n organization = {}\n organization['id'] = response.pop('organization_stable_id')\n organization['name'] = response.pop('organization_name')\n organization['description'] = response.pop('organization_description')\n organization['address'] = response.pop('address')\n organization['welcome_url'] = response.pop('organization_welcome_url')\n organization['contact_url'] = response.pop('contact_url')\n organization['logo_url'] = response.pop('logo_url')\n organization['info'] = response.pop('info')\n \n # create the service dict\n response[\"id\"] = response.pop(\"service_stable_id\")\n response[\"name\"] = response.pop(\"service_name\")\n response[\"serviceType\"] = response.pop(\"service_type\")\n response[\"apiVersion\"] = response.pop(\"api_version\")\n response[\"serviceUrl\"] = response.pop(\"service_url\")\n response[\"entryPoint\"] = response.pop(\"entry_point\")\n response[\"organization\"] = organization\n response[\"description\"] = response.pop(\"service_description\")\n response[\"version\"] = response.pop(\"version\")\n response[\"open\"] = response.pop(\"open\")\n response[\"welcomeUrl\"] = response.pop(\"service_welcome_url\")\n response[\"alternativeUrl\"] = response.pop(\"alternative_url\")\n response[\"createDateTime\"] = response.pop(\"create_date_time\")\n response[\"updateDateTime\"] = response.pop(\"update_date_time\")\n\n else: \n # create the short service dict\n response[\"id\"] = response.pop(\"service_stable_id\")\n response[\"name\"] = response.pop(\"service_name\")\n response[\"serviceUrl\"] = response.pop(\"service_url\")\n response[\"serviceType\"] = response.pop(\"service_type\")\n response[\"open\"] = response.pop(\"open\")\n\n return response\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# MAIN QUERY TO THE DATABASE\n# ----------------------------------------------------------------------------------------------------------------------\n\nasync def fetch_filtered_services(db_pool, processed_request):\n \"\"\"\n Fetch the services based on the filter parameters given.\n \"\"\"\n # Get the parameters\n service_type = None if not processed_request.get('serviceType') else processed_request.get('serviceType')\n listFormat = None if not processed_request.get('listFormat') else processed_request.get('listFormat')\n version = None if not processed_request.get('apiVersion') else processed_request.get('apiVersion')\n\n # Take one connection from the database pool\n async with db_pool.acquire(timeout=180) as connection:\n # Fetch different parameters depending on the listFormat\n if not listFormat or listFormat in ('full', 'long'):\n try:\n query = \"\"\"SELECT *\n FROM service WHERE\n coalesce(service_type = any($1::varchar[]), true)\n AND coalesce(version = any($2::varchar[]), true);\n \"\"\"\n statement = await connection.prepare(query)\n db_response = await statement.fetch(service_type, version)\n except Exception as e:\n raise BeaconServerError(f'Query service DB error: {e}')\n elif listFormat == 'short': # returns only id, name, serviceURL, ServiceType and open.\n\n try:\n query = \"\"\"SELECT service_stable_id, service_name, service_url, service_type, open\n FROM service WHERE\n coalesce(service_type = any($1::varchar[]), true)\n AND coalesce(version = any($2::varchar[]), true);\n \"\"\"\n statement = 
await connection.prepare(query)\n db_response = await statement.fetch(service_type, version)\n except Exception as e:\n raise BeaconServerError(f'Query short service DB error: {e}')\n services = []\n for record in list(db_response):\n transformed_service = transform_services(record, short=True) if listFormat == 'short' else transform_services(record)\n services.append(transformed_service)\n return services\n\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# SERVICES HANDLER\n# ----------------------------------------------------------------------------------------------------------------------\n\nasync def services_handler(db_pool, processed_request, request):\n \"\"\"Construct the `Beacon` app services dict.\n\n :return beacon_services: A dict that contain the services about this ``Beacon``.\n \"\"\"\n\n # Return an error for the parameters that are not implemented\n if processed_request.get('model'):\n processed_request.pop('model')\n raise BeaconServicesBadRequest(processed_request, request.host, \"The 'model' parameter is not supported yet.\") \n\n # Query the DB to get all the services (the filtering is done inside the fetch_filtered_services function)\n services = await fetch_filtered_services(db_pool, processed_request)\n\n return services\n\n\n\n# FIRST IMPLEMENTATION: works only if just the self-beacon service is shown\n# async def services_handler(db_pool, processed_request, request):\n# \"\"\"Construct the `Beacon` app services dict.\n\n# :return beacon_services: A dict that contain the services about this ``Beacon``.\n# \"\"\"\n# # Handle the query options and show what is asked for\n# serviceType = processed_request.get('serviceType')\n# model = processed_request.get('model')\n# listFormat = processed_request.get('listFormat')\n# version = processed_request.get('apiVersion')\n\n# # Return an error for the parameters that are not implemented\n# if serviceType:\n# raise BeaconServicesBadRequest(processed_request, request.host, \"The 'serviceType' parameter is not supported yet.\")\n# if version:\n# raise BeaconServicesBadRequest(processed_request, request.host, \"The 'apiVersion' parameter is not supported yet.\") \n\n\n# # First, we decide which model we will show, the default is Beacon-v1\n# if model == \"GA4GH-ServiceInfo-v0.1\":\n# serviceInfo = GA4GH_ServiceInfo_v01(request.host)\n# elif model == \"Beacon-v1\":\n# serviceInfo = Beacon_v1(request.host)\n# else: \n# serviceInfo = Beacon_v1(request.host)\n\n# # Then, we leave it full length or we shorten it, the default is full\n# if listFormat == \"short\": \n# if model == \"GA4GH-ServiceInfo-v0.1\":\n# raise BeaconServicesBadRequest(processed_request, request.host, f\"The combination of 'model': {model} and 'format': {listFormat} is not supported\")\n# else:\n# required = [\"id\", \"name\", \"serviceUrl\", \"serviceType\", \"open\"]\n# serviceInfo = {k: v for k, v in serviceInfo.items() if k in required}\n\n# return serviceInfo \n","repo_name":"d-salgado/beacon4hcnv","sub_path":"beacon_api/api/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":9029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"44"} +{"seq_id":"17946223051","text":"import random\nimport time\n\ndef play_numguess():\n print(\"Welcome to number guess!\")\n try:\n f = open(\"./numberguess.txt\")\n rand = int(f.readline().strip())\n range = int(f.readline().strip())\n guesses = int(f.readline())\n f.close()\n f = 
open(\"./numberguess.txt\", \"w\")\n f.close()\n print(\"Continuing saved game state.\")\n except ValueError:\n while True:\n try:\n range = int(input(\"Enter an upper limit:\\n\"))\n except ValueError:\n print(\"Please enter a valid number!\")\n continue\n break\n rand = random.randrange(0, range)\n guesses = 0\n except FileNotFoundError: \n while True:\n try:\n range = int(input(\"Enter an upper limit:\\n\"))\n except ValueError:\n print(\"Please enter a valid number!\")\n continue\n break\n rand = random.randrange(0, range)\n guesses = 0\n while True:\n try:\n print(f\"Number of attempted guesses: {guesses}\")\n guess = input(f\"Enter your guess between 0 and {str(range)} (q to quit)\\n\")\n if guess == \"q\":\n f = open(\"./numberguess.txt\", \"w\")\n f.write(str(rand) + \"\\n\")\n f.write(str(range) + \"\\n\")\n f.write(str(guesses))\n f.close()\n print(\"Progress saved.\")\n break \n guess = int(guess)\n guesses += 1\n if guess == rand:\n print(\"You got it!\")\n time.sleep(3)\n break\n else:\n print(\"Not quite.\")\n if range > 10:\n diff = abs(rand - guess)\n if guesses >= 5 and diff > 25:\n print(\"You're off by more than 25.\")\n elif guesses >= 4 and diff > 20:\n print(\"You're off by more than 20.\")\n elif guesses >= 3 and diff > 15:\n print(\"You're off by more than 15.\")\n elif guesses >= 2 and diff > 10:\n print(\"You're off by more than 10.\")\n elif guesses >= 1 and diff > 5:\n print(\"You're off by more than 5.\")\n else:\n print(\"You're off by less than or equal to 5!\")\n continue\n except ValueError:\n print(\"Enter a valid number!\")\n continue \n\n# play_numguess()","repo_name":"Aureliaven/Game-Library","sub_path":"numberguess.py","file_name":"numberguess.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71291541250","text":"\"\"\" Test the dep_graph function by asserting certain list as an output.\n This way we can verify if the result is the one expected.\n\n Also checks the exception for the circle.\n\"\"\"\nfrom dep_graph import dep_graph\nimport os\nimport pytest\n\nfolder_path = './tmp' # initial path of the folders.\nlist_of_tests = [] # list with all the json, that we test.\n\n# Loop through the folder to get all the dependencies.\nfor root, dirs, files in os.walk(folder_path):\n for file in files:\n file_path = os.path.join(root, file)\n list_of_tests.append(file_path)\n\n\nclass TestClass:\n \"\"\" Grouping tests in class TestClass.\n \"\"\"\n\n def test_initial(self):\n path = list_of_tests[0]\n assert dep_graph(path) == ['pkg1', 'pkg2',\n 'pkg3', 'pkg3', 'pkg2', 'pkg3', 'pkg3']\n\n def test_scale(self):\n path = list_of_tests[1]\n assert dep_graph(path) == [\"pkg1\", \"pkg2\", \"pkg3\", \"pkg4\", \"pkg4\", \"pkg3\",\n \"pkg4\", \"pkg2\", \"pkg3\", \"pkg4\", \"pkg4\", \"pkg3\", \"pkg4\", \"pkg4\", ]\n\n def test_circle(self):\n path = './tmp/deps2.json'\n with pytest.raises(Exception, match=\"Ups\") as e_info:\n dep_graph(path)\n print(e_info)\n\n def test_empty(self):\n path = './tmp/deps3.json'\n assert dep_graph(path) == ['pkg1', 'pkg2', 'pkg3']\n\n def test_bigger(self):\n path = './tmp/deps4.json'\n assert dep_graph(path) == ['pkg1', 'pkg2', 'pkg3', 'pkg4', 'pkg5', 'pkg4', 'pkg5', 'pkg3', 'pkg4', 'pkg5', 'pkg4',\n 'pkg5', 'pkg5', 'pkg2', 'pkg3', 'pkg4', 'pkg5', 'pkg4', 'pkg5', 'pkg3', 'pkg4', 'pkg5', 'pkg4', 'pkg5', 
'pkg5']\n","repo_name":"AggelosMargkas/dep_graph","sub_path":"test_dep_graph.py","file_name":"test_dep_graph.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37443200508","text":"import sys\n\nsys.stdin = open('input.txt')\n\n\ndef dfs(start=0, end=99):\n stack = [start]\n v = start\n while stack:\n for w in G[v]:\n if w == end:\n return 1\n if not visited[w]:\n visited[w] = 1\n stack.append(w)\n v = w\n break\n else: # backtrack when every neighbour is visited or there is nowhere left to go\n v = stack.pop()\n return 0\n\n\nfor t in range(1, 11):\n tc, E = map(int, input().split())\n routes = list(map(int, input().split()))\n G = [[] for _ in range(100)]\n visited = [0] * (100)\n\n for i in range(0, E * 2, 2):\n G[routes[i]].append(routes[i + 1])\n\n print('#{} {}'.format(t, dfs()))\n","repo_name":"kimkisol/TIL","sub_path":"Algorithm/SWEA/1219_길찾기/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"70961319811","text":"class Solution:\n def findMin(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n low,high=0,len(nums)-1\n while low < high:\n mid = (low + high) // 2\n if nums[mid] < nums[high]:\n high = mid\n elif nums[mid] > nums[high]:\n low = mid + 1\n else:\n # nums[mid] == nums[high]: with duplicates this half cannot be decided, so shrink the range by one\n high -= 1\n return nums[low]\n\ntest=[4,5,6,7,0,1,2]\ntest = [3, 3, 1, 3]\ntest=[10,1,10,10,10]\nprint(Solution().findMin(test))\n\n","repo_name":"yi-yun/MyPythonScripts","sub_path":"LeetCode/LeetCode_153.py","file_name":"LeetCode_153.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19744450389","text":"\"\"\"\npathlib_test_01.py\nTest of pathlib modules: Comparing results of the same code for Linux and Windows\nUwe Schweinsberg 15.8.19\n\"\"\"\nfrom pathlib import Path\nimport os\nimport sys\n\n# list directory tree structure\ndef list_files(startpath, txt_f):\n for root, dirs, files in os.walk(startpath):\n level = root.replace(startpath, '').count(os.sep)\n indent = ' ' * 4 * (level)\n print('{}{}/'.format(indent, os.path.basename(root)))\n txt_f.write('{}{}/\\n'.format(indent, os.path.basename(root)))\n subindent = ' ' * 4 * (level + 1)\n for f in files:\n print('{}{}'.format(subindent, f))\n txt_f.write('{}{}\\n'.format(subindent, f))\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n folder = sys.argv[1:]\n if len(sys.argv) > 2:\n text_file = sys.argv[2:][0]\n else:\n text_file = 'tree' + folder[0]\n try:\n f_text = open(text_file, 'w')\n except IOError:\n print('{} could not be opened for write'.format(text_file))\n raise IOError\n else:\n raise NameError(\"No command line argument (name of path) is given. 
\")\n\n f_text.write('This tree has been created with {} \\n\\n'.format(__file__))\n list_files(folder[0], f_text)\n f_text.close()\n\n# get the name of the drive of the current working directory\nfolder = os.getcwd()","repo_name":"butayama/Python-Testprogramme","sub_path":"pathlib_test_01.py","file_name":"pathlib_test_01.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34359034813","text":"\"\"\" A module for taking object detections as CSV files, and visualizing them on \n top of videos\n\"\"\"\n\nimport cv2\nimport imageio as io\nimport numpy as np\nimport pandas as pd\nimport click\nfrom math import ceil\nfrom pathlib import Path\n\nfrom visualize import draw, class_colors\nfrom apply_mask import Masker\nfrom classnames import get_classnames\nfrom util import parse_resolution, print_flush, right_remove\nfrom folder import mkdir, datasets_path, runs_path\nfrom world import Calibration\n\ndef make_divisible(x, y):\n return int(y*ceil(float(x)/y))\n\ndef detections_video(detections, videopath, outvideopath, classnames, dataset, res, fps=15, conf_thresh=0.75, show_frame_number=True, coords='pixels'):\n \"\"\" Renders a video with the detections drawn on top\n \n Arguments:\n detections -- the detections as a pandas table\n videopath -- path to input video\n outvideopath -- path to output video showing the detections\n classnames -- list of all the classes\n dataset -- name of the dataset\n res -- resolution of output video and coordinates in csv file (assumed to be the same). Probably SSD resolution if performed on direct csv files, and probably the video resolution if performed on csv files with world coordinates\n fps -- frames-per-second of output video\n conf_thresh -- Detections with confidences below this are not shown in output video. Set to negative to not visualize confidences, or set to 0.0 to show all of them. \n show_frame_number -- writes the frame number in the top left corner of the video\n coords -- coordinate system of detections\n \"\"\"\n \n masker = Masker(dataset)\n \n calib = None\n if coords == 'world':\n calib = Calibration(dataset)\n\n num_classes = len(classnames)+1\n colors = class_colors(num_classes)\n\n outwidth = make_divisible(res[0], 16)\n outheight = make_divisible(res[1], 16)\n pad_vid = True\n if (outwidth == res[0]) and (outheight == res[1]):\n pad_vid = False\n \n with io.get_reader(videopath) as vid:\n with io.get_writer(outvideopath, fps=fps) as outvid:\n for i,frame in enumerate(vid):\n frame = masker.mask(frame, alpha=0.5)\n frame = cv2.resize(frame, (res[0], res[1]))\n \n dets = detections[detections['frame_number']==i]\n if len(dets) > 0:\n frame = draw(frame, dets, colors, conf_thresh=conf_thresh, coords=coords, calib=calib)\n \n if pad_vid:\n padded = 255*np.ones((outheight, outwidth, 3), dtype=np.uint8)\n padded[0:res[1], 0:res[0], :] = frame\n frame = padded \n \n if show_frame_number:\n cv2.putText(frame, 'Frame {}'.format(i), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)\n \n outvid.append_data(frame)\n \n if i%500 == 0:\n print_flush(\"Frame {}\".format(i))\n \n \n@click.command()\n@click.option(\"--cmd\", default=\"findvids\", help=\"Either 'findvids' to search for videos, or a path to a specific video's csv file containing detections\")\n@click.option(\"--res\", default=\"(640,480,3)\", help=\"Resolution that the detections are in, on the format '(width,height,channels)'. 
If working with pixel coordinates, then this should be the detector's resolution. If world coordinates, it should be video resolution\")\n@click.option(\"--dataset\", default=\"sweden2\", help=\"Name of the dataset\")\n@click.option(\"--run\", default=\"default\", help=\"Name of training run\")\n@click.option(\"--conf\", default=0.0, type=float, help=\"Confidence threshold\")\n@click.option(\"--fps\", default=15, type=int, help=\"Frames-per-second of output video\")\n@click.option(\"--coords\", default=\"pixels\", type=click.Choice(['pixels', 'world']), help=\"Coordinate system of data in csv files ('pixels' or 'world')\")\ndef main(cmd, res, dataset, run, conf, fps, coords):\n res = parse_resolution(res)\n classnames = get_classnames(dataset)\n \n local_output = False\n csvs = []\n if cmd == \"findvids\":\n if coords == \"pixels\":\n found = (runs_path / \"{}_{}\".format(dataset,run) / \"csv\").glob('*.csv')\n elif coords == \"world\":\n found = (runs_path / \"{}_{}\".format(dataset,run) / \"detections_world\").glob('*.csv')\n \n found = list(found)\n found.sort()\n csvs.extend(found)\n else:\n csvs.append(cmd)\n local_output = True\n \n if coords == \"pixels\":\n out_folder = runs_path / \"{}_{}\".format(dataset,run) / \"detections\"\n elif coords == \"world\":\n out_folder = runs_path / \"{}_{}\".format(dataset,run) / \"detections_world\"\n \n mkdir(out_folder)\n \n for csv_path in csvs:\n vidname = csv_path.stem\n if coords == \"world\":\n vidname = right_remove(vidname, '_world')\n \n vid_path = datasets_path / dataset / \"videos\" / (vidname+'.mkv') \n\n if local_output:\n outvid_path = Path('.') / '{}.mp4'.format(vidname)\n else:\n outvid_path = out_folder / '{}.mp4'.format(vidname) \n \n detections = pd.read_csv(csv_path)\n detections_video(detections, vid_path, outvid_path, classnames, dataset, res, fps=fps, conf_thresh=conf, coords=coords)\n print_flush(outvid_path)\n \n print_flush(\"Done!\")\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"ahrnbom/strudl","sub_path":"visualize_detections.py","file_name":"visualize_detections.py","file_ext":"py","file_size_in_byte":5590,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"43"} +{"seq_id":"13054363813","text":"from aiogram import Bot, types\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom config import TG_BOT_KEY, MY_ID_BOT\nfrom aiogram.dispatcher import Dispatcher\nfrom aiogram.utils import executor\nfrom api import Update\n\n\nstorage = MemoryStorage()\nbot = Bot(token=TG_BOT_KEY)\ndisDot = Dispatcher(bot, storage=storage)\n\n\n@disDot.message_handler(commands=[\"start\"])\nasync def start_command(message: types.Message):\n \"\"\"\n Обработка команды /start. Приветствие пользователя и предложение выбрать\n действие.\n \"\"\"\n if message.chat.type == types.ChatType.PRIVATE:\n keyboard_markup = types.ReplyKeyboardMarkup(row_width=2,\n resize_keyboard=True)\n up_button = types.KeyboardButton('/up')\n\n keyboard_markup.add(up_button)\n await message.reply('Привет! 
Что вы хотите сделать?',\n reply_markup=keyboard_markup)\n\n\n@disDot.message_handler(commands=['up'])\nasync def up_resume(message: types.Message):\n \"\"\"\n Обработка команды /up для поднятия резюме\n \"\"\"\n Update.STATUS_CODE_RESPONSE.clear()\n try:\n Update.update_resume()\n res = Update.STATUS_CODE_RESPONSE\n for up in res:\n if message.from_id != int(MY_ID_BOT):\n await message.reply('Ты не местный, я вызываю копов')\n elif up == 204:\n await message.reply('Резюме обновлено')\n elif up == 403:\n await message.reply(f'Неверный токен авторизации\\n'\n f'Ошибка: {up}')\n elif up == 429:\n await message.reply(f'Резюме не обновлено \\n'\n f'Ошибка: {up}')\n\n except Exception as e:\n await message.reply(f'Ошибка системы {e}')\n\n\n@disDot.message_handler()\nasync def allComands(message: types.Message):\n \"\"\" Обработка команд неверных\"\"\"\n await message.reply('Введите команду /up')\n\n\nif __name__ == '__main__':\n executor.start_polling(disDot)\n","repo_name":"budennovsk/Update_resume_hh-ru","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"358295611","text":"# -*- coding: utf-8 -*-\n\n# Usage:\n# scrapy crawl BBC_Responder -a chapter=sport -a news=12\n# scrapy crawl BBC_Responder -a chapter=sport -a news=12 -s LOG_FILE=BBC.log >> BBC.txt\n# scrapy crawl BBC_Responder -a chapter=travel -a news=12 -s LOG_FILE=BBC.log >> BBC.txt\n\nimport scrapy\nimport logging\nfrom scrapy.exceptions import CloseSpider\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom datetime import datetime\nimport scrapy, json, sys\nfrom w3lib.html import remove_tags\n\n\n\nclass BBC_ResponderSpider(CrawlSpider):\n\n # Set spider name\n name = 'BBC_Responder'\n# postfix='sport'\n\n # set domain for parsing\n allowed_domains = ['bbc.com']\n\n\n\n # Init constructor \n def __init__(self, category=None, *args, **kwargs):\n\n\n # Set parameters\n self.chapter = kwargs['chapter']\n\n # Set start path for grabing\n self.start_urls = ['http://www.bbc.com/'+self.chapter+'/']\n\n # Setup maximum allowed urls for download from page\n self.maxAllowedURL=int(kwargs['news'])\n\n # Init links counter\n self.linksCounter=0\n\n # Set allow path\n allowPath=r'/'+self.chapter\n\n # Create rule for parsing links on page\n self.rules = (\n# Rule(LinkExtractor(allow=r'/'+postfix), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=allowPath), callback='parse_item', follow=True),\n\n )\n # Call native constructor\n super(BBC_ResponderSpider, self).__init__(*args, **kwargs)\n\n\n\n\n # Parse scraped items\n def parse_item(self, response):\n i={}\n # Extract all titles and urls for page and print it \n extractor = LinkExtractor(deny_domains=self.allowed_domains[0])\n\n # Get title values\n i['title']=remove_tags(str(response.css('title').get()).encode(\"utf-8\"))\n\n # Setup variable for returning\n i['url']=response.url\n # Finish spider if maximum download maxAllowedURL is achived\n if (self.linksCounter >= self.maxAllowedURL ):\n # Finish application \n raise CloseSpider('maxAllowedURL is achived linksCounter='+str(self.linksCounter))\n # increment link counter\n self.linksCounter+=1\n # Output grabed content\n print(i)\n # print(self.ArticlesBuffer)\n return 
i\n","repo_name":"eugenykaminski/BBCResponder","sub_path":"BBCResponder/spiders/BBC_Responder.py","file_name":"BBC_Responder.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74701845568","text":"\"\"\"`kedro_viz.data_access.repositories.catalog` defines interface to\ncentralise access to Kedro data catalog.\"\"\"\n# pylint: disable=missing-class-docstring,missing-function-docstring,protected-access\nimport logging\nfrom typing import TYPE_CHECKING, Dict, Optional\n\nfrom kedro.io import DataCatalog\nfrom kedro.pipeline.pipeline import TRANSCODING_SEPARATOR, _strip_transcoding\nfrom packaging.version import parse\n\nfrom kedro_viz.constants import KEDRO_VERSION\n\ntry:\n # kedro 0.18.11 onwards\n from kedro.io import DatasetNotFoundError, MemoryDataset\nexcept ImportError: # pragma: no cover\n # older versions\n from kedro.io import DataSetNotFoundError as DatasetNotFoundError\n from kedro.io import MemoryDataSet as MemoryDataset\n\nif TYPE_CHECKING:\n try:\n # kedro 0.18.12 onwards\n from kedro.io.core import AbstractDataset\n except ImportError:\n # older versions\n from kedro.io.core import AbstractDataSet as AbstractDataset\n\nlogger = logging.getLogger(__name__)\n\n\nclass CatalogRepository:\n _catalog: DataCatalog\n\n def __init__(self):\n self._layers_mapping = None\n\n def get_catalog(self) -> DataCatalog:\n return self._catalog\n\n def set_catalog(self, value: DataCatalog):\n self._catalog = value\n\n def _validate_layers_for_transcoding(self, dataset_name, layer):\n existing_layer = self._layers_mapping.get(dataset_name)\n if existing_layer is not None and existing_layer != layer:\n raise ValueError(\n \"Transcoded datasets should have the same layer. \"\n \"Please ensure consistent layering in your Kedro catalog. \"\n f\"Mismatch found for: {dataset_name}\"\n )\n\n @property\n def layers_mapping(self):\n # pylint: disable=too-many-branches\n \"\"\"Return layer mapping: dataset_name -> layer it belongs to in the catalog\n From kedro-datasets 1.3.0 onwards, the 'layers' attribute is defined inside the 'metadata'\n under 'kedro-viz' plugin.\n\n Catalog before kedro-datasets 1.3.0:\n type: pandas.CSVDataset\n filepath: /filepath/to/dataset\n layers: raw\n\n Catalog from kedro-datasets 1.3.0 onwards:\n type: pandas.CSVDataset\n filepath: /filepath/to/dataset\n metadata:\n kedro-viz:\n layers: raw\n\n Currently, Kedro up to 18.x supports both formats. 
However,\n support for the old format will be discontinued from Kedro 19.x.\n Kedro-viz will continue to support both formats.\n It's recommended to follow the newest format for defining layers in the catalog.\n \"\"\"\n if self._layers_mapping is not None:\n return self._layers_mapping\n\n self._layers_mapping = {}\n\n # Temporary try/except block so the Kedro develop branch can work with Viz.\n try:\n datasets = self._catalog._data_sets\n # pylint: disable=broad-exception-caught\n except Exception: # pragma: no cover\n datasets = self._catalog._datasets\n\n # Maps layers according to the old format\n if KEDRO_VERSION < parse(\"0.19.0\"):\n if self._catalog.layers is None:\n self._layers_mapping = {\n _strip_transcoding(dataset_name): None for dataset_name in datasets\n }\n else:\n for layer, dataset_names in self._catalog.layers.items():\n for dataset_name in dataset_names:\n if TRANSCODING_SEPARATOR in dataset_name:\n dataset_name = _strip_transcoding(dataset_name)\n self._validate_layers_for_transcoding(dataset_name, layer)\n self._layers_mapping[dataset_name] = layer\n\n # Maps layers according to the new format\n for dataset_name in datasets:\n dataset = self._catalog._get_dataset(dataset_name)\n metadata = getattr(dataset, \"metadata\", None)\n if not metadata:\n continue\n try:\n layer = dataset.metadata[\"kedro-viz\"][\"layer\"]\n except (AttributeError, KeyError): # pragma: no cover\n logger.debug(\n \"No layer info provided under metadata in the catalog for %s\",\n dataset_name,\n )\n else:\n if TRANSCODING_SEPARATOR in dataset_name:\n dataset_name = _strip_transcoding(dataset_name)\n self._validate_layers_for_transcoding(dataset_name, layer)\n self._layers_mapping[dataset_name] = layer\n\n return self._layers_mapping\n\n def get_dataset(self, dataset_name: str) -> Optional[\"AbstractDataset\"]:\n dataset_obj: Optional[\"AbstractDataset\"]\n try:\n # Kedro 0.18.1 introduced the `suggest` argument to disable the expensive\n # fuzzy-matching process.\n if KEDRO_VERSION >= parse(\"0.18.1\"):\n dataset_obj = self._catalog._get_dataset(dataset_name, suggest=False)\n else: # pragma: no cover\n dataset_obj = self._catalog._get_dataset(dataset_name)\n except DatasetNotFoundError:\n dataset_obj = MemoryDataset()\n\n return dataset_obj\n\n def get_layer_for_dataset(self, dataset_name: str) -> Optional[str]:\n return self.layers_mapping.get(_strip_transcoding(dataset_name))\n\n def as_dict(self) -> Dict[str, Optional[\"AbstractDataset\"]]:\n return {\n dataset_name: self.get_dataset(dataset_name)\n for dataset_name in self._catalog.list()\n if self.get_dataset(dataset_name) is not None\n }\n\n @staticmethod\n def is_dataset_param(dataset_name: str) -> bool:\n \"\"\"Return whether a dataset is a parameter\"\"\"\n return (\n dataset_name.lower().startswith(\"params:\") or dataset_name == \"parameters\"\n )\n","repo_name":"kedro-org/kedro-viz","sub_path":"package/kedro_viz/data_access/repositories/catalog.py","file_name":"catalog.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","stars":605,"dataset":"github-code","pt":"43"} +{"seq_id":"4729814612","text":"from scapy.all import *\nimport oyaml as yaml\n\nf= (open('packet_generator.yaml', 'r')).read()\ndict1= yaml.load(f , Loader=yaml.FullLoader)\n\n\ndef geneatingPacket():\n h= []\n for p in dict1:\n for i in dict1[p]:\n\n for j in dict1[p][i]:\n exec(j)\n\n for i in dict1[p]:\n if not i == 'Packet' :\n hdr = eval(i)()\t\t\t# Creating raw headers ex. 
hdr= IP() before giving the attributes\n                for j in dict1[p][i]:\t\t# This will add attributes to already created raw headers\n                    k='hdr.'+j\t\t# ex. hdr.src='1.1.1.1'\n                    exec(k)\n                h.append(hdr)\n    #print(h)\n    pkt=h[0]\n    for j in range(1,len(h)):\n        pkt= pkt/h[j]\n    pkt.show()\n\ngeneatingPacket()\n","repo_name":"vika2218/YAML-packet-generator","sub_path":"packet_generator.py","file_name":"packet_generator.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"}
+{"seq_id":"31566088393","text":"import json\nfrom Aula206a_Exercicio import Pessoa, CAMINHO_ARQUIVO,fazer_dump\n\nfazer_dump()\n\nlista_pessoas = []\n\nwith open(CAMINHO_ARQUIVO, 'r', encoding='utf8') as arquivo:\n    dados = json.load(arquivo)\n\nfor dado in dados:\n    lista_pessoas.append(Pessoa(**dado))\n\nfor pessoa in lista_pessoas:\n    print(f'{pessoa.nome} tem {pessoa.idade} anos')","repo_name":"ALMTC/Py3-course","sub_path":"Aula206b_Exercicio.py","file_name":"Aula206b_Exercicio.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"21644383938","text":"#!/usr/bin/env python\n#coding=UTF8\n'''\n    @author: devin\n    @time: 2014-02-22\n    @desc:\n        Stores slave-related information\n'''\nimport json\n\nclass SlaveInfo:\n    def __init__(self, name='raffle', local_ip=None, port=-1, recv_real_time_request=False):\n        self.name = name  # slave name\n        self.local_ip = local_ip  # IP address of the host machine\n        self.port = port\n        self.recv_real_time_request = recv_real_time_request  # whether to accept real-time requests\n        self.path = None  # path on the host machine\n        self.start_time = None  # start time\n        self.thread_num = 0  # number of running threads\n        self.process_task_num = 0  # total number of tasks processed\n        self.error_task_num = 0  # number of failed tasks\n        self.type = None  # slave type\n        self.last_heartbeat = None  # time of the last heartbeat report\n        self.status = 0  # status: 1 = healthy, -1 = connection lost\n        self.request_task_num = 0  # number of real-time requests\n\n    @property\n    def service_address(self):\n        '''\n        return service address\n        '''\n        return \"{0}:{1}\".format(self.local_ip, self.port)\n\n    def __str__(self):\n        return json.dumps(self.__dict__)\n","repo_name":"20113261/lave","sub_path":"workspace/spider/SpiderClient/lib/crawler/controller/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"11659752139","text":"from multiprocessing import Lock\nfrom typing import Dict, Any, Iterator, List, Union, Callable, Tuple\nfrom exasol_udf_mock_python.group import Group\nfrom exasol_udf_mock_python.mock_context import MockContext\nfrom exasol_udf_mock_python.mock_context_run_wrapper import MockContextRunWrapper\nfrom exasol_udf_mock_python.mock_exa_environment import MockExaEnvironment\n\n\ndef _loop_groups(ctx:MockContext, exa:MockExaEnvironment, runfunc:Callable):\n    while ctx.next_group():\n        _wrapped_run(ctx, exa, runfunc)\n\n\ndef _wrapped_run(ctx:MockContext, exa: MockExaEnvironment, runfunc: Callable):\n    wrapped_ctx = MockContextRunWrapper(\n        ctx, exa.meta.input_type, exa.meta.output_type, exa.meta.is_variadic_input)\n    if exa.meta.input_type == \"SET\":\n        if exa.meta.output_type == \"RETURNS\":\n            run_with_returns(ctx, runfunc, wrapped_ctx)\n        else:\n            runfunc(wrapped_ctx)\n    else:\n        if exa.meta.output_type == \"RETURNS\":\n            while (True):\n                run_with_returns(ctx, runfunc, wrapped_ctx)\n                if not ctx.next():\n                    break\n        else:\n            while (True):\n                runfunc(wrapped_ctx)\n                if not ctx.next():\n                    break\n\n\ndef run_with_returns(ctx, runfunc, wrapped_ctx):\n    result = 
runfunc(wrapped_ctx)\n if isinstance(result, Tuple):\n ctx.emit(*result)\n else:\n ctx.emit(result)\n\n\nclass UDFMockExecutor:\n _lock = Lock()\n\n def _exec_run(self, exec_globals: Dict[str, Any], ctx: MockContext):\n codeObject = compile(\"__loop_groups(__mock_test_executor_ctx, exa, run)\", 'exec_run', 'exec')\n exec_locals = {}\n exec_globals[\"__mock_test_executor_ctx\"] = ctx\n exec_globals[\"__loop_groups\"] = _loop_groups\n exec(codeObject, exec_globals, exec_locals)\n\n def _exec_cleanup(self, exec_globals: Dict[str, Any]):\n codeObject = compile(\"cleanup()\", 'exec_cleanup', 'exec')\n exec(codeObject, exec_globals)\n\n def _exec_init(self, exa_environment: MockExaEnvironment) -> Dict[str, Any]:\n codeObject = compile(exa_environment.meta.script_code, 'udf', 'exec')\n exec_globals = {\"exa\": exa_environment}\n exec(codeObject, exec_globals)\n return exec_globals\n\n def run(self,\n input_groups:Union[Iterator[Group],List[Group]],\n exa_environment: MockExaEnvironment)\\\n ->List[Group]:\n with self._lock:\n if isinstance(input_groups,Iterator):\n ctx = MockContext(input_groups, exa_environment.meta)\n elif isinstance(input_groups,List):\n ctx = MockContext(iter(input_groups), exa_environment.meta)\n else:\n raise TypeError(f\"{type(input_groups)} for input_groups not supported\")\n exec_globals = self._exec_init(exa_environment)\n try:\n self._exec_run(exec_globals, ctx)\n finally:\n if \"cleanup\" in exec_globals:\n self._exec_cleanup(exec_globals)\n return ctx.output_groups\n","repo_name":"exasol/udf-mock-python","sub_path":"exasol_udf_mock_python/udf_mock_executor.py","file_name":"udf_mock_executor.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"35769926357","text":"import argparse\nimport xml.etree.ElementTree as etree\nfrom nltk.tokenize import RegexpTokenizer\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('file_name')\n parser.add_argument('--job_path',required=True)\n parser.add_argument('--num_partitions',type=int,required=True)\n args = parser.parse_args()\n\n file_name = args.file_name\n job_path = args.job_path\n num_partitions = args.num_partitions\n\n info_ret = etree.parse(file_name)\n root = info_ret.getroot()\n namespace = {'my_ns':'http://www.mediawiki.org/xml/export-0.10/'}\n\n site_info = root.find('my_ns:siteinfo', namespace)\n\n total_no_of_docs = float(len(root.findall('my_ns:page', namespace)))\n #taking care of case below if total_no_of_docs is not multiple of num_partitions\n docs_each_partition = int(total_no_of_docs/num_partitions)\n\n pages = root.findall('my_ns:page', namespace)\n #starting doc_id from 100\n doc_id = 100\n\n for i in range(num_partitions):\n root = etree.Element('root')\n root.append(site_info)\n start_index = i*docs_each_partition\n if i==num_partitions-1:\n end_index = int(total_no_of_docs)\n else:\n end_index = (i+1)*docs_each_partition\n for page in pages[start_index:end_index]:\n doc_id += 1\n etree.SubElement(page, \"{http://www.mediawiki.org/xml/export-0.10/}doc_id\").text = str(doc_id)\n root.append(page)\n tree = etree.ElementTree(root)\n tree.write(job_path+str(i)+'.in')\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"surgicaI/search-engine-architecture","sub_path":"assignment4/reformatter.py","file_name":"reformatter.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} 
+{"seq_id":"30550895838","text":"from contextlib import contextmanager\nfrom io import BytesIO, IOBase, StringIO\nfrom pathlib import Path\nfrom typing import (\n Any,\n BinaryIO,\n Callable,\n ContextManager,\n Dict,\n Iterator,\n List,\n Mapping,\n Optional,\n TextIO,\n Tuple,\n Type,\n Union,\n overload,\n)\nfrom urllib.request import urlopen\n\nfrom qadataframe.utils import format_path, handle_projection_columns\n\ntry:\n import pyarrow as pa\n import pyarrow.csv\n import pyarrow.feather\n import pyarrow.parquet\n\n _PYARROW_AVAILABLE = True\nexcept ImportError: # pragma: no cover\n _PYARROW_AVAILABLE = False\n\nfrom qadataframe.convert import from_arrow\nfrom qadataframe.datatypes import DataType\nfrom qadataframe.internals import DataFrame, LazyFrame\n\ntry:\n from qadataframe.qadataframe import ipc_schema as _ipc_schema\nexcept ImportError: # pragma: no cover\n pass\n\ntry:\n import connectorx as cx\n\n _WITH_CX = True\nexcept ImportError:\n _WITH_CX = False\n\ntry:\n import fsspec\n from fsspec.utils import infer_storage_options\n\n _WITH_FSSPEC = True\nexcept ImportError:\n _WITH_FSSPEC = False\n\n\ndef _process_http_file(path: str) -> BytesIO:\n with urlopen(path) as f:\n return BytesIO(f.read())\n\n\n@overload\ndef _prepare_file_arg(\n file: Union[str, List[str], Path, BinaryIO, bytes], **kwargs: Any\n) -> ContextManager[Union[str, BinaryIO]]:\n ...\n\n\n@overload\ndef _prepare_file_arg(\n file: Union[str, TextIO, Path, BinaryIO, bytes], **kwargs: Any\n) -> ContextManager[Union[str, BinaryIO]]:\n ...\n\n\n@overload\ndef _prepare_file_arg(\n file: Union[str, List[str], TextIO, Path, BinaryIO, bytes], **kwargs: Any\n) -> ContextManager[Union[str, List[str], BinaryIO, List[BinaryIO]]]:\n ...\n\n\ndef _prepare_file_arg(\n file: Union[str, List[str], TextIO, Path, BinaryIO, bytes], **kwargs: Any\n) -> ContextManager[Union[str, BinaryIO, List[str], List[BinaryIO]]]:\n \"\"\"\n Utility for read_[csv, parquet]. 
(not to be used by scan_[csv, parquet]).\n Returned value is always usable as a context.\n\n A `StringIO`, `BytesIO` file is returned as a `BytesIO`\n A local path is returned as a string\n An http url is read into a buffer and returned as a `BytesIO`\n\n When fsspec is installed, remote file(s) is (are) opened with\n `fsspec.open(file, **kwargs)` or `fsspec.open_files(file, **kwargs)`.\n \"\"\"\n\n # Small helper to use a variable as context\n @contextmanager\n def managed_file(file: Any) -> Iterator[Any]:\n try:\n yield file\n finally:\n pass\n\n if isinstance(file, StringIO):\n return BytesIO(file.read().encode(\"utf8\"))\n if isinstance(file, BytesIO):\n return managed_file(file)\n if isinstance(file, Path):\n return managed_file(format_path(file))\n if isinstance(file, str):\n if _WITH_FSSPEC:\n if infer_storage_options(file)[\"protocol\"] == \"file\":\n return managed_file(format_path(file))\n return fsspec.open(file, **kwargs)\n if file.startswith(\"http\"):\n return _process_http_file(file)\n if isinstance(file, list) and bool(file) and all(isinstance(f, str) for f in file):\n if _WITH_FSSPEC:\n if all(infer_storage_options(f)[\"protocol\"] == \"file\" for f in file):\n return managed_file([format_path(f) for f in file])\n return fsspec.open_files(file, **kwargs)\n if isinstance(file, str):\n file = format_path(file)\n return managed_file(file)\n\n\ndef update_columns(df: DataFrame, new_columns: List[str]) -> DataFrame:\n if df.width > len(new_columns):\n cols = df.columns\n for i, name in enumerate(new_columns):\n cols[i] = name\n new_columns = cols\n df.columns = new_columns\n return df\n\n\ndef read_csv(\n file: Union[str, TextIO, BytesIO, Path, BinaryIO, bytes],\n has_header: bool = True,\n columns: Optional[Union[List[int], List[str]]] = None,\n new_columns: Optional[List[str]] = None,\n sep: str = \",\",\n comment_char: Optional[str] = None,\n quote_char: Optional[str] = r'\"',\n skip_rows: int = 0,\n dtypes: Optional[Union[Mapping[str, Type[DataType]], List[Type[DataType]]]] = None,\n null_values: Optional[Union[str, List[str], Dict[str, str]]] = None,\n ignore_errors: bool = False,\n parse_dates: bool = False,\n n_threads: Optional[int] = None,\n infer_schema_length: Optional[int] = 100,\n batch_size: int = 8192,\n n_rows: Optional[int] = None,\n encoding: str = \"utf8\",\n low_memory: bool = False,\n rechunk: bool = True,\n use_pyarrow: bool = False,\n storage_options: Optional[Dict] = None,\n skip_rows_after_header: int = 0,\n row_count_name: Optional[str] = None,\n row_count_offset: int = 0,\n **kwargs: Any,\n) -> DataFrame:\n \"\"\"\n Read a CSV file into a Dataframe.\n\n Parameters\n ----------\n file\n Path to a file or a file-like object.\n By file-like object, we refer to objects with a ``read()``\n method, such as a file handler (e.g. via builtin ``open``\n function) or ``StringIO`` or ``BytesIO``.\n If ``fsspec`` is installed, it will be used to open remote\n files.\n has_header\n Indicate if the first row of dataset is a header or not.\n If set to False, column names will be autogenrated in the\n following format: ``column_x``, with ``x`` being an\n enumeration over every column in the dataset starting at 1.\n columns\n Columns to select. Accepts a list of column indices (starting\n at zero) or a list of column names.\n new_columns\n Rename columns right after parsing the CSV file. 
If the given\n list is shorter than the width of the DataFrame the remaining\n columns will have their original name.\n sep\n Character to use as delimiter in the file.\n comment_char\n Character that indicates the start of a comment line, for\n instance ``#``.\n quote_char\n Single byte character used for csv quoting, default = ``\"``.\n Set to None to turn off special handling and escaping of quotes.\n skip_rows\n Start reading after ``skip_rows`` lines.\n dtypes\n Overwrite dtypes during inference.\n null_values\n Values to interpret as null values. You can provide a:\n - ``str``: All values equal to this string will be null.\n - ``List[str]``: A null value per column.\n - ``Dict[str, str]``: A dictionary that maps column name to a\n null value string.\n ignore_errors\n Try to keep reading lines if some lines yield errors.\n First try ``infer_schema_length=0`` to read all columns as\n ``pl.Utf8`` to check which values might cause an issue.\n parse_dates\n Try to automatically parse dates. If this does not succeed,\n the column remains of data type ``pl.Utf8``.\n n_threads\n Number of threads to use in csv parsing.\n Defaults to the number of physical cpu's of your system.\n infer_schema_length\n Maximum number of lines to read to infer schema.\n If set to 0, all columns will be read as ``pl.Utf8``.\n If set to ``None``, a full table scan will be done (slow).\n batch_size\n Number of lines to read into the buffer at once.\n Modify this to change performance.\n n_rows\n Stop reading from CSV file after reading ``n_rows``.\n During multi-threaded parsing, an upper bound of ``n_rows``\n rows cannot be guaranteed.\n encoding\n Allowed encodings: ``utf8`` or ``utf8-lossy``.\n Lossy means that invalid utf8 values are replaced with ``�``\n characters.\n low_memory\n Reduce memory usage at expense of performance.\n rechunk\n Make sure that all columns are contiguous in memory by\n aggregating the chunks into a single array.\n use_pyarrow\n Try to use pyarrow's native CSV parser.\n This is not always possible. The set of arguments given to\n this function determines if it is possible to use pyarrow's\n native parser. Note that pyarrow and qadataframe may have a\n different strategy regarding type inference.\n storage_options\n Extra options that make sense for ``fsspec.open()`` or a\n particular storage connection.\n e.g. 
host, port, username, password, etc.\n skip_rows_after_header\n Skip these number of rows when the header is parsed\n row_count_name\n If not None, this will insert a row count column with give name into the DataFrame\n row_count_offset\n Offset to start the row_count column (only use if the name is set)\n\n Returns\n -------\n DataFrame\n \"\"\"\n\n # Map legacy arguments to current ones and remove them from kwargs.\n has_header = kwargs.pop(\"has_headers\", has_header)\n dtypes = kwargs.pop(\"dtype\", dtypes)\n n_rows = kwargs.pop(\"stop_after_n_rows\", n_rows)\n\n if columns is None:\n columns = kwargs.pop(\"projection\", None)\n\n projection, columns = handle_projection_columns(columns)\n\n if isinstance(file, bytes) and len(file) == 0:\n raise ValueError(\"Empty bytes data provided\")\n\n storage_options = storage_options or {}\n\n if columns and not has_header:\n for column in columns:\n if isinstance(column, str) and not column.startswith(\"column_\"):\n raise ValueError(\n 'Specified column names do not start with \"column_\", '\n \"but autogenerated header names were requested.\"\n )\n\n if use_pyarrow and not _PYARROW_AVAILABLE:\n raise ImportError(\n \"'pyarrow' is required when using 'read_csv(..., use_pyarrow=True)'.\"\n )\n\n if (\n use_pyarrow\n and dtypes is None\n and n_rows is None\n and n_threads is None\n and encoding == \"utf8\"\n and not low_memory\n and null_values is None\n and parse_dates\n ):\n include_columns = None\n\n if columns:\n if not has_header:\n # Convert 'column_1', 'column_2', ... column names to 'f0', 'f1', ... column names for pyarrow,\n # if CSV file does not contain a header.\n include_columns = [f\"f{int(column[7:]) - 1}\" for column in columns]\n else:\n include_columns = columns\n\n if not columns and projection:\n # Convert column indices from projection to 'f0', 'f1', ... column names for pyarrow.\n include_columns = [f\"f{column_idx}\" for column_idx in projection]\n\n with _prepare_file_arg(file, **storage_options) as data:\n tbl = pa.csv.read_csv(\n data,\n pa.csv.ReadOptions(\n skip_rows=skip_rows, autogenerate_column_names=not has_header\n ),\n pa.csv.ParseOptions(delimiter=sep),\n pa.csv.ConvertOptions(\n column_types=None,\n include_columns=include_columns,\n include_missing_columns=ignore_errors,\n ),\n )\n\n if not has_header:\n # Rename 'f0', 'f1', ... columns names autogenated by pyarrow to 'column_1', 'column_2', ...\n tbl = tbl.rename_columns(\n [f\"column_{int(column[1:]) + 1}\" for column in tbl.column_names]\n )\n\n df = from_arrow(tbl, rechunk)\n if new_columns:\n return update_columns(df, new_columns) # type: ignore\n return df # type: ignore\n\n if new_columns and dtypes and isinstance(dtypes, dict):\n current_columns = None\n\n # As new column names are not available yet while parsing the CSV file, rename column names in\n # dtypes to old names (if possible) so they can be used during CSV parsing.\n if columns:\n if len(columns) < len(new_columns):\n raise ValueError(\n \"More new colum names are specified than there are selected columns.\"\n )\n\n # Get column names of requested columns.\n current_columns = columns[0 : len(new_columns)]\n elif not has_header:\n # When there are no header, column names are autogenerated (and known).\n\n if projection:\n if columns and len(columns) < len(new_columns):\n raise ValueError(\n \"More new colum names are specified than there are selected columns.\"\n )\n # Convert column indices from projection to 'column_1', 'column_2', ... 
column names.\n current_columns = [\n f\"column_{column_idx + 1}\" for column_idx in projection\n ]\n else:\n # Generate autogenerated 'column_1', 'column_2', ... column names for new column names.\n current_columns = [\n f\"column_{column_idx}\"\n for column_idx in range(1, len(new_columns) + 1)\n ]\n else:\n # When a header is present, column names are not known yet.\n\n if len(dtypes) <= len(new_columns):\n # If dtypes dictionary contains less or same amount of values than new column names\n # a list of dtypes can be created if all listed column names in dtypes dictionary\n # appear in the first consecutive new column names.\n dtype_list = [\n dtypes[new_column_name]\n for new_column_name in new_columns[0 : len(dtypes)]\n if new_column_name in dtypes\n ]\n\n if len(dtype_list) == len(dtypes):\n dtypes = dtype_list\n\n if current_columns and isinstance(dtypes, dict):\n new_to_current = {\n new_column: current_column\n for new_column, current_column in zip(new_columns, current_columns)\n }\n # Change new column names to current column names in dtype.\n dtypes = {\n new_to_current.get(column_name, column_name): column_dtype\n for column_name, column_dtype in dtypes.items()\n }\n\n with _prepare_file_arg(file, **storage_options) as data:\n df = DataFrame._read_csv(\n file=data,\n has_header=has_header,\n columns=columns if columns else projection,\n sep=sep,\n comment_char=comment_char,\n quote_char=quote_char,\n skip_rows=skip_rows,\n dtypes=dtypes,\n null_values=null_values,\n ignore_errors=ignore_errors,\n parse_dates=parse_dates,\n n_threads=n_threads,\n infer_schema_length=infer_schema_length,\n batch_size=batch_size,\n n_rows=n_rows,\n encoding=encoding,\n low_memory=low_memory,\n rechunk=rechunk,\n skip_rows_after_header=skip_rows_after_header,\n row_count_name=row_count_name,\n row_count_offset=row_count_offset,\n )\n\n if new_columns:\n return update_columns(df, new_columns)\n return df\n\n\ndef scan_csv(\n file: Union[str, Path],\n has_header: bool = True,\n sep: str = \",\",\n comment_char: Optional[str] = None,\n quote_char: Optional[str] = r'\"',\n skip_rows: int = 0,\n dtypes: Optional[Dict[str, Type[DataType]]] = None,\n null_values: Optional[Union[str, List[str], Dict[str, str]]] = None,\n ignore_errors: bool = False,\n cache: bool = True,\n with_column_names: Optional[Callable[[List[str]], List[str]]] = None,\n infer_schema_length: Optional[int] = 100,\n n_rows: Optional[int] = None,\n encoding: str = \"utf8\",\n low_memory: bool = False,\n rechunk: bool = True,\n skip_rows_after_header: int = 0,\n row_count_name: Optional[str] = None,\n row_count_offset: int = 0,\n parse_dates: bool = False,\n **kwargs: Any,\n) -> LazyFrame:\n \"\"\"\n Lazily read from a CSV file or multiple files via glob patterns.\n\n This allows the query optimizer to push down predicates and\n projections to the scan level, thereby potentially reducing\n memory overhead.\n\n Parameters\n ----------\n file\n Path to a file.\n has_header\n Indicate if the first row of dataset is a header or not.\n If set to False, column names will be autogenrated in the\n following format: ``column_x``, with ``x`` being an\n enumeration over every column in the dataset starting at 1.\n sep\n Character to use as delimiter in the file.\n comment_char\n Character that indicates the start of a comment line, for\n instance ``#``.\n quote_char\n Single byte character used for csv quoting, default = ``\"``.\n Set to None to turn off special handling and escaping of quotes.\n skip_rows\n Start reading after ``skip_rows`` lines. 
The header will be parsed at this offset.\n dtypes\n Overwrite dtypes during inference.\n null_values\n Values to interpret as null values. You can provide a:\n - ``str``: All values equal to this string will be null.\n - ``List[str]``: A null value per column.\n - ``Dict[str, str]``: A dictionary that maps column name to a\n null value string.\n ignore_errors\n Try to keep reading lines if some lines yield errors.\n First try ``infer_schema_length=0`` to read all columns as\n ``pl.Utf8`` to check which values might cause an issue.\n cache\n Cache the result after reading.\n with_column_names\n Apply a function over the column names.\n This can be used to update a schema just in time, thus before\n scanning.\n infer_schema_length\n Maximum number of lines to read to infer schema.\n If set to 0, all columns will be read as ``pl.Utf8``.\n If set to ``None``, a full table scan will be done (slow).\n n_rows\n Stop reading from CSV file after reading ``n_rows``.\n encoding\n Allowed encodings: ``utf8`` or ``utf8-lossy``.\n Lossy means that invalid utf8 values are replaced with ``�``\n characters.\n low_memory\n Reduce memory usage in expense of performance.\n rechunk\n Reallocate to contiguous memory when all chunks/ files are parsed.\n skip_rows_after_header\n Skip these number of rows when the header is parsed\n row_count_name\n If not None, this will insert a row count column with give name into the DataFrame\n row_count_offset\n Offset to start the row_count column (only use if the name is set)\n parse_dates\n Try to automatically parse dates. If this does not succeed,\n the column remains of data type ``pl.Utf8``.\n\n Examples\n --------\n >>> (\n ... pl.scan_csv(\"my_long_file.csv\") # lazy, doesn't do a thing\n ... .select(\n ... [\"a\", \"c\"]\n ... ) # select only 2 columns (other columns will not be read)\n ... .filter(\n ... pl.col(\"a\") > 10\n ... ) # the filter is pushed down the the scan, so less data read in memory\n ... .fetch(100) # pushed a limit of 100 rows to the scan level\n ... ) # doctest: +SKIP\n\n We can use `with_column_names` to modify the header before scanning:\n\n >>> df = pl.DataFrame(\n ... {\"BrEeZaH\": [1, 2, 3, 4], \"LaNgUaGe\": [\"is\", \"terrible\", \"to\", \"read\"]}\n ... )\n >>> df.to_csv(\"mydf.csv\")\n >>> pl.scan_csv(\n ... \"mydf.csv\", with_column_names=lambda cols: [col.lower() for col in cols]\n ... 
).fetch()\n shape: (4, 2)\n ┌─────────┬──────────┐\n │ breezah ┆ language │\n │ --- ┆ --- │\n │ i64 ┆ str │\n ╞═════════╪══════════╡\n │ 1 ┆ is │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤\n │ 2 ┆ terrible │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤\n │ 3 ┆ to │\n ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤\n │ 4 ┆ read │\n └─────────┴──────────┘\n\n\n \"\"\"\n\n # Map legacy arguments to current ones and remove them from kwargs.\n has_header = kwargs.pop(\"has_headers\", has_header)\n dtypes = kwargs.pop(\"dtype\", dtypes)\n n_rows = kwargs.pop(\"stop_after_n_rows\", n_rows)\n\n if isinstance(file, (str, Path)):\n file = format_path(file)\n\n return LazyFrame.scan_csv(\n file=file,\n has_header=has_header,\n sep=sep,\n comment_char=comment_char,\n quote_char=quote_char,\n skip_rows=skip_rows,\n dtypes=dtypes,\n null_values=null_values,\n ignore_errors=ignore_errors,\n cache=cache,\n with_column_names=with_column_names,\n infer_schema_length=infer_schema_length,\n n_rows=n_rows,\n low_memory=low_memory,\n rechunk=rechunk,\n skip_rows_after_header=skip_rows_after_header,\n encoding=encoding,\n row_count_name=row_count_name,\n row_count_offset=row_count_offset,\n parse_dates=parse_dates,\n )\n\n\ndef scan_ipc(\n file: Union[str, Path],\n n_rows: Optional[int] = None,\n cache: bool = True,\n rechunk: bool = True,\n row_count_name: Optional[str] = None,\n row_count_offset: int = 0,\n **kwargs: Any,\n) -> LazyFrame:\n \"\"\"\n Lazily read from an Arrow IPC (Feather v2) file or multiple files via glob patterns.\n\n This allows the query optimizer to push down predicates and projections to the scan level,\n thereby potentially reducing memory overhead.\n\n Parameters\n ----------\n file\n Path to a IPC file.\n n_rows\n Stop reading from IPC file after reading ``n_rows``.\n cache\n Cache the result after reading.\n rechunk\n Reallocate to contiguous memory when all chunks/ files are parsed.\n row_count_name\n If not None, this will insert a row count column with give name into the DataFrame\n row_count_offset\n Offset to start the row_count column (only use if the name is set)\n \"\"\"\n\n # Map legacy arguments to current ones and remove them from kwargs.\n n_rows = kwargs.pop(\"stop_after_n_rows\", n_rows)\n\n if isinstance(file, (str, Path)):\n file = format_path(file)\n\n return LazyFrame.scan_ipc(\n file=file,\n n_rows=n_rows,\n cache=cache,\n rechunk=rechunk,\n row_count_name=row_count_name,\n row_count_offset=row_count_offset,\n )\n\n\ndef scan_parquet(\n file: Union[str, Path],\n n_rows: Optional[int] = None,\n cache: bool = True,\n parallel: bool = True,\n rechunk: bool = True,\n row_count_name: Optional[str] = None,\n row_count_offset: int = 0,\n **kwargs: Any,\n) -> LazyFrame:\n \"\"\"\n Lazily read from a parquet file or multiple files via glob patterns.\n\n This allows the query optimizer to push down predicates and projections to the scan level,\n thereby potentially reducing memory overhead.\n\n Parameters\n ----------\n file\n Path to a file.\n n_rows\n Stop reading from parquet file after reading ``n_rows``.\n cache\n Cache the result after reading.\n parallel\n Read the parquet file in parallel. 
The single threaded reader consumes less memory.\n rechunk\n In case of reading multiple files via a glob pattern rechunk the final DataFrame into contiguous memory chunks.\n row_count_name\n If not None, this will insert a row count column with give name into the DataFrame\n row_count_offset\n Offset to start the row_count column (only use if the name is set)\n \"\"\"\n\n # Map legacy arguments to current ones and remove them from kwargs.\n n_rows = kwargs.pop(\"stop_after_n_rows\", n_rows)\n\n if isinstance(file, (str, Path)):\n file = format_path(file)\n\n return LazyFrame.scan_parquet(\n file=file,\n n_rows=n_rows,\n cache=cache,\n parallel=parallel,\n rechunk=rechunk,\n row_count_name=row_count_name,\n row_count_offset=row_count_offset,\n )\n\n\ndef read_ipc_schema(\n file: Union[str, BinaryIO, Path, bytes]\n) -> Dict[str, Type[DataType]]:\n \"\"\"\n Get a schema of the IPC file without reading data.\n\n Parameters\n ----------\n file\n Path to a file or a file-like object.\n\n Returns\n -------\n Dictionary mapping column names to datatypes\n \"\"\"\n return _ipc_schema(file)\n\n\ndef read_avro(\n file: Union[str, Path, BytesIO, BinaryIO],\n columns: Optional[Union[List[int], List[str]]] = None,\n n_rows: Optional[int] = None,\n **kwargs: Any,\n) -> DataFrame:\n \"\"\"\n Read into a DataFrame from Apache Avro format.\n\n Parameters\n ----------\n file\n Path to a file or a file-like object.\n columns\n Columns to select. Accepts a list of column indices (starting at zero) or a list of column names.\n n_rows\n Stop reading from Apache Avro file after reading ``n_rows``.\n\n Returns\n -------\n DataFrame\n \"\"\"\n if isinstance(file, (str, Path)):\n file = format_path(file)\n if columns is None:\n columns = kwargs.pop(\"projection\", None)\n\n return DataFrame._read_avro(file, n_rows=n_rows, columns=columns)\n\n\ndef read_ipc(\n file: Union[str, BinaryIO, BytesIO, Path, bytes],\n columns: Optional[Union[List[int], List[str]]] = None,\n n_rows: Optional[int] = None,\n use_pyarrow: bool = _PYARROW_AVAILABLE,\n memory_map: bool = True,\n storage_options: Optional[Dict] = None,\n row_count_name: Optional[str] = None,\n row_count_offset: int = 0,\n **kwargs: Any,\n) -> DataFrame:\n \"\"\"\n Read into a DataFrame from Arrow IPC (Feather v2) file.\n\n Parameters\n ----------\n file\n Path to a file or a file-like object.\n If ``fsspec`` is installed, it will be used to open remote files.\n columns\n Columns to select. Accepts a list of column indices (starting at zero) or a list of column names.\n n_rows\n Stop reading from IPC file after reading ``n_rows``.\n Only valid when `use_pyarrow=False`.\n use_pyarrow\n Use pyarrow or the native rust reader.\n memory_map\n Memory map underlying file. This will likely increase performance.\n Only used when ``use_pyarrow=True``.\n storage_options\n Extra options that make sense for ``fsspec.open()`` or a particular storage connection, e.g. 
host, port, username, password, etc.\n row_count_name\n If not None, this will insert a row count column with give name into the DataFrame\n row_count_offset\n Offset to start the row_count column (only use if the name is set)\n\n Returns\n -------\n DataFrame\n \"\"\"\n\n # Map legacy arguments to current ones and remove them from kwargs.\n n_rows = kwargs.pop(\"stop_after_n_rows\", n_rows)\n\n if columns is None:\n columns = kwargs.pop(\"projection\", None)\n\n if use_pyarrow:\n if row_count_name is not None:\n raise ValueError(\n \"``row_count_name`` cannot be used with ``use_pyarrow=True``.\"\n )\n if n_rows:\n raise ValueError(\"``n_rows`` cannot be used with ``use_pyarrow=True``.\")\n\n storage_options = storage_options or {}\n with _prepare_file_arg(file, **storage_options) as data:\n if use_pyarrow:\n if not _PYARROW_AVAILABLE:\n raise ImportError(\n \"'pyarrow' is required when using 'read_ipc(..., use_pyarrow=True)'.\"\n )\n\n tbl = pa.feather.read_table(data, memory_map=memory_map, columns=columns)\n return DataFrame._from_arrow(tbl)\n\n return DataFrame._read_ipc(\n data,\n columns=columns,\n n_rows=n_rows,\n row_count_name=row_count_name,\n row_count_offset=row_count_offset,\n )\n\n\ndef read_parquet(\n source: Union[str, Path, BinaryIO, BytesIO, bytes],\n columns: Optional[Union[List[int], List[str]]] = None,\n n_rows: Optional[int] = None,\n use_pyarrow: bool = False,\n memory_map: bool = True,\n storage_options: Optional[Dict] = None,\n parallel: bool = True,\n row_count_name: Optional[str] = None,\n row_count_offset: int = 0,\n **kwargs: Any,\n) -> DataFrame:\n \"\"\"\n Read into a DataFrame from a parquet file.\n\n Parameters\n ----------\n source\n Path to a file, or a file-like object. If the path is a directory, that directory will be used\n as partition aware scan.\n If ``fsspec`` is installed, it will be used to open remote files.\n columns\n Columns to select. Accepts a list of column indices (starting at zero) or a list of column names.\n n_rows\n Stop reading from parquet file after reading ``n_rows``.\n Only valid when `use_pyarrow=False`.\n use_pyarrow\n Use pyarrow instead of the rust native parquet reader. The pyarrow reader is more stable.\n memory_map\n Memory map underlying file. This will likely increase performance.\n Only used when ``use_pyarrow=True``.\n storage_options\n Extra options that make sense for ``fsspec.open()`` or a particular storage connection, e.g. host, port, username, password, etc.\n parallel\n Read the parquet file in parallel. 
The single threaded reader consumes less memory.\n row_count_name\n If not None, this will insert a row count column with give name into the DataFrame\n row_count_offset\n Offset to start the row_count column (only use if the name is set)\n **kwargs\n kwargs for [pyarrow.parquet.read_table](https://arrow.apache.org/docs/python/generated/pyarrow.parquet.read_table.html)\n\n Returns\n -------\n DataFrame\n \"\"\"\n\n # Map legacy arguments to current ones and remove them from kwargs.\n n_rows = kwargs.pop(\"stop_after_n_rows\", n_rows)\n\n if columns is None:\n columns = kwargs.pop(\"projection\", None)\n\n if use_pyarrow:\n if n_rows:\n raise ValueError(\"``n_rows`` cannot be used with ``use_pyarrow=True``.\")\n\n storage_options = storage_options or {}\n with _prepare_file_arg(source, **storage_options) as source_prep:\n if use_pyarrow:\n if not _PYARROW_AVAILABLE:\n raise ImportError(\n \"'pyarrow' is required when using 'read_parquet(..., use_pyarrow=True)'.\"\n )\n\n return from_arrow( # type: ignore[return-value]\n pa.parquet.read_table(\n source_prep,\n memory_map=memory_map,\n columns=columns,\n **kwargs,\n )\n )\n\n return DataFrame._read_parquet(\n source_prep,\n columns=columns,\n n_rows=n_rows,\n parallel=parallel,\n row_count_name=row_count_name,\n row_count_offset=row_count_offset,\n )\n\n\ndef read_json(source: Union[str, IOBase], json_lines: bool = False) -> DataFrame:\n \"\"\"\n Read into a DataFrame from JSON format.\n\n Parameters\n ----------\n source\n Path to a file or a file-like object.\n json_lines\n Toggle between \"JSON\" and \"NDJSON\" format\n \"\"\"\n return DataFrame._read_json(source, json_lines)\n\n\ndef read_sql(\n sql: Union[List[str], str],\n connection_uri: str,\n partition_on: Optional[str] = None,\n partition_range: Optional[Tuple[int, int]] = None,\n partition_num: Optional[int] = None,\n protocol: Optional[str] = None,\n) -> DataFrame:\n \"\"\"\n Read a SQL query into a DataFrame.\n Make sure to install connectorx>=0.2\n\n # Sources\n Supports reading a sql query from the following data sources:\n\n * Postgres\n * Mysql\n * Sqlite\n * Redshift (through postgres protocol)\n * Clickhouse (through mysql protocol)\n\n ## Source not supported?\n If a database source is not supported, pandas can be used to load the query:\n\n >>> import pandas as pd\n >>> df = pl.from_pandas(pd.read_sql(sql, engine)) # doctest: +SKIP\n\n Parameters\n ----------\n sql\n raw sql query.\n connection_uri\n connectorx connection uri:\n - \"postgresql://username:password@server:port/database\"\n partition_on\n the column on which to partition the result.\n partition_range\n the value range of the partition column.\n partition_num\n how many partitions to generate.\n protocol\n backend-specific transfer protocol directive; see connectorx documentation for details.\n\n Examples\n --------\n\n ## Single threaded\n Read a DataFrame from a SQL query using a single thread:\n\n >>> uri = \"postgresql://username:password@server:port/database\"\n >>> query = \"SELECT * FROM lineitem\"\n >>> pl.read_sql(query, uri) # doctest: +SKIP\n\n ## Using 10 threads\n Read a DataFrame in parallel using 10 threads by automatically partitioning the provided SQL on the partition column:\n\n >>> uri = \"postgresql://username:password@server:port/database\"\n >>> query = \"SELECT * FROM lineitem\"\n >>> pl.read_sql(\n ... query, uri, partition_on=\"partition_col\", partition_num=10\n ... 
) # doctest: +SKIP\n\n    ## Using 2 threads\n    Read a DataFrame in parallel using 2 threads by explicitly providing two SQL queries:\n\n    >>> uri = \"postgresql://username:password@server:port/database\"\n    >>> queries = [\n    ...     \"SELECT * FROM lineitem WHERE partition_col <= 10\",\n    ...     \"SELECT * FROM lineitem WHERE partition_col > 10\",\n    ... ]\n    >>> pl.read_sql(queries, uri) # doctest: +SKIP\n\n    \"\"\"\n    if _WITH_CX:\n        tbl = cx.read_sql(\n            conn=connection_uri,\n            query=sql,\n            return_type=\"arrow\",\n            partition_on=partition_on,\n            partition_range=partition_range,\n            partition_num=partition_num,\n            protocol=protocol,\n        )\n        return from_arrow(tbl) # type: ignore[return-value]\n    else:\n        raise ImportError(\n            \"connectorx is not installed. \" \"Please run pip install connectorx>=0.2.2\"\n        )\n","repo_name":"yutiansut/qadataframe-rs","sub_path":"qadataframe/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":34138,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"}
+{"seq_id":"29684355500","text":"# File: EasterSunday.py\n# Student: .zihao hong\n# UT EID:zh4473\n# Course Name: CS303E\n# \n# Date Created:2/5/2021\n# Date Last Modified: 2/5/2021\n# Description of Program: easter sunday date calculator\ny = int(input(\"Enter a year: \"))\na=y%19\nb=y//100\nc=y%100\nd=b//4\ne=b%4\ng=(8*b+13)//25\nh=(19*a+b-d-g+15)%30\nj=c//4\nk=c%4\nm=(a+11*h)//319\nr=(2*e+2*j-k-h+m+32)%7\nn=(h-m+r+90)//25\np=(h-m+r+n+19)%32\nprint(\"In\",y,\"Easter Sunday is on month\",n,\"and day\",p)\ninput(\"press enter to exit\")","repo_name":"Moon-is-beautiful/zihao2021","sub_path":"EasterSunday.py","file_name":"EasterSunday.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"20154285342","text":"#! 
/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n# Copyright (C) 2019 Stefan Hagmann\r\nimport sys\r\nimport cv2\r\nfrom pathlib import Path\r\nimport PyQt5\r\nfrom PyQt5 import uic\r\nfrom PyQt5.QtWidgets import QApplication\r\nfrom classes.OvelayIcons.IconStack import IconStack\r\nfrom classes.OvelayIcons.OpenCVLib import OpenCVLib\r\n\r\n\r\nclass MAIN_UI(PyQt5.QtWidgets.QMainWindow):\r\n\r\n def __init__(self, parent=None):\r\n super(MAIN_UI, self).__init__(parent)\r\n self.rootDir = Path(__file__).parent\r\n uifile = self.rootDir.joinpath('main.ui')\r\n self.ui = uic.loadUi(uifile, self) # load UI inside QMainWindow\r\n self.ui.btn_add.clicked.connect(lambda: self.addIcon())\r\n self.ui.btn_remove.clicked.connect(lambda: self.removeIcon())\r\n\r\n self.cv = OpenCVLib()\r\n self.CVTest(self.ui.image.pixmap())\r\n\r\n # IconStack, Image and IconPath\r\n self.stack = IconStack(self.ui.image.pixmap())\r\n self.stack.repaint_event.connect(self.repaint_event)\r\n \r\n def repaint_event(self):\r\n \"\"\" Pixmap has changed > repaint \"\"\"\r\n self.ui.image.setPixmap(self.stack.getPixmap())\r\n\r\n def closeEvent(self, event):\r\n ''' window tries to close '''\r\n # event.ignore()\r\n self.stack.close()\r\n pass\r\n\r\n def addIcon(self):\r\n self.stack.addExamIconON()\r\n self.stack.addExamIconOFF()\r\n self.stack.addFileReceivedOK()\r\n self.stack.addFileReceivedERROR()\r\n self.stack.addOffline()\r\n\r\n def removeIcon(self):\r\n self.stack.removeExamIconON()\r\n self.stack.removeExamIconOFF()\r\n self.stack.removeFileReceivedOK()\r\n self.stack.removeFileReceivedERROR()\r\n self.stack.removeOffline()\r\n \r\n def CVTest(self, pixmap):\r\n # TEST 1 ------------------------------------------------\r\n Qimg = pixmap.toImage()\r\n img = self.cv.QImage2MAT(Qimg)\r\n # or load from file\r\n # image = cv2.imread(\"mexico.jpg\")\r\n overlay = img.copy()\r\n output = img.copy()\r\n # red rectangle for demo\r\n cv2.rectangle(overlay, (420, 205), (595, 385), (0, 0, 255), -1)\r\n # apply the overlay\r\n # img, alpha, original, beta, gamma, output\r\n alpha = 0.5\r\n cv2.addWeighted(overlay, alpha, output, 1 - alpha, 1.0, output)\r\n # write back\r\n self.ui.image.setPixmap(self.cv.MAT2QPixmap(output))\r\n\r\n # TEST 2 ------------------------------------------------\r\n # Icon Test\r\n icon = self.cv.readPNG(\"overlay_icons/file_ok.png\")\r\n icon = self.cv.resizeTo(icon, 64, 64)\r\n pixmap = self.cv.overlayIcon(self.ui.image.pixmap(), icon, 100, 10)\r\n # write back\r\n self.ui.image.setPixmap(pixmap)\r\n\r\n\r\ndef main():\r\n app = QApplication(sys.argv)\r\n\r\n # show main Window\r\n gui = MAIN_UI() #noqa\r\n gui.show()\r\n app.exec_()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"SManAT/PyStuff","sub_path":"Qt/OvelayIcons/OvelayIconsTest.py","file_name":"OvelayIconsTest.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"41129070674","text":"from ex1_utils import *\nfrom gamma import gammaDisplay\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nfrom Assigment1.ex1_utils import imDisplay, imReadAndConvert, transformRGB2YIQ, LOAD_RGB, LOAD_GRAY_SCALE, myID, \\\n quantizeImage, hsitogramEqualize\n\n\ndef histEqDemo(img_path: str, rep: int):\n img = imReadAndConvert(img_path, rep)\n imgeq, histOrg, histEq = hsitogramEqualize(img)\n\n # Display cumsum\n cumsum = np.cumsum(histOrg)\n cumsumEq = np.cumsum(histEq)\n plt.gray()\n plt.plot(range(256), cumsum, 
'r')\n    plt.plot(range(256), cumsumEq, 'g')\n\n    # Display the images\n    plt.figure()\n    plt.imshow(img)\n\n    plt.figure()\n    plt.imshow(imgeq)\n    plt.show()\n\n\ndef quantDemo(img_path: str, rep: int):\n    img = imReadAndConvert(img_path, rep)\n    st = time.time()\n\n    img_lst, err_lst = quantizeImage(img, 3, 10)\n\n    print(\"Time:%.2f\" % (time.time() - st))\n    print(\"Error 0:\\t %f\" % err_lst[0])\n    print(\"Error last:\\t %f\" % err_lst[-1])\n\n    plt.gray()\n    plt.imshow(img_lst[0])  # first quantized image\n    plt.figure()\n    plt.imshow(img_lst[-1])  # last quantized image\n\n    plt.figure()\n    plt.plot(err_lst, 'r')\n    plt.show()\n\n\ndef main():\n    print(\"ID:\", myID())\n    img_path = 'ameen.JPG'\n\n    # Basic read and display\n    imDisplay(img_path, LOAD_GRAY_SCALE)\n    imDisplay(img_path, LOAD_RGB)\n    # Convert Color spaces to RGB\n    img = imReadAndConvert(img_path, LOAD_RGB)\n    ###\n    f,ax = plt.subplots(1, 2)\n    ax[0].imshow(img)\n    yiq_img = transformRGB2YIQ(img)\n    ax[1].imshow(yiq_img)\n    plt.show()\n\n    # Image histEq\n    print(\"The hist Demo 1\")\n    histEqDemo(img_path, LOAD_GRAY_SCALE)\n    print(\"The hist Demo 2\")\n    histEqDemo(img_path, LOAD_RGB)\n\n    # Image Quantization\n    print(\"The quantization\")\n    quantDemo(img_path, LOAD_GRAY_SCALE)\n    quantDemo(img_path, LOAD_RGB)\n\n    # Gamma\n    gammaDisplay(img_path, LOAD_GRAY_SCALE)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"aimanyounises1/Computer_vision_assigments_and_practicing","sub_path":"Assignment1/ex1_main.py","file_name":"ex1_main.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"3117124642","text":"# -*- coding: utf-8 -*-\nimport time\nimport traceback\nimport os\n\ndef removeTmpData(spath, dpath):\n    \n    #rmDirs = ['cfgfile','imgstatus','origimage','otfollowlist','otlist','otlistsub','starlist','varlist']\n    keepDirs = ['cutimages','otfollowimg']\n    \n    tdirs = os.listdir(spath)\n    tDateDirs = []\n    for tdir in tdirs:\n        if tdir[0]=='1' and len(tdir)==6:\n            tDateDirs.append(tdir)\n    tDateDirs.sort()\n    for tdir in tDateDirs:\n        print(\"process %s\"%(tdir))\n        sDirs = \"%s/%s\"%(spath, tdir)\n        #dDirs = \"%s/%s\"%(dpath, tdir)\n        \n        machines = os.listdir(sDirs)\n        for tm in machines:\n            sDirs2 = \"%s/%s\"%(sDirs, tm)\n            tdataDirs = os.listdir(sDirs2)\n            for tdataDir in tdataDirs:\n                if tdataDir not in keepDirs:\n                    sDirs3 = \"%s/%s\"%(sDirs2, tdataDir)\n                    if os.path.exists(sDirs3):\n                        os.system(\"rm -rf %s\"%(sDirs3))\n        \n        time.sleep(1)\n    \ndef batchCopy(spath, dpath):\n    \n    tdirs = os.listdir(spath)\n    tDateDirs = []\n    for tdir in tdirs:\n        if tdir[0]=='1' and len(tdir)==6:\n            tDateDirs.append(tdir)\n    tDateDirs.sort()\n    for tdir in tDateDirs:\n        print(\"process %s\"%(tdir))\n        try:\n            tcmd = \"cd %s ; tar -c %s | ssh gwac@172.28.8.8 'tar -xf - -C %s'\"%(spath,tdir, dpath)\n            print(tcmd)\n            os.system(tcmd)\n        except Exception as e:\n            tstr = \"backup %s error: %s\"%(tdir, str(e))\n            print(tstr)\n            tstr = traceback.format_exc()\n            print(tstr)\n        \n        time.sleep(1)\n\nif __name__ == '__main__':\n    \n    #spath = '/data/gwac_data'\n    #dpath = '/data/mini_gwac_data'\n    spath = '/data/gwac_data/mini-gwac-data-backup'\n    dpath = '/data/mini_gwac_data/mini-gwac-data-backup'\n    \n    #removeTmpData(spath, dpath)\n    batchCopy(spath, dpath)\n    \n","repo_name":"archord/OTSimulation","sub_path":"gwac_pipeline/miniGWACDataCleanAndBackup.py","file_name":"miniGWACDataCleanAndBackup.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"40428557098","text":"# Team 56\n# Yishan Shi 883166\n# Huiya Chen 894933\n# Tong He 867488\n# Yao Wang 869992\n# Aaron Robins 694098\n\nimport json\nimport linkDB\n\nregionMap = {\n    'adelaide': ['Adelaide - Central and Hills',\n                 'Adelaide - North',\n                 'Adelaide - South',\n                 'Adelaide - West'],\n    'brisbane': ['Brisbane - East',\n                 'Brisbane - North',\n                 'Brisbane - South',\n                 'Brisbane - West',\n                 'Brisbane Inner City',\n                 'Ipswich'],\n    'canberra': ['Australian Capital Territory'],\n    'hobart': ['Hobart'],\n    'melbourne': ['Melbourne - Inner',\n                  'Melbourne - Inner East',\n                  'Melbourne - Inner South',\n                  'Melbourne - North East',\n                  'Melbourne - North West',\n                  'Melbourne - Outer East',\n                  'Melbourne - South East',\n                  'Melbourne - West',\n                  'Geelong'],\n    'perth': ['Perth - Inner',\n              'Perth - North East',\n              'Perth - North West',\n              'Perth - South East',\n              'Perth - South West'],\n    'sydney': ['Sydney - Baulkham Hills and Hawkesbury',\n               'Sydney - Blacktown',\n               'Sydney - City and Inner South',\n               'Sydney - Eastern Suburbs',\n               'Sydney - Inner South West',\n               'Sydney - Inner West',\n               'Sydney - North Sydney and Hornsby',\n               'Sydney - Northern Beaches',\n               'Sydney - Outer South West',\n               'Sydney - Outer West and Blue Mountains',\n               'Sydney - Parramatta',\n               'Sydney - Ryde',\n               'Sydney - South West',\n               'Sydney - Sutherland']}\n\n\ndef loadaurin():\n    aurindata = linkDB.get_aurindata('aurin','cd19601ff48fec927421d634e205006d')\n\n    #print(aurindata)\n    incomedata = []\n\n    for city in aurindata[\"features\"]:\n        regionInfo = {}\n        pro = city[\"properties\"]\n        if(pro[\"sa4_name16\"] in regionMap[\"adelaide\"] or pro[\"sa4_name16\"] in regionMap[\"brisbane\"]\n                or pro[\"sa4_name16\"] in regionMap[\"canberra\"] or pro[\"sa4_name16\"] in regionMap[\"hobart\"]\n                or pro[\"sa4_name16\"] in regionMap[\"melbourne\"] or pro[\"sa4_name16\"] in regionMap[\"perth\"]\n                or pro[\"sa4_name16\"] in regionMap[\"sydney\"]):\n            regionInfo[\"name\"] = pro[\"sa4_name16\"]\n            regionInfo[\"high\"] = pro[\"hi_4000_more_tot\"]\n            incomedata.append(regionInfo)\n\n    incomedata = sorted(incomedata, key=lambda e: e[\"name\"], reverse=False)\n    return incomedata\n\ndef pick_region(reg):\n\n    result = linkDB.get_count_score(region=reg)\n\n    return result\n\ndef pick_brand(reg):\n    result = linkDB.region_brand(region=reg)\n    result = sorted(result, key=lambda e: e[\"vader\"], reverse=False)\n    return result\n\ndef income_drilldown():\n    city = loadaurin()\n    australia = [{\"name\":\"adelaide\",\"hightotal\":0,\"region\":[]},{\"name\":\"brisbane\",\"hightotal\":0,\"region\":[]},\n                 {\"name\": \"canberra\", \"hightotal\": 0, \"region\": []},{\"name\":\"hobart\",\"hightotal\":0,\"region\":[]},\n                 {\"name\": \"melbourne\", \"hightotal\": 0, \"region\": []},{\"name\":\"perth\",\"hightotal\":0,\"region\":[]},\n                 {\"name\": \"sydney\", \"hightotal\": 0, \"region\": []},]\n\n    for city in city:\n        if(city[\"name\"] in regionMap[\"adelaide\"]):\n            australia[0][\"hightotal\"] = australia[0][\"hightotal\"] + city[\"high\"]\n            australia[0][\"region\"].append(city)\n\n        if city[\"name\"] in regionMap[\"brisbane\"]:\n            australia[1][\"hightotal\"] = australia[1][\"hightotal\"] + city[\"high\"]\n            australia[1][\"region\"].append(city)\n\n        if city[\"name\"] in regionMap[\"canberra\"]:\n            australia[2][\"hightotal\"] = australia[2][\"hightotal\"] + city[\"high\"]\n            australia[2][\"region\"].append(city)\n\n        if city[\"name\"] in regionMap[\"hobart\"]:\n            australia[3][\"hightotal\"] = australia[3][\"hightotal\"] + city[\"high\"]\n            australia[3][\"region\"].append(city)\n\n        if city[\"name\"] in regionMap[\"melbourne\"]:\n            australia[4][\"hightotal\"] = australia[4][\"hightotal\"] + city[\"high\"]\n            australia[4][\"region\"].append(city)\n\n        if city[\"name\"] in regionMap[\"perth\"]:\n            australia[5][\"hightotal\"] = australia[5][\"hightotal\"] + city[\"high\"]\n            australia[5][\"region\"].append(city)\n\n        if city[\"name\"] in regionMap[\"sydney\"]:\n            australia[6][\"hightotal\"] = australia[6][\"hightotal\"] + city[\"high\"]\n            australia[6][\"region\"].append(city)\n\n\n    return australia\n\ndef income_supercar():\n    income = loadaurin()\n    vader = linkDB.get_supercar()\n\n    #print(len(income))\n    #print(len(vader))\n\n    for i1 in range(0,len(vader)):\n        for i2 in range(0, len(income)):\n            if income[i2][\"name\"] == vader[i1][\"region\"]:\n                vader[i1][\"people\"] = income[i2][\"high\"]\n\n    #print(vader)\n    return vader\n\n\ndef update_tweet():\n    res = linkDB.total_count()\n    return res\n#income_supercar()\n\n#print(income_drilldown())\n\n\n\n","repo_name":"Fish-WY/CCCproject2-team56","sub_path":"web/deal.py","file_name":"deal.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"}
+{"seq_id":"3652197888","text":"import pandas as pd\nimport sys\nfrom xml.dom import minidom\nimport numpy as np\nfrom nltk.stem.porter import PorterStemmer\nfrom unidecode import unidecode\nsys.path.insert(1, '../utils')\nfrom read_config import read_config_file\nfrom nltk.tokenize import RegexpTokenizer\n\nclass gerador():\n    def __init__(self, input_path, results_path) -> None:\n        self.inputs_path = input_path\n        self.results_path = results_path\n        self.config_dict = {}\n        read_config_file(self.inputs_path + 'GLI.CFG', self.config_dict)\n        print('Read Config File')\n        print(self.config_dict)\n    \n    def _parse_xml(self):\n        records_data = []\n        for xml_file in self.config_dict['LEIA']:\n            parsed_xml = minidom.parse(self.inputs_path + xml_file)\n            records = parsed_xml.getElementsByTagName('RECORD')\n            \n            for record in records:\n                recordnum = record.getElementsByTagName('RECORDNUM')[0].firstChild.nodeValue.strip()\n                abstract = record.getElementsByTagName('ABSTRACT')\n                extract = record.getElementsByTagName('EXTRACT')\n                \n                if len(abstract) > 0:\n                    text = unidecode(abstract[0].firstChild.nodeValue.strip().lower())\n                elif len(extract) > 0:\n                    text = unidecode(extract[0].firstChild.nodeValue.strip().lower())\n                else:\n                    text = ''\n                \n                records_data.append({'RECORDNUM': recordnum, 'TEXT': text})\n        records_df = pd.DataFrame(records_data)\n        return records_df\n    def genereate_list(self):\n        gli_dict = {}\n        stemmer = PorterStemmer()\n        records_df = self._parse_xml()\n        for _, row in records_df.iterrows():\n            text = row['TEXT']\n            recordnum = row['RECORDNUM']\n            tokenizer = RegexpTokenizer(r'[a-zA-Z]{3,}')\n            words = tokenizer.tokenize(text) \n            if self.config_dict['USE'][0] == 'STEMMER':\n                words = [stemmer.stem(w) for w in words]\n            for w in words:\n                if w in gli_dict:\n                    gli_dict[w].append(recordnum)\n                else:\n                    gli_dict[w] = [recordnum]\n        print('Generate List With ' + self.config_dict['USE'][0])\n        glig_df = pd.DataFrame({'WORDS': gli_dict.keys(), 'RECORDS': gli_dict.values()})\n        glig_df.to_csv(self.results_path+self.config_dict['ESCREVA'][0], index=False, sep=';')\n        print(glig_df)","repo_name":"MariaLuizaCw/busca-e-minera-o-de-texto","sub_path":"task2/src/gerador.py","file_name":"gerador.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
datetime\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n \r\nbegin_time = datetime.datetime.now()\r\n\r\nimport Equation\r\ndictionary = Equation.calculate()\r\n# Make a data frame\r\ndf=pd.DataFrame(dictionary)\r\n \r\n# Initialize the figure style\r\nplt.style.use('seaborn-darkgrid')\r\n \r\n# create a color palette\r\npalette = plt.get_cmap('Set1')\r\n \r\n# multiple line plot\r\nnum=0\r\nfor column in df.drop('x', axis=1):\r\n num+=1\r\n \r\n # Find the right spot on the plot\r\n plt.subplot(9,9, num)\r\n \r\n # plot every group, but discrete\r\n for v in df.drop('x', axis=1):\r\n plt.plot(df['x'], df[v], marker='', color='grey', linewidth=0.6, alpha=0.3)\r\n \r\n # Plot the lineplot\r\n plt.plot(df['x'], df[column], marker='', color=palette(0), linewidth=2.4, alpha=0.9, label=\"\")\r\n \r\n # Same limits for every chart\r\n plt.xlim(0,3000)\r\n plt.ylim(0,2000)\r\n \r\n # Not ticks everywhere\r\n if num in range(7) :\r\n plt.tick_params(labelbottom='off')\r\n if num not in [1,4,7] :\r\n plt.tick_params(labelleft='off')\r\n \r\n # Add title\r\n \r\n\r\n# general title\r\n\r\n \r\n# Axis titles\r\n\r\n\r\nprint(datetime.datetime.now() - begin_time)\r\n# Show the graph\r\nplt.show()\r\n","repo_name":"Frederick-Goupil/Twisted-Rectangle","sub_path":"MultiplGraph.py","file_name":"MultiplGraph.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33569048944","text":"from __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import cPickle as pickle\nfrom six.moves import range\n\npickle_file = 'notMNIST.pickle'\nseed = 19\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)\n\nimage_size = 28\nnum_labels = 10\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)\n\nbatch_size = 128\nlogs_path = \"/tmp/ud730\"\n\n# n_hidden_nodes = [20]\nn_hidden_nodes = [1024]\nn_all_nodes = [image_size * image_size, *n_hidden_nodes, num_labels]\n\ngraph = tf.Graph()\nwith graph.as_default():\n with tf.name_scope('input'):\n tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n\n # Variables.\n\n weights = [None] * (len(n_hidden_nodes) + 1)\n\n biases = [None] * 
(len(n_hidden_nodes) + 1)\n\n train_net_layer_out = [None] * (len(n_hidden_nodes) + 1)\n valid_net_layer_out = [None] * (len(n_hidden_nodes) + 1)\n test_net_layer_out = [None] * (len(n_hidden_nodes) + 1)\n\n for i in range(len(n_hidden_nodes) + 1):\n print(i, n_all_nodes[i], n_all_nodes[i + 1])\n with tf.name_scope('weights' + str(i)):\n weights[i] = tf.Variable(tf.truncated_normal([n_all_nodes[i], n_all_nodes[i + 1]], seed=seed))\n with tf.name_scope('biases' + str(i)):\n biases[i] = tf.Variable(tf.zeros([n_all_nodes[i + 1]]))\n with tf.name_scope('layers' + str(i)):\n if i == 0:\n train_net_layer_out[0] = tf.nn.relu(tf.matmul(tf_train_dataset, weights[0]) + biases[0])\n valid_net_layer_out[0] = tf.nn.relu(tf.matmul(tf_valid_dataset, weights[0]) + biases[0])\n test_net_layer_out[0] = tf.nn.relu(tf.matmul(tf_test_dataset, weights[0]) + biases[0])\n elif i < len(n_hidden_nodes):\n train_net_layer_out[i] = tf.nn.relu(tf.matmul(train_net_layer_out[i - 1], weights[i]) + biases[i])\n valid_net_layer_out[i] = tf.nn.relu(tf.matmul(valid_net_layer_out[i - 1], weights[i]) + biases[i])\n test_net_layer_out[i] = tf.nn.relu(tf.matmul(test_net_layer_out[i - 1], weights[i]) + biases[i])\n else:\n train_net_layer_out[i] = tf.matmul(train_net_layer_out[i - 1], weights[i]) + biases[i]\n valid_net_layer_out[i] = tf.matmul(valid_net_layer_out[i - 1], weights[i]) + biases[i]\n test_net_layer_out[i] = tf.matmul(test_net_layer_out[i - 1], weights[i]) + biases[i]\n\n # Training computation.\n logits = train_net_layer_out[-1]\n with tf.name_scope('cross_entropy'):\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n\n # Optimizer.\n with tf.name_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n\n # Predictions for the training, validation, and test data.\n with tf.name_scope(\"softmax\"):\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(valid_net_layer_out[-1])\n test_prediction = tf.nn.softmax(test_net_layer_out[-1])\n\n with tf.name_scope('accuracy'):\n tf.summary.scalar('loss', loss)\n summary_op = tf.summary.merge_all()\n\n\nnum_steps = 10001\n\ndef accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])\n\nwith tf.Session(graph=graph) as session:\n writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())\n tf.global_variables_initializer().run()\n print(\"Initialized\")\n for step in range(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_dataset[offset:(offset + batch_size), :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n\n feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}\n _, l, predictions, summary = session.run([optimizer, loss, train_prediction, summary_op], feed_dict=feed_dict)\n # _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n writer.add_summary(summary, step + i)\n if (step % 500 == 0):\n print(\"Minibatch loss at step %d: %f, %.1f%%, %.1f%%\" % (step, l, accuracy(predictions, batch_labels), accuracy(valid_prediction.eval(), valid_labels)))\n print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))\n","repo_name":"hotohoto/ud730","sub_path":"notMNIST/notMNIST.py","file_name":"notMNIST.py","file_ext":"py","file_size_in_byte":5694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} 
+{"seq_id":"13501054451","text":"import json\r\nimport time\r\nimport os\r\nimport csv\r\n\r\nuser_feed_items = []\r\nheader = ['videoId','publish','caption','username','thumbnail','url','comments','plays','shares','likes']\r\n\r\nfolderpath = 'C://narasi-project//parser-tiktok//input//narasi'\r\nlist_response = os.listdir(folderpath)\r\nprint(list_response)\r\n\r\nfor x in list_response:\r\n filename = os.path.join(folderpath,x)\r\n print(filename)\r\n\r\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\r\n\r\n directory = os.path.join('./data',timestr)\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n\r\n with open(filename, 'r',encoding=\"utf-8\") as j:\r\n output_response = json.loads(j.read())\r\n\r\n feed_item = output_response[\"itemList\"]\r\n\r\n nama_file = feed_item[0][\"author\"][\"uniqueId\"] + \".csv\"\r\n\r\n for i in range(len(feed_item)):\r\n user_feed_items.append(feed_item[i])\r\n \r\n\r\n with open(os.path.join(directory,nama_file), 'w', encoding='UTF8', newline='') as f:\r\n writer = csv.writer(f)\r\n writer.writerow(header)\r\n for item in user_feed_items:\r\n writer.writerow([\r\n item[\"id\"],\r\n time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(item[\"createTime\"])),\r\n item[\"desc\"],\r\n item[\"author\"][\"uniqueId\"],\r\n item[\"video\"][\"cover\"],\r\n 'https://www.tiktok.com/@' + item[\"author\"][\"nickname\"] + '/video/' + item[\"id\"],\r\n item[\"stats\"][\"commentCount\"],\r\n item[\"stats\"][\"playCount\"],\r\n item[\"stats\"][\"shareCount\"],\r\n item[\"stats\"][\"diggCount\"]\r\n ])\r\n \r\n\r\n\r\n","repo_name":"bangkit-pambudi/Response_Parsing_Tiktok","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34529311808","text":"from asyncio.windows_events import NULL\nimport math\nimport random\nimport os\n\nos.system('cls')\n\np = []\nq = [0,0,0,0]\nper = [0.0,0.0,0.0,0.0]\nRankSelect = [4,4,4,4]\n\nMax_Num = 1048576\nbin_len = len(bin(Max_Num)[2:])\nprint(bin_len)\n\n\n\nNoT = 0\n\n#MutationProbability = random.randint(1,100)\nMutationProbability = 80\n\n#Initial Population\nfor i in range(4):\n\tx = random.randint(0,Max_Num - 1)\n\n\t#binary values encoding\n\tx_b = bin(x)[2:]\n\tp.append(x_b)\n\n\tprint(int(x_b,2),end=\"\")\n\tprint(\":\",end=\"\")\n\tprint(x_b)\n\ndef clearConsole():\n command = 'clear'\n if os.name in ('nt', 'dos'): # If Machine is running on Windows, use cls\n command = 'cls'\n os.system(command)\n\n\ndef EndCondition(f):\n\tfor i in f:\n\t\t#Fitness function f(x) Decimal value\n\t\tif int(i,2) >= (Max_Num - 1):\n\t\t\treturn i\n\treturn NULL\n\n\n#---binary data processing---\n#---selection sort---???\n'''\nfor i in range(0,3):\n\tfor j in range(i + 1,4):\n\t\tif int(p[i],2) <= int(p[j],2):\n\t\t\tEmptyBox = p[i]\n\t\t\tp[i] = p[j]\n\t\t\tp[j] = EmptyBox\n'''\n\n#---0 stuffing---\nprint(\"\")\nfor i in range(4):\n\tp[i] = '{0:020d}'.format(int(p[i]))\n\tprint(p[i])\n\n\nwhile True:\n\tsolution = EndCondition(p)\n\t\n\n\tif solution == NULL:\n\t\tif NoT % 10 == 0 and NoT != 0:\n\n\t\t\tclearConsole()\n\n\t\t\tprint(\"~~~\")\n\t\t\tprint(\"-------------------------------------------------\")\n\t\t\t\n\n\t\tprint(\"root:\",end=\"\")\n\t\tprint(p)\n\t\tNoT += 1\n\n\t\t#---Roulette Wheel Selection Sum---\n\t\tFitness_Sum = 0\n\t\tfor i in range(4):\n\t\t\tFitness_Sum += int(p[i],2)\n\n\t\t#---%of total(range)---\n\t\tfor i in range(4):\n\t\t\tper[i] = int(p[i],2) / 
Fitness_Sum\n\t\t\tif i > 0:\n\t\t\t\tper[i] = per[i] + per[i - 1]\n\t\t\n\t\t#---Rounded down fourth decimal place---\n\t\tfor i in range(4):\n\t\t\tper[i] = math.floor(per[i] * 1000) / 1000\n\t\t\n\t\t\n\t\tprint(\"\")\n\t\tprint(\" %of total:\",end=\"\")\n\t\tprint(per)\n\n\t\t#---Rank select---\n\t\tfor i in range(4):\n\t\t\tRank = random.randrange(0,1000)\n\t\t\tRank /= 1000\n\t\t\tfor j in range(4):\n\t\t\t\tif Rank < per[j]:\n\t\t\t\t\tRankSelect[i] = j + 1\n\t\t\t\t\tprint(\"Rank:\",end=\"\")\n\t\t\t\t\tprint(Rank)\n\t\t\t\t\tbreak\n\t\t\n\n\t\tprint(RankSelect)\n\n\t\t#---Crossover----\n\t\tfor i in range(0,3,2):\n\t\t\tCrossoverPoint = random.randint(20 - bin_len,19)\n\n\t\t\tprint(\"\")\n\t\t\tprint(\"CrossoverPoint:\",end=\"\")\n\t\t\tprint(CrossoverPoint)\n\t\t\t\n\n\t\t\tP1 = p[RankSelect[i] - 1]\n\t\t\tP2 = p[RankSelect[i + 1] - 1]\n\n\t\t\tC1 = P1[:CrossoverPoint] + P2[CrossoverPoint:]\n\t\t\tC2 = P2[:CrossoverPoint] + P1[CrossoverPoint:]\n\n\t\t\tq[i] = C1\n\t\t\tq[i + 1] = C2\n\t\t\t\n\t\t\t\n\t\t\tprint(\"P1:\",end=\"\")\n\t\t\tprint(P1,end=\"\")\n\t\t\tprint(\" P2:\",end=\"\")\n\t\t\tprint(P2)\n\t\t\tprint(\"C1:\",end=\"\")\n\t\t\tprint(C1,end=\"\")\n\t\t\tprint(\" C2:\",end=\"\")\n\t\t\tprint(C2)\n\n\t\tfor i in range(4):\n\t\t\tq_pro = list(q[i])\n\n\t\t\tThreshold = random.randint(0,100)\n\n\t\t\t'''\n\t\t\tprint(\"\")\n\t\t\tprint(\"MutationProbability:\",end=\"\")\n\t\t\tprint(MutationProbability)\n\t\t\tprint(\"Threshold:\",end=\"\")\n\t\t\tprint(Threshold)\n\t\t\t'''\t\t\n\n\t\t\tif Threshold <= MutationProbability:\n\t\t\t\trand_posi = random.randint(22 - bin_len,20)\n\n\t\t\t\tif bin_len >= 17 and rand_posi <= 14:\n\t\t\t\t\trand_length = random.randint(1,6)\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\trand_length = random.randint(1,21 - rand_posi)\n\n\t\t\t\tfor j in range(rand_length):\n\t\t\t\t\tif q_pro[rand_posi - 1 + j] == \"0\":\n\t\t\t\t\t\tq_pro[rand_posi - 1 + j] = \"1\"\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tq_pro[rand_posi - 1 + j] = \"0\"\n\n\t\t\tss = \"\"\n\t\t\tfor s in q_pro:\n\t\t\t\tss += s\n\t\t\tq[i] = str(ss)\n\n\n\t\tprint(\"\")\n\t\tprint(\"solution:\",end=\"\")\n\t\tprint(q,end=\"\")\n\t\tprint(\" Trial:\",end=\"\")\n\t\tprint(NoT)\n\n\n\t\tp = q\n\t\tprint(\"-------------------------------------------------\")\n\n\t\t\n\n\t\t#solution = \"1111111100\"\n\t\t#break\n\n\telse:\n\t\tprint(p)\n\t\tbreak\n\n\n\nprint(\"\")\nprint(\"Answer is \",end=\"\")\nprint(int(solution,2),end=\"\")\nprint(\" Trials are \",end=\"\")\nprint(NoT)\n\nprint(\"MutationProbability:\",end=\"\")\nprint(MutationProbability,end=\"\")\nprint(\"%\")\n\t\t\t\n","repo_name":"FuRinYa1030/Creative_Programming_Lesson","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"44005412349","text":"import torch.nn as nn\nfrom torch.nn import Linear, ReLU\nimport torch_geometric\nfrom torch_geometric.nn import (\n GCNConv,\n InnerProductDecoder,\n Sequential,\n global_add_pool,\n NNConv,\n BatchNorm,\n global_mean_pool,\n)\n# from torch_geometric.utils import to_dense_adj, dense_to_sparse, to_dense_batch\nfrom torch_geometric.data import Data, Batch\nimport torch\nimport torch.nn.functional as F\nfrom math import ceil\n\n\nclass CIDER(nn.Module):\n def __init__(\n self,\n in_channels,\n task_model,\n hidden_channels1=32,\n hidden_channels2=64,\n hidden_channels3=10,\n decoder_act=torch.relu,\n ) -> None:\n super(CIDER, self).__init__()\n\n 
self.gcn_shared = Sequential(\n \"x, edge_index\",\n [\n (GCNConv(in_channels, hidden_channels1), \"x, edge_index -> x\"),\n ],\n )\n self.gcn_mu_causal = GCNConv(hidden_channels1, hidden_channels2)\n self.gcn_mu_non_causal = GCNConv(hidden_channels1, hidden_channels2)\n self.gcn_logvar_causal = GCNConv(hidden_channels1, hidden_channels2)\n self.gcn_logvar_non_causal = GCNConv(hidden_channels1, hidden_channels2)\n\n self.decoder_causal = InnerProductDecoderMLP(hidden_dims=None, act=decoder_act)\n self.decoder_non_causal = InnerProductDecoderMLP(hidden_dims=None, act=decoder_act)\n self.task_model = task_model\n self.relu = ReLU()\n self.hidden_channels2 = hidden_channels2\n\n def encode(self, x, edge_index, edge_attr=None):\n x = self.relu(self.gcn_shared(x, edge_index))\n mu_causal = self.gcn_mu_causal(x, edge_index)\n mu_non_causal = self.gcn_mu_non_causal(x, edge_index)\n logvar_causal = self.gcn_logvar_causal(x, edge_index)\n logvar_non_causal = self.gcn_logvar_non_causal(x, edge_index)\n return mu_causal, mu_non_causal, logvar_causal, logvar_non_causal\n\n def decode(self, z_causal, z_non_causal, edge_index):\n return self.decoder_causal(z_causal, edge_index), self.decoder_non_causal(\n z_non_causal, edge_index\n )\n\n def _sample_encode(\n self,\n x,\n edge_index,\n edge_attr=None,\n num_sample=5,\n batch=None,\n device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n ):\n \"\"\"\n Sample from the latent space and encode the sampled latent variables in Input Conditioned X, P(Z|X)\n Args:\n x (torch.Tensor): Node feature of input graph.\n edge_index (torch.nn.Module): Edge index of input graph(adjacency list, pairs of nodes).\n edge_weight (torch.Tensor or None, optional): Edge feature of input graph.\n num_sample (int, optional): Number of couterfactual samples to take from the latent space, default is 5.\n batch (torch.Tensor or None, optional): Optional batch information.\n \"\"\"\n\n ## mu_causal.shape: [#nodes, #hiddeen_channels2]\n ## mu_non_causal.shape: [#nodes, #hiddeen_channels2]\n\n mu_causal, mu_non_causal, logvar_causal, logvar_non_causal = self.encode(\n x, edge_index, edge_attr\n )\n\n sampled_z_causal = self.reparameterize(mu_causal, logvar_causal, device)\n sampled_z_non_causal = self.reparameterize(\n mu_non_causal.repeat(num_sample, 1),\n logvar_non_causal.repeat(num_sample, 1),\n device,\n )\n\n ## return sampled_z_causal.shape: [#nodes, #hiddeen_channels2]\n ## sampled_z_non_causal.shape: [#nodes*#samples, #hiddeen_channels2]\n return sampled_z_causal, sampled_z_non_causal\n\n def forward(\n self,\n x,\n edge_index,\n edge_attr=None,\n batch=None,\n device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n ):\n \"\"\"\n Performs a forward pass through the model for a causal graph prediction task.\n\n Args:\n x (torch.Tensor): Node feature of input graph.\n edge_index (torch.nn.Module): Edge index of input graph(adjacency list, pairs of nodes).\n edge_attr (torch.Tensor or None, optional): Edge feature of input graph.\n batch (torch.Tensor or None, optional): Optional batch information.\n device (torch.device, optional): Device to use for computation.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,]: Tuple of edge weights for causal and non-causal edges, and the mean and logvar of the latent variables for causal and non-causal edges.\n \"\"\"\n mu_causal, mu_non_causal, logvar_causal, logvar_non_causal = self.encode(\n x, edge_index, edge_attr\n )\n z_causal = 
self.reparameterize(mu_causal, logvar_causal, device)\n\n z_non_causal = self.reparameterize(mu_non_causal, logvar_non_causal, device)\n\n edge_weight_causal, edge_weight_non_causal = self.decode(\n z_causal, z_non_causal, edge_index\n )\n return (\n edge_weight_causal,\n edge_weight_non_causal,\n mu_causal,\n mu_non_causal,\n logvar_causal,\n logvar_non_causal,\n )\n\n def CF_forward(\n self,\n data: torch_geometric.data.Data,\n causal_criterion,\n num_sample=5,\n sparsity=0.8,\n device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n ):\n \"\"\"\n Performs a forward pass through the model for counterfactual samples\n\n Args:\n data (torch_geometric.data.Data): Input graph data.\n causal_criterion (torch.nn.Module): Criterion used to calculate the causal loss.\n num_sample (int, optional): Number of counterfactual samples.\n sparsity (float, optional): Sparsity level for enforcing edge sparsity.\n device (torch.device, optional): Device to use for computation.\n\n Returns:\n Tuple[torch.Tensor, float]: Loss_causal and accuracy (correct/num_sample).\n \"\"\"\n\n # Encode the input data and sample causal and non-causal latent variables\n ## x.shape: [#node, #feature]\n ## edge_index.shape: [2, #edge]\n ## y.shape: [#node, #class]\n ## sampled_z_causal.shape: [#node, #hidden_channels2]\n ## sampled_z_non_causal.shape: [#node*#num_sample, #hidden_channels2]\n\n x = data.x\n edge_index = data.edge_index\n # y = data.y\n edge_attr = data.edge_attr\n batch = data.batch\n\n sampled_z_causal, sampled_z_non_causal = self._sample_encode(\n x,\n edge_index,\n num_sample=num_sample,\n edge_attr=edge_attr,\n batch=batch,\n device=device,\n )\n\n # Decode the latent variables to obtain sampled causal adjacency list\n ## edge_weight_causal.shape: [#edge]\n edge_weight_causal = self.decoder_causal(sampled_z_causal, edge_index)\n\n # merge counterfactual samples into one batch data\n data_list = [data for i in range(num_sample)]\n data_batch = (Batch.from_data_list(data_list)).to(device)\n\n\n # TODO: ERROR: the `sampled_z_non_causal` always 1. 
\n # Decode the latent variables to obtain sampled non-causal adjacency list\n ## edge_weight_non_causal.shape: [#edge*#num_sample]\n edge_weight_non_causal = self.decoder_non_causal(\n sampled_z_non_causal, data_batch.edge_index\n )\n\n # compute uniform loss for non-causal graph\n edge_index_non_causal = data_batch.edge_index.T[edge_weight_non_causal >= 0].T\n sampled_y_non_cuasal = self.task_model(\n data_batch.x, edge_index_non_causal, batch=data_batch.batch\n )\n uniform_target = (\n torch.ones_like(sampled_y_non_cuasal) / self.task_model.label_dim\n ).to(device)\n loss_uniform = F.kl_div(\n F.softmax(sampled_y_non_cuasal), uniform_target, reduction=\"batchmean\"\n )\n\n\n # TODO: There are some error, the threshold is not correct, it should be computed by edge_weight_causal only\n # repeat causal edge weights for each counterfactual sample and add then to edge_weight\n # edge_weight = edge_weight_causal.repeat(num_sample) + edge_weight_non_causal\n edge_weight = edge_weight_causal.repeat(num_sample)\n\n # Select top-k edges based on the weight threshold to enforce sparsity\n topk = min(\n ceil(edge_weight_causal.shape[0] * sparsity),\n edge_weight_causal.shape[0] - 1,\n )\n\n # reshape the edge_weight to [#num_sample, #edge] to select top-k edges for each sample\n edge_weight_reshape = edge_weight.reshape(num_sample, -1)\n\n # sort the edge weights in descending order in first dim and select the top-k edges for each sample(every row represents a sample)\n threshold = (\n edge_weight_reshape.sort(descending=True, dim=1)\n .values.topk(topk)\n .values[:, -1]\n )\n\n # expand the threshold to the same shape as edge_weight_reshape\n ## threshold.shape: [#num_sample, #edge]\n threshold = threshold.unsqueeze(1).expand_as(edge_weight_reshape)\n\n # reshape the threshold to verctor and calculate the edge mask\n ## edge_mask.shape: [#num_sample*#edge]\n # warning: the > is require to avoid the all edge_weight are 0\n edge_mask = (edge_weight_reshape > threshold).reshape(-1) | (edge_weight_non_causal>0.5)\n\n # select the top-k edges for each sample\n data_batch.edge_index = data_batch.edge_index.T[edge_mask].T\n\n # Pass the sampled input and adjacency list to the task model\n sampled_y = self.task_model(\n data_batch.x, data_batch.edge_index, batch=data_batch.batch\n )\n\n # Calculate the causal loss using the criterion and repeat the target labels\n loss_causal = causal_criterion(sampled_y, data_batch.y)\n\n # Calculate the accuracy by comparing the predicted and target labels\n correct = float(sampled_y.argmax(dim=1).eq(data_batch.y).sum().item())\n\n return loss_uniform, loss_causal, correct / data_batch.num_graphs\n\n @torch.no_grad()\n def get_explainations(\n self,\n x,\n edge_index,\n edge_attr=None,\n device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n ):\n \"\"\"\n Returns the explainations for the input data\n Args:\n x (torch.Tensor): Input node features.\n edge_index (torch.Tensor): Input edge indices.\n edge_attr (torch.Tensor, optional): Input edge features.\n device (torch.device, optional): Device to use for computation\n Returns:\n Dict[str, torch.Tensor]: Dictionary containing the explainations of different sparsity.\n \"\"\"\n self.eval()\n sparsities = [round(i / (i + 1), 2) for i in range(1, 10)][::-1]\n true_sparsities = [round(0.1 * i, 2) for i in range(9, 0, -1)]\n explainations = {}\n for sparsity, true_sparsity in zip(sparsities, true_sparsities):\n # mu_causal, _, logvar_causal, _ = self.encode(\n # x, edge_index, edge_attr)\n mu_causal, 
mu_non_causal, logvar_causal, logvar_non_causal = self.encode(\n x, edge_index, edge_attr\n )\n z_causal = self.reparameterize(mu_causal, logvar_causal, device)\n z_non_causal = self.reparameterize(mu_non_causal, logvar_non_causal, device)\n edge_weight_causal = self.decoder_causal(z_causal, edge_index)\n # edge_weight_non_causal = self.decoder_non_causal(z_non_causal, edge_index)\n topk = max(ceil(edge_index.shape[1] * sparsity), 1)\n threshold = (\n edge_weight_causal.sort(descending=True).values.topk(topk).values[-1]\n )\n # print(str(true_sparsity), ' causal ', edge_weight_causal)\n # print(str(true_sparsity), ' non ausal ', edge_weight_non_causal)\n noise = (torch.randn(edge_weight_causal.shape[0]//2)*1e-4).repeat_interleave(2)\n noise = noise.to(device)\n edge_index = edge_index.T[edge_weight_causal+noise > threshold].T\n explainations[str(true_sparsity)] = Data(x=x, edge_index=edge_index)\n\n return explainations\n\n def reparameterize(\n self, mu: torch.Tensor, logvar: torch.Tensor, device\n ) -> torch.Tensor:\n \"\"\"\n return the sampling from the latent Gaussian distribution with reparameterization trick\n :param mu: (Tensor) Mean of the latent Gaussian\n :param logvar: (Tensor) 2*log(Standard deviation) of the latent Gaussian\n :return : (Tensor) Sampled latent vector\n \"\"\"\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std).to(device)\n return eps * std + mu\n\n\n# class InnerProductDecoderMLP(nn.Module):\n# \"\"\"Decoder for using inner product for prediction.\"\"\"\n\n# def __init__(\n# self, input_dim, hidden_dim1, hidden_dim2, dropout=0.1, act=torch.sigmoid\n# ):\n# super(InnerProductDecoderMLP, self).__init__()\n\n# # Fully connected layers\n# self.fc = nn.Linear(input_dim, hidden_dim1)\n# self.fc2 = nn.Linear(hidden_dim1, hidden_dim2)\n\n# self.dropout = dropout\n# self.act = act\n\n# # Initialize the parameters\n# self._reset_parameters()\n\n# def _reset_parameters(self):\n# \"\"\"\n# Reset model parameters using Xavier initialization.\n# \"\"\"\n# torch.nn.init.xavier_uniform_(self.fc.weight)\n# torch.nn.init.zeros_(self.fc.bias)\n# torch.nn.init.xavier_uniform_(self.fc2.weight)\n# torch.nn.init.zeros_(self.fc2.bias)\n\n# def forward_all(self, z):\n# \"\"\"\n# Compute the forward pass for the entire graph.\n\n# Args:\n# z (torch.Tensor): The latent space Z.\n\n# Returns:\n# torch.Tensor: The adjacency matrix of the graph.\n# \"\"\"\n# z = self._forward_fc(z)\n# adj = self.act(torch.matmul(z, z.t()))\n# return adj\n\n# def forward(self, z: torch.Tensor, edge_index: torch.Tensor) -> torch.Tensor:\n# \"\"\"\n# Compute the forward pass for the given node-pairs.\n\n# Args:\n# z (torch.Tensor): The latent space Z.\n# edge_index (torch.Tensor): Index tensor representing node-pairs in the graph.\n\n# Returns:\n# torch.Tensor: The predicted values for the node-pairs.\n# \"\"\"\n# z = self._forward_fc(z)\n\n# edge_weight = self.act((z[edge_index[0]] * z[edge_index[1]]).sum(dim=1))\n\n# return edge_weight\n\n# def _forward_fc(self, z):\n# \"\"\"\n# Compute the forward pass through the fully connected layers.\n\n# Args:\n# z (torch.Tensor): The latent space Z.\n\n# Returns:\n# torch.Tensor: The output after passing through the fully connected layers.\n# \"\"\"\n# z1 = torch.relu(self.fc(z))\n# z2 = torch.sigmoid(self.fc2(z1))\n# z3 = F.dropout(z2, self.dropout, training=self.training)\n# return z3\n\nclass InnerProductDecoderMLP(nn.Module):\n \"\"\"Decoder for using inner product for prediction.\"\"\"\n\n def __init__(self, hidden_dims, dropout=0.1, 
act=torch.sigmoid):\n super(InnerProductDecoderMLP, self).__init__()\n self.dropout = dropout\n self.act = act\n\n # Initialize hidden_dims as an empty list if None is provided\n hidden_dims = hidden_dims or []\n\n # Create the layers based on hidden_dims\n self.fc_layers = nn.ModuleList()\n for i in range(1, len(hidden_dims)):\n self.fc_layers.append(nn.Linear(hidden_dims[i-1], hidden_dims[i]))\n\n # Output layer is only added if there are hidden layers\n if hidden_dims:\n self.output_layer = nn.Linear(hidden_dims[-1], hidden_dims[0])\n self._reset_parameters()\n\n def _reset_parameters(self):\n \"\"\"\n Reset model parameters using Xavier initialization for all layers.\n \"\"\"\n for layer in self.fc_layers:\n torch.nn.init.xavier_uniform_(layer.weight)\n torch.nn.init.zeros_(layer.bias)\n torch.nn.init.xavier_uniform_(self.output_layer.weight)\n torch.nn.init.zeros_(self.output_layer.bias)\n\n def _forward_fc(self, z):\n \"\"\"\n Compute the forward pass through the fully connected layers.\n \"\"\"\n for layer in self.fc_layers:\n z = F.relu(layer(z))\n z = F.dropout(z, self.dropout, training=self.training)\n if self.fc_layers:\n z = self.output_layer(z)\n return z\n\n def forward_all(self, z):\n \"\"\"\n Compute the forward pass for the entire graph.\n \"\"\"\n if self.fc_layers:\n z = self._forward_fc(z)\n adj = self.act(torch.matmul(z, z.t()))\n return adj\n\n def forward(self, z: torch.Tensor, edge_index: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Compute the forward pass for the given node-pairs.\n \"\"\"\n if self.fc_layers:\n z = self._forward_fc(z)\n edge_weight = self.act((z[edge_index[0]] * z[edge_index[1]]).sum(dim=1))\n return edge_weight\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"zhangqib/CIDER","sub_path":"cider.py","file_name":"cider.py","file_ext":"py","file_size_in_byte":17389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"16649696844","text":"import numpy as np\nimport torch\nfrom librep.estimators.ae.torch.models.topological_ae.topological_ae import (\n TopologicallyRegularizedAutoencoder\n)\nfrom tqdm.notebook import tqdm\nfrom sklearn.model_selection import train_test_split\nfrom librep.base.transform import Transform\nfrom librep.config.type_definitions import ArrayLike\nfrom torch.optim import Adam\nimport matplotlib.pyplot as plt\nimport pickle\nimport random\nimport os\nimport shutil\n# from torch.nn.parallel import DistributedDataParallel as DDP\n# import torch.distributed as dist\n# import torch.nn.DataParallel\n# torch.nn.DataParallel(model, device_ids=[0, 1, 2])\n\n\nclass TopologicalDimensionalityReduction(Transform):\n\n def __init__(\n self, ae_model='ConvolutionalAutoencoder', ae_kwargs=None,\n lam=1., patience=None, num_epochs=500, batch_size=64,\n # input_shape=(-1, 1, 28, 28),\n cuda_device_name='cuda:0',\n # start_dim=180,\n latent_dim=10,\n save_dir='data/', save_tag=0, save_frequency=250, verbose=False\n ):\n # os.environ['MASTER_ADDR'] = 'localhost'\n # os.environ['MASTER_PORT'] = '12355'\n # dist.init_process_group(\"gloo\", rank=0, world_size=6)\n self.save_dir = save_dir\n self.save_tag = save_tag\n self.save_frequency = save_frequency\n self.patience = patience\n self.num_epochs = num_epochs\n self.model_name = ae_model\n self.model_lambda = lam\n # self.model_start_dim = start_dim\n self.model_latent_dim = latent_dim\n self.ae_kwargs = ae_kwargs\n self.verbose = verbose\n # Setting cuda device\n self.cuda_device = torch.device(cuda_device_name)\n self.batch_size = 
batch_size\n # self.input_shape = input_shape\n self.max_loss = None\n self.current = {\n 'epoch': 0,\n 'train_recon_error': None,\n 'train_topo_error': None,\n 'train_error': None,\n 'val_recon_error': None,\n 'val_topo_error': None,\n 'val_error': None,\n 'last_error': None\n }\n self.history = {\n 'epoch': [],\n 'train_recon_error': [],\n 'train_topo_error': [],\n 'train_error': [],\n 'val_recon_error': [],\n 'val_topo_error': [],\n 'val_error': []\n }\n\n def fit(self, X: ArrayLike, y: ArrayLike = None, X_val: ArrayLike = None, y_val: ArrayLike = None):\n # Computing input dimensions for the model\n # in the second dim of X.shape\n # ----------------------------------------------\n # When the input is 2d:\n # ----------------------------------------------\n original_dim = X.shape[1]\n # self.model_start_dim = original_dim\n # Setting self.input_shape\n self.input_shape = (-1, 1, original_dim)\n if self.ae_kwargs['num_CL'] == 0:\n self.input_shape = (-1, original_dim)\n # Setting ae_kwargs['input_dims']\n self.ae_kwargs['input_dims'] = (1, original_dim)\n # ----------------------------------------------\n # When the input is 3d (length, dim1, dim2): TODO\n # ----------------------------------------------\n \n # Initializing all\n self.model = TopologicallyRegularizedAutoencoder(\n autoencoder_model=self.model_name,\n lam=self.model_lambda, ae_kwargs=self.ae_kwargs\n )\n self.model.to(self.cuda_device)\n # Optimizer\n self.optimizer = Adam(self.model.parameters(), lr=1e-3, weight_decay=1e-5)\n \n # Save file name\n random_number = random.randint(1000, 9999)\n best_file_name = '{}_{}_{}_{}'.format(\n self.model_name, random_number, self.model_lambda, self.save_tag)\n best_file_name = best_file_name + '.toerase' # custom extension\n \n # First assignation\n train_X = X\n train_Y = y\n val_X = X_val\n val_Y = y_val\n \n # If it is None, then update\n if val_X is None:\n # Splitting X into train and validation\n train_X, val_X, train_Y, val_Y = train_test_split(\n X, y, random_state=0,\n train_size=.8,\n stratify=y\n )\n train_data_loader = torch.utils.data.DataLoader(\n dataset=train_X,\n batch_size=self.batch_size,\n shuffle=True\n )\n val_data_loader = torch.utils.data.DataLoader(\n dataset=val_X,\n batch_size=self.batch_size,\n shuffle=True\n )\n patience = self.patience\n max_loss = self.max_loss\n # Preparing for plot\n self.train_final_error = []\n self.train_recon_error = []\n self.train_topo_error = []\n\n self.val_final_error = []\n self.val_recon_error = []\n self.val_topo_error = []\n # Setting cuda\n # cuda0 = torch.device('cuda:0')\n for epoch in range(self.num_epochs):\n epoch_number = self.current['epoch'] + 1\n epoch_train_loss = []\n epoch_train_ae_loss = []\n epoch_train_topo_error = []\n epoch_val_loss = []\n epoch_val_ae_loss = []\n epoch_val_topo_error = []\n self.model.train()\n for data in train_data_loader:\n # reshaped_data = np.reshape(data, self.input_shape)\n # in_tensor = torch.tensor(reshaped_data, device=self.cuda_device).float()\n in_tensor = torch.reshape(data, self.input_shape)\n in_tensor = in_tensor.to(self.cuda_device)\n in_tensor = in_tensor.float()\n loss, loss_components = self.model(in_tensor)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n epoch_train_loss.append(loss.item())\n epoch_train_ae_loss.append(loss_components['loss.autoencoder'].item())\n epoch_train_topo_error.append(loss_components['loss.topo_error'].item())\n # Verificar despues self.model()\n for data in val_data_loader:\n # reshaped_data = np.reshape(data, 
self.input_shape)\n # in_tensor = torch.tensor(reshaped_data, device=self.cuda_device).float()\n in_tensor = torch.reshape(data, self.input_shape)\n in_tensor = in_tensor.to(self.cuda_device)\n in_tensor = in_tensor.float()\n loss, loss_components = self.model(in_tensor)\n epoch_val_loss.append(loss.item())\n epoch_val_ae_loss.append(loss_components['loss.autoencoder'].item())\n epoch_val_topo_error.append(loss_components['loss.topo_error'].item())\n self.current['epoch'] = self.current['epoch'] + 1\n self.current['train_recon_error'] = np.mean(epoch_train_ae_loss)\n self.current['train_topo_error'] = np.mean(epoch_train_topo_error)\n self.current['train_error'] = np.mean(epoch_train_loss)\n self.current['val_recon_error'] = np.mean(epoch_val_ae_loss)\n self.current['val_topo_error'] = np.mean(epoch_val_topo_error)\n self.current['val_error'] = np.mean(epoch_val_loss)\n self.history['epoch'].append(self.current['epoch'])\n self.history['train_recon_error'].append(self.current['train_recon_error'])\n self.history['train_topo_error'].append(self.current['train_topo_error'])\n self.history['train_error'].append(self.current['train_error'])\n self.history['val_recon_error'].append(self.current['val_recon_error'])\n self.history['val_topo_error'].append(self.current['val_topo_error'])\n self.history['val_error'].append(self.current['val_error'])\n loss_per_epoch = self.current['val_error']\n\n # Check for save the BEST version every \"n\" epochs: save frequency\n # assuming there is already a best version called \"best_file_name\"\n if epoch_number % self.save_frequency == 0:\n self.partial_save(reuse_file=best_file_name)\n # Copy the file and rename it:\n # shutil.copyfile(self.save_dir + best_file_name, self.save_dir + '')\n \n # Update max loss allowed: if None, then copy from loss_per_epoch\n # print(max_loss)\n max_loss = max_loss or loss_per_epoch\n # If this model beats the better found until now:\n if loss_per_epoch < max_loss:\n # print('MAXLOSS update from', max_loss, 'to', loss_per_epoch, random_number)\n # If LAST model was already created, delete it\n if os.path.exists(best_file_name):\n os.remove(best_file_name)\n # Save the new LAST model\n self.partial_save(name=best_file_name)\n # Update max_loss\n max_loss = loss_per_epoch\n if self.verbose:\n print('Best result found at', self.current['epoch'])\n loss_per_epoch = np.mean(epoch_val_ae_loss) + np.mean(epoch_val_topo_error)\n ae_loss_per_epoch = np.mean(epoch_val_ae_loss)\n topo_loss_per_epoch = np.mean(epoch_val_topo_error)\n if self.verbose:\n print(f'Epoch:{epoch+1}, P:{patience}, Loss:{loss_per_epoch:.4f}, Loss-ae:{ae_loss_per_epoch:.4f}, Loss-topo:{topo_loss_per_epoch:.4f}')\n if self.patience:\n if max_loss < loss_per_epoch:\n if patience == 0:\n break\n patience -= 1\n else:\n max_loss = loss_per_epoch\n patience = self.patience\n # Update to the best version found\n self.partial_load(name=best_file_name)\n # Erase the temporal file\n os.remove(self.save_dir + best_file_name)\n return self\n \n def plot_training(self, title_plot=None):\n fig, ax = plt.subplots(figsize=(10,10))\n ax.set_title('Training')\n if title_plot:\n ax.set_title(title_plot)\n ax.plot(self.history['train_recon_error'], label='reconstruction error - train', color='red')\n ax.plot(self.history['val_recon_error'], label='reconstruction error - val', color='orange')\n ax.set_xlabel('Epoch')\n ax.set_ylabel(\"Reconstruction error\", color=\"red\", fontsize=14)\n ax.legend(loc=2)\n ax.set_ylim(bottom=0)\n\n ax2 = ax.twinx()\n 
ax2.plot(self.history['train_topo_error'], label='Topological error - train', color='blue')\n ax2.plot(self.history['val_topo_error'], label='Topological error - val', color='black')\n ax2.set_ylabel(\"Topological error\", color=\"blue\", fontsize=14)\n ax2.legend(loc=1)\n ax2.set_ylim(bottom=0)\n plt.grid()\n plt.show()\n \n def save(self, save_dir='data/', tag=None):\n model_name = self.model_name\n model_lambda = self.model_lambda\n # model_start_dim = self.model_start_dim\n model_latent_dim = self.model_latent_dim\n model_epc = self.current['epoch']\n filename = '{}_{}-{}_{}_{}.pkl'.format(\n model_name, model_lambda,\n # model_start_dim,\n model_latent_dim,\n model_epc, self.save_tag)\n full_dir = self.save_dir + filename\n filehandler = open(full_dir, 'wb')\n pickle.dump(self, filehandler)\n filehandler.close()\n return full_dir\n \n def partial_save(self, name=None, reuse_file=None):\n model_name = self.model_name\n model_lambda = self.model_lambda\n # model_start_dim = self.model_start_dim\n model_latent_dim = self.model_latent_dim\n model_epc = self.num_epochs\n model_tag = self.save_tag\n filename = '{}_{}-{}_{}_{}_ep{}'.format(\n model_name, model_lambda,\n # model_start_dim,\n model_latent_dim,\n model_epc, model_tag, self.current['epoch'])\n if name:\n filename = name\n full_dir = self.save_dir + filename\n if reuse_file:\n shutil.copyfile(self.save_dir + reuse_file, full_dir)\n return\n torch.save({\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict()\n }, full_dir)\n \n def partial_load(self, epoch=250, name=None):\n model_name = self.model_name\n model_lambda = self.model_lambda\n # model_start_dim = self.model_start_dim\n model_latent_dim = self.model_latent_dim\n model_epc = self.num_epochs\n model_tag = self.save_tag\n filename = '{}_{}-{}_{}_{}_ep{}'.format(\n model_name, model_lambda,\n # model_start_dim,\n model_latent_dim,\n model_epc, model_tag, epoch)\n if name:\n filename = name\n full_dir = self.save_dir + filename\n checkpoint = torch.load(full_dir)\n self.model.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n def load(self, filename='data/test.pkl'):\n filehandler = open(filename, 'rb')\n self = pickle.load(filehandler)\n filehandler.close()\n print('Loaded ', filename)\n \n # TODO\n def transform(self, X: ArrayLike):\n # Setting cuda\n cuda0 = torch.device('cuda:0')\n self.model.eval()\n reshaped_data = np.reshape(X, self.input_shape)\n in_tensor = torch.tensor(reshaped_data, device=cuda0).float()\n return self.model.encode(in_tensor).cpu().detach().numpy()\n \n def inverse_transform(self, X: ArrayLike):\n # Setting cuda\n cuda0 = torch.device('cuda:0')\n self.model.eval()\n reshaped_data = np.reshape(X, (-1, 1, X.shape[-1]))\n in_tensor = torch.tensor(reshaped_data, device=cuda0).float()\n decoded = self.model.decode(in_tensor).cpu().detach().numpy()\n return np.reshape(decoded, (X.shape[0], -1))\n\n def transform_and_back(self, X: ArrayLike, plot_function):\n self.model.eval()\n reshaped_data = np.reshape(X, self.input_shape)\n in_tensor = torch.Tensor(reshaped_data).float()\n X_encoded = self.model.encode(in_tensor).detach().numpy()\n plot_function(X, X_encoded)\n return \n \n def analize_patience(self, data):\n patiences = []\n patience = 0\n p = patience\n max_loss = np.max(data) + 1\n for index in range(1, len(data)):\n # print(index, data[index], p)\n if data[index] < max_loss:\n max_loss = data[index]\n p = patience\n else:\n if p == 
0:\n # print('PATIENCE', patience,' found in index', index, 'with value', data[index])\n patiences.append(index)\n patience +=1\n p += 1\n p -= 1\n return (data, patiences)\n\n\nclass ConvTAETransform(TopologicalDimensionalityReduction):\n def __init__(self,\n model_name='ConvTAE_def',\n model_lambda=1,\n patience=None,\n num_epochs=175,\n # start_dim=180,\n latent_dim=2,\n batch_size=64,\n cuda_device_name='cuda:0',\n extra_properties={},\n save_dir='data/', save_tag=0, save_frequency=250):\n ae_kwargs = {\n # 'input_dims': (1, start_dim),\n 'latent_dim': latent_dim\n }\n ae_kwargs.update(extra_properties)\n # input_shape = (-1, 1, start_dim)\n # if ae_kwargs['num_CL'] == 0:\n # input_shape = (-1, start_dim)\n super().__init__(\n ae_model=model_name,\n ae_kwargs=ae_kwargs,\n lam=model_lambda,\n patience=patience,\n num_epochs=num_epochs,\n batch_size=batch_size,\n # input_shape=input_shape,\n cuda_device_name=cuda_device_name,\n # start_dim=start_dim,\n latent_dim=latent_dim,\n save_dir=save_dir,\n save_tag=save_tag,\n save_frequency=save_frequency\n )\n","repo_name":"discovery-unicamp/hiaac-librep","sub_path":"src/librep/transforms/topo_ae.py","file_name":"topo_ae.py","file_ext":"py","file_size_in_byte":16329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"2182220398","text":"import math\r\ndef decode_coord( wstr ):#wstr like \"c0 a2 b3 c3\"\r\n#eg. \"c0 a2 b3 c3\" --> -163.1541 which is x co-ordinate\r\n return decode_coordinate( j(wstr) );\r\n \r\ndef j( str ):#returns array\r\n #\"c0 a2 b3 c3\" ---> [127 162 33 33]\r\n array=str.split(\" \");\r\n w=[];\r\n for x in array:\r\n w.append(i(x)); \r\n return w;\r\ndef i( h ): # h is hex string \"ff\"-->255\r\n return int(h,16);\r\ndef k( r ,l): # 176-->[176], 666-->[02, 154]\r\n hx=hex(r);\r\n hx=hx[2:];\r\n if(len(hx)%2==1):\r\n hx=\"0\"+hx;\r\n c=[];\r\n for x in range(0, len(hx), 2):\r\n c.append(i(hx[x:x+2]));\r\n while len(c) < l:\r\n c.insert(0,0);\r\n return c;\r\ndef decode_coordinate( w ): #w must be array like [ 127 82 45 255 ]\r\n #[ 127 82 45 255 ] ---> -163.415 = x co-ordinate\r\n a=w[0];\r\n if( a < 192 ): # x must be positive\r\n y = (a - 64)*2 + 1; # 2^y <= x < 2^(y+2) y is odd\r\n w.pop(0);\r\n str = \"\"\r\n for element in w:\r\n h=hex(element)[2:];\r\n str+=h;\r\n z = decodeFraction( str )\r\n if( z <= 0.5 ):\r\n x = z * pow(2, y+1) + pow(2,y);\r\n else:\r\n x=z * pow( 2, y+2 );\r\n else:\r\n y = (a - 192)*2 + 1; # 2^y <= x < 2^(y+2) y is odd\r\n w.pop(0);\r\n str = \"\"\r\n for element in w:\r\n h=hex(element)[2:];\r\n str+=h;\r\n z = decodeFraction( str )\r\n if( z <= 0.5 ):\r\n x = z * pow(2, y+1) + pow(2,y);\r\n else:\r\n x=z * pow( 2, y+2 );\r\n x = -x;\r\n return x;\r\ndef decodeFraction( y ): #0.bbaa ----> 0.7856\r\n #y must be bbaa\r\n x = len(y);\r\n a = int(y,16);\r\n return a * (1/pow(16,x));\r\ndef encodeFraction( x ,d): # to 3 places eg. 0.bba\r\n #now to d places.eg. 
( 0.5,2) -->[8 0]\r\n a= x/(1/16);\r\n y = math.floor(a);\r\n if( d == 1 ):\r\n return [y];\r\n else:\r\n b = a % 1;\r\n z =encodeFraction( b, d-1 );\r\n z.insert(0,y);\r\n return z;\r\n \r\ndef encode_coordinate( x ):\r\n #162.444 ----> returns [ 127 45 25 255] as needed in wireshark\r\n #this is the first function i made.\r\n if( x == 0 ):\r\n return [0,0,0,0];\r\n if( x > 0 ):\r\n c1=64;\r\n else:\r\n c1=192;x=-x;\r\n \r\n y = math.floor( math.log2( x ) );\r\n bool = 0;\r\n if( y % 2 == 0 ):\r\n bool = 1;\r\n y-=1;\r\n x1= c1+(y-1)/2;\r\n if( bool == 0):\r\n y2=( x - pow( 2, y ))/(pow(2, y+1));\r\n else:\r\n y2= x/pow(2,y+2);\r\n temp = y2/(1/(16*16));\r\n x2 = math.floor( temp );\r\n temp = temp - x2;\r\n temp = temp/(1/(16*16))\r\n x3 = math.floor(temp);\r\n temp = temp - x3;\r\n temp = temp/(1/(16*16));\r\n x4 = math.floor(temp);\r\n return [ (int)(x1),(int)(x2),(int)(x3),(int)(x4) ];","repo_name":"habi498/create_actor","sub_path":"position.py","file_name":"position.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"8592006199","text":"import discord\nfrom discord.ext import commands\nimport asyncio\nimport os\nimport random\nimport MAFIA.story as story\nimport MAFIA.prep as prep\nimport MAFIA.gvar as gvar\n\nclass mafia:\n def __init__(self, bot):\n self.bot = bot\n \n gameOn = False\n ready = False\n\n mafiaList = [] #Names \n DDList = [] #Names\n liveList = [] #names\n nominateList = []\n mChannel = None\n\n mLive = 0\n mDead = 0\n vLive = 0\n vDead = 0\n\n mafiaPlayers = {}\n\n victim = None\n healVictim = None\n pastHeal = None\n\n mafia = None #user\n doctor = None #user\n detective = None #user\n villagers = [] #id\n werewolf = [] #id\n politician = None #id\n\n @commands.command(pass_context = True)\n async def joinP(self, ctx):\n if self.gameOn == True or self.ready == True:\n await self.bot.send_message(ctx.message.channel, \"You cannot currently join right now because there is a game going on.\")\n else:\n if not ctx.message.author in self.mafiaPlayers.keys():\n self.mafiaPlayers[ctx.message.author] = \"\" # add author to dictionary\n\n await self.bot.send_message(ctx.message.channel, \"You have been added to the list.\")\n embed = discord.Embed(title = \"Mafia Party:\".format(), colour = discord.Colour.purple())\n server = ctx.message.server\n for player in self.mafiaPlayers.keys():\n embed.add_field(name = \"Player:\", value = \"{}\".format(player.name), inline = False)\n await self.bot.send_message(ctx.message.channel, embed = embed)\n else:\n await self.bot.send_message(ctx.message.channel, \"You are already in the party.\")\n\n @commands.command(pass_context = True)\n async def leaveP(self, ctx):\n if self.gameOn == True or self.ready == True:\n await self.bot.send_message(ctx.message.channel, \"You cannot currently leave right now because there is a game going on.\")\n else:\n if not ctx.message.author in self.mafiaPlayers.keys():\n await self.bot.send_message(ctx.message.channel, \"You are not in the party.\")\n else:\n self.mafiaPlayers.pop(ctx.message.author, None)\n await self.bot.send_message(ctx.message.channel, \"You have left the party.\")\n\n @commands.command(pass_context = True)\n async def party(self, ctx):\n server = ctx.message.server\n embed = discord.Embed(title = \"Mafia Party:\".format(), colour = discord.Colour.purple())\n server = ctx.message.server\n for player in self.mafiaPlayers.keys():\n embed.add_field(name = \"Player:\", value = 
\"{}\".format(player.name), inline = True)\n await self.bot.send_message(ctx.message.channel, embed = embed)\n\n @commands.command(pass_context = True)\n async def setGame(self, ctx):\n #if len(self.mafiaPlayers) < 5:\n #await self.bot.send_message(ctx.message.channel, \"Sorry. You need at least 5 people to play the game. You only have {} players.\".format(len(self.partyL)))\n #else:\n if self.ready == True:\n await self.bot.send_message(ctx.message.channel, \"You have already set up. Type /start to begin.\")\n \n elif self.gameOn == True:\n await self.bot.send_message(ctx.message.channel, \"There is already a game playing.\")\n\n else:\n server = ctx.message.server\n self.ready = True\n\n prepObj = prep.prepare(self.bot, self.mafiaPlayers)\n prepObj.assignRoles()\n # Finished settings roles\n\n # Inform player of roles\n for player, data in self.mafiaPlayers.items():\n if(data.roleName == 'mafia'):\n embed = discord.Embed(title = \"You are the Mafia. Your job is to kill everyone. Pretty simple.\", colour = discord.Colour.red())\n embed.set_thumbnail(url = \"https://images2.minutemediacdn.com/image/upload/c_scale,w_912,h_516,c_fill,g_auto/shape/cover/sport/5b73276e8f1752549a000001.jpeg\")\n await self.bot.send_message(player, embed = embed)\n elif(data.roleName == 'doctor'):\n embed = discord.Embed(title = \"You are the Doctor. Your job is to save people. But you can't save the same person twice in a row.\", colour = discord.Colour.blue())\n embed.set_thumbnail(url = \"https://res.cloudinary.com/teepublic/image/private/s--NyIx9Nop--/t_Preview/b_rgb:c62b29,c_limit,f_jpg,h_630,q_90,w_630/v1469022975/production/designs/592798_1.jpg\")\n await self.bot.send_message(player, embed = embed)\n elif(data.roleName == 'detective'):\n embed = discord.Embed(title = \"You are the Detective. Your job is to find the Mafia.\", colour = discord.Colour.orange())\n embed.set_thumbnail(url = \"https://78.media.tumblr.com/9681fb542682771069c3864dcbae7ef8/tumblr_o1mh5vUWe91r0sasuo1_400.gif\")\n await self.bot.send_message(player, embed = embed)\n elif(data.roleName == 'politician'):\n embed = discord.Embed(title = \"You are the Politician. You're just another villager, but you can accept bribe from Mafia to be on his side. Sounds fun. And realistic.\", colour = discord.Colour.green())\n await self.bot.send_message(player, embed = embed)\n else:\n embed = discord.Embed(title = \"You are just a normal innocent villager who might get accused for crimes you didn't commit ¯\\_(ツ)_/¯ \", colour = discord.Colour.dark_gold())\n embed.set_thumbnail(url = \"https://www.ssbwiki.com/images/thumb/a/ac/Villager_SSBU.png/250px-Villager_SSBU.png\")\n await self.bot.send_message(player, embed = embed)\n await self.bot.create_channel(server, \"mafia\")\n await self.bot.send_message(ctx.message.channel, \"Everything's ready! Type /start to start the game!\")\n\n\n @commands.command(pass_context = True)\n async def deleteMafia(self, ctx):\n try:\n server = ctx.message.server\n channel = self.findChannel(server)\n await self.bot.delete_channel(channel)\n except Exception:\n await self.bot.send_message(ctx.message.channel, \"Error\")\n \n @commands.command(pass_context = True)\n async def start(self, ctx):\n if self.ready == False:\n await self.bot.send_message(ctx.message.channel, \"You didn't set up yet. 
Type /setGame first.\")\n \n elif self.gameOn == True:\n await self.bot.send_message(ctx.message.channel, \"There is already a game going on!\")\n else:\n self.gameOn = True\n server = ctx.message.server\n channel = self.findChannel(server)\n for player in self.mafiaPlayers.keys():\n await self.bot.send_message(channel, player.mention)\n intro = discord.Embed(title = \"Welcome to Mafia!\", description = \"If you haven't read the rules yet, please type /helpM to view them in your dm!\", colour = discord.Colour.dark_purple())\n intro.set_image(url = \"https://pre00.deviantart.net/5183/th/pre/i/2018/011/f/5/league_of_legends___mafia_miss_fortune_by_snatti89-dbznniv.jpg\")\n await self.bot.send_message(channel, embed = intro)\n await asyncio.sleep(3)\n\n await self.bot.send_message(channel, \"Alright! Let this game begin!\")\n await asyncio.sleep(1)\n mafiaList = []\n mafiaCount = 0\n for player, data in self.mafiaPlayers.items():\n if(data.roleName == 'mafia'):\n mafiaList.append(player)\n mafiaCount += 1\n\n\n while True:\n doctorAlive = False\n detAlive = False\n temp = [] # names\n for player, data in self.mafiaPlayers.items():\n if (data.alive == True):\n temp.append(player.name.lower())\n embed = discord.Embed(title = \"It is currently nightime, so everyone mute yourself. Please.\", colour = discord.Colour.dark_blue())\n await self.bot.send_message(channel, embed = embed)\n\n await asyncio.sleep(3)\n embed = discord.Embed(title = \"Mafia please check your dm.\", colour = discord.Colour.dark_green())\n await self.bot.send_message(channel, embed = embed)\n\n\n #Mafia turn\n #self.MafiaTurn(ctx)\n \n mafiaNames = []\n for item in mafiaList:\n mafiaNames.append(item.name.lower())\n mafiaKillVote = {}\n for player in mafiaList:\n\n tempM = []\n\n for thing in temp:\n if not thing in mafiaNames:\n tempM.append(thing)\n\n embed = discord.Embed(title = \"Targets\", colour = discord.Colour.purple())\n embed.add_field(name = \"Who is your target?\", value = \"Be sure to include any numbers and spaces\", inline = False)\n for item in tempM:\n embed.add_field(name = \"{}\".format(item), value = \"Kill me!\", inline = False)\n embed.set_image(url = \"https://www.mobafire.com/images/champion/skins/landscape/graves-mafia.jpg\")\n await self.bot.send_message(player, embed = embed)\n\n answer = await self.bot.wait_for_message(author = player)\n while True:\n if answer.content.lower() in tempM:\n self.victim = answer.content.lower()\n await self.bot.send_message(player, \"Gotcha. You may now return to the mafia channel\")\n await self.bot.send_message(channel, \"Got it Mafia.\")\n break\n else:\n await self.bot.send_message(player, \"Error. Please check your spelling. 
Be sure to include any spaces, and numbers!\")\n answer = await self.bot.wait_for_message(author = player)\n \n #Doctor turn\n \n for player, data in self.mafiaPlayers.items():\n if(data.roleName == 'doctor'):\n doctorUser = player\n doctorAlive = True\n\n # Only if doc is alive\n if doctorAlive == True:\n await self.bot.send_message(channel, \"Doctor please check your DM.\")\n embed = discord.Embed(title = \"Targets\", colour = discord.Colour.purple())\n tempD = []\n for stuff in temp:\n if stuff.lower() != self.pastHeal:\n tempD.append(stuff)\n for item in tempD:\n embed.add_field(name = \"{}\".format(item), value = \"Save me!\", inline = False)\n \n embed.set_image(url = \"https://vignette.wikia.nocookie.net/leagueoflegends/images/f/f7/Akali_NurseSkin_old.jpg/revision/latest?cb=20120609043410\")\n await self.bot.send_message(doctorUser, embed = embed) \n \n await self.bot.send_message(doctorUser, \"Alright who do you want to save?\")\n answer = await self.bot.wait_for_message(author = doctorUser)\n while True:\n if answer.content.lower() == self.pastHeal:\n await self.bot.send_message(doctorUser, \"You cannot heal the same person twice in a row!\")\n answer = await self.bot.wait_for_message(author = doctorUser)\n elif answer.content.lower() in tempD:\n self.healVictim = answer.content.lower()\n self.pastHeal = answer.content.lower()\n await self.bot.send_message(doctorUser, \"Gotcha. You may now return to the mafia channel.\")\n await self.bot.send_message(channel, \"Got it Doctor.\")\n break\n else:\n await self.bot.send_message(player, \"Error. Please check your spelling. Be sure to include any spaces and numbers!\")\n answer = await self.bot.wait_for_message(author = doctorUser)\n\n #Detective turn\n \n for player, data in self.mafiaPlayers.items():\n if(data.roleName == 'detective'):\n detUser = player\n detAlive = True\n\n # only if det is alive\n if detAlive == True:\n tempDT = []\n await self.bot.send_message(channel, \"Detective please check your DMs.\")\n\n embed = discord.Embed(title = \"Targets\", colour = discord.Colour.purple())\n embed.add_field(name = \"Who do you suspect?\", value = \"Please include all spaces and numbers.\", inline = False)\n for item in tempDT:\n embed.add_field(name = \"{}\".format(item), value = \"Pick me!\", inline = True)\n embed.set_image(url = \"https://na.leagueoflegends.com/sites/default/files/styles/scale_xlarge/public/upload/cops_1920.jpg?itok=-T6pbISx\")\n await self.bot.send_message(detUser, embed = embed)\n for stuff in temp:\n if stuff != detUser.name:\n tempDT.append(stuff)\n suspect = \"Boi\"\n for player, data in self.mafiaPlayers.items():\n if(data.roleName == 'suspect'):\n suspect = player.name\n answer = await self.bot.wait_for_message(author = detUser)\n while True:\n if answer.content.lower() in tempDT:\n if answer.content.lower() in mafiaNames or answer.content.lower() == suspect:\n embed = discord.Embed(title = \"Yes. That person is the mafia. Now try to convince the others. Please return to the mafia chat now.\", colour = discord.Colour.green())\n \n else:\n embed = discord.Embed(title = \"Sorry. That person is not the mafia. Please return to the mafia chat now.\", colour = discord.Colour.dark_red())\n \n await self.bot.send_message(detUser, embed = embed)\n break\n else:\n await self.bot.send_message(detUser, \"Error. Please check your spelling. 
Be sure to include any spaces, and numbers!\")\n answer = await self.bot.wait_for_message(author = detUser)\n\n if self.victim == self.healVictim:\n saved = True\n else:\n saved = False\n\n #Storytime\n await self.bot.send_message(channel, \"Alright everybody get your ass back here and unmute yourself. It's storytime.\")\n await asyncio.sleep(3)\n story1 = discord.Embed(title = \"Story\", description = \"All of these stories are written by Ernest and Leonard\", colour = discord.Colour.purple())\n await self.bot.send_message(channel, embed = story1)\n if saved == True:\n aStory = story.storyTime(\"alive\", self.victim)\n storyEmbed = discord.Embed(title = \"{} lives!\".format(self.victim), description = \"{}\".format(aStory), colour = discord.Colour.green())\n storyEmbed.set_thumbnail(url = \"https://vignette.wikia.nocookie.net/dragonfable/images/f/f1/Heal_Icon.png/revision/latest?cb=20130329031111\")\n else:\n aStory = story.storyTime(\"dead\", self.victim)\n storyEmbed = discord.Embed(title = \"{} died :(\".format(self.victim), description = \"{}\".format(aStory), colour = discord.Colour.red())\n storyEmbed.set_thumbnail(url = \"https://image.flaticon.com/icons/png/512/155/155266.png\")\n for player, data in self.mafiaPlayers.items():\n if (player.name.lower() == self.victim):\n data.alive = False\n\n await self.bot.send_message(channel, embed = storyEmbed)\n await asyncio.sleep(3)\n check = self.checkWin(mafiaCount)\n\n\n if check == \"m\":\n embed = discord.Embed(title = \"The mafia(s) win!\", colour = discord.Colour.purple())\n for item in mafiaList:\n embed.add_field(name = \"{}\".format(item.name), value = \"I'm the Mafia!\", inline = False)\n await self.bot.send_message(channel, embed = embed)\n await self.bot.send_message(channel, \"Thank you all for playing!\")\n await asyncio.sleep(10)\n await self.bot.delete_channel(channel)\n break\n\n\n elif check == \"v\":\n embed = discord.Embed(title = \"The villagers win\", colour = discord.Colour.purple())\n for item in mafiaList:\n embed.add_field(name = \"{}\".format(item.name), value = \"I'm the Mafia!\", inline = False)\n await self.bot.send_message(channel, embed = embed)\n await self.bot.send_message(channel, \"Thank you all for playing!\")\n await asyncio.sleep(10)\n await self.bot.delete_channel(channel)\n break\n\n\n elif check == \"none\": # lynch\n await self.bot.send_message(channel, \"Now I'll give you guys 2 min to talk.\")\n #await asyncio.sleep(120)\n\n # nomination\n nom = discord.Embed(title = \"Players:\", colour = discord.Colour.purple())\n await self.bot.send_message(channel, \"Alright! Any nominations? Just type them in the chat. You have 5 seconds to submit each nomination. 
When you're done just wait for the timer to finish.\")\n for item in temp:\n nom.add_field(name = \"{}\".format(item), value = \"Pick me!\", inline = False)\n await self.bot.send_message(channel, embed = nom)\n\n\n nomination = await self.bot.wait_for_message(timeout = 5, channel = channel)\n embed = discord.Embed(title = \"Nominations\", colour = discord.Colour.purple())\n while True:\n if nomination == None:\n await self.bot.send_message(channel, \"The nomination time is closed.\")\n if self.nominateList:\n await self.bot.send_message(channel, embed = embed)\n break\n elif nomination.author == self.bot.user:\n nomination = await self.bot.wait_for_message(timeout = 5, channel = channel)\n \n elif nomination.content.lower() in temp and not nomination.content.lower() in self.nominateList:\n self.nominateList.append(nomination.content.lower())\n embed.add_field(name = \"{}\".format(item), value = \"Nominated to die!\", inline = False)\n await self.bot.send_message(channel, \"{} has been added to the nomination list. Any other ones?\".format(nomination.content.lower()))\n await self.bot.send_message(channel, embed = embed)\n \n nomination = await self.bot.wait_for_message(timeout = 5, channel = channel)\n elif not nomination.content.lower() in temp or nomination.content.lower() in self.nominateList:\n await self.bot.send_message(channel, \"Error. Not valid nomination. This person either doesn't exist or is already in the nomination list.\")\n nomination = await self.bot.wait_for_message(timeout = 5, channel = channel)\n\n # voting time\n if self.nominateList:\n authors = []\n scoreName = []\n score = []\n await self.bot.send_message(channel, \"Ok! Now it's time to vote! The person with the most votes dies and he or she must have two or more votes.\")\n for item in self.nominateList:\n scoreName.append(item)\n votes = 0\n await self.bot.send_message(channel, \"Who wants to vote for {}? Type v to vote.\".format(item))\n vote = await self.bot.wait_for_message(timeout = 5, content = \"v\", channel = channel)\n\n\n while True:\n if vote == None:\n break\n elif vote.author == self.bot.user:\n vote = await self.bot.wait_for_message(timeout = 5, content = \"v\", channel = channel)\n elif vote.author.name in authors:\n await self.bot.send_message(channel, \"You have voted already. Or your input was incorrect.\")\n vote = await self.bot.wait_for_message(timeout = 5, content = \"v\", channel = channel)\n elif not vote.author.name.lower() in temp:\n await self.bot.send_message(channel, \"You are not in the game, or you're dead.\")\n vote = await self.bot.wait_for_message(timeout = 5, content = \"v\", channel = channel)\n elif not vote.author.name.lower() in authors and vote.author.name.lower() in temp:\n authors.append(vote.author.name)\n votes+=1\n await self.bot.send_message(channel, \"One vote has been put into {}\".format(item))\n vote = await self.bot.wait_for_message(timeout = 5, content = \"v\", channel = channel)\n \n score.append(votes)\n embed = discord.Embed(title = \"Total votes for {}\".format(item), description = \"{}\".format(votes), colour = discord.Colour.purple())\n await self.bot.send_message(channel, embed = embed)\n \n\n # finds largest vote\n largestVote = 0\n for item in score:\n if item > largestVote and item > 1:\n largestVote = item\n elif item == largestVote:\n largestVote = 0\n \n # kills nominated\n\n if largestVote != 0:\n deadGuy = scoreName[score.index(largestVote)]\n embed = discord.Embed(title = \"{} has been hanged by the village. 
Press f to pay respect.\".format(deadGuy), colour = discord.Colour.red())\n embed.set_image(url = \"https://cdn.shopify.com/s/files/1/0895/0864/products/42-47714084_1024x1024.jpeg?v=1451772538\")\n await self.bot.send_message(channel, embed = embed)\n for player, data in self.mafiaPlayers.items():\n if (player.name.lower() == deadGuy.lower()):\n data.alive = False\n elif largestVote == 0:\n await self.bot.send_message(channel, \"No one was hanged.\")\n \n else:\n await self.bot.send_message(channel, \"No one was hanged.\")\n \n check = self.checkWin(mafiaCount)\n\n if check == \"m\":\n embed = discord.Embed(title = \"The mafia(s) win!\", colour = discord.Colour.purple())\n for item in mafiaList:\n embed.add_field(name = \"{}\".format(item.name), value = \"I'm the Mafia!\", inline = False)\n await self.bot.send_message(channel, embed = embed)\n await asyncio.sleep(10)\n await self.bot.delete_channel(channel)\n break\n elif check == \"v\":\n embed = discord.Embed(title = \"The villagers win!\", colour = discord.Colour.purple())\n for item in mafiaList:\n embed.add_field(name = \"{}\".format(item.name), value = \"I'm the Mafia!\", inline = False)\n await self.bot.send_message(channel, embed = embed)\n await asyncio.sleep(10)\n await self.bot.delete_channel(channel)\n break\n self.ready = False\n self.gameOn = False\n \n \n \n\n @commands.command(pass_context = True)\n async def helpM(self, ctx):\n embed = discord.Embed(title = \"Mafia Game\", colour = discord.Colour.orange())\n embed.add_field(name = \"How to play:\", value = \"To play, there must be at least 5 people in the Mafia party.\", inline = False)\n embed.add_field(name = \"#1\", value = \"When the game starts, each player will receive their role through dm.\", inline = False)\n embed.add_field(name = \"#2\", value = \"Everyone will go to sleep. The Mafia would be the first to wake up, and through dm he/she can choose which player to kill.\", inline = False)\n embed.add_field(name = \"#3\", value = \"After, the doctor will wake up, and he/she can choose a person to save through dm.\", inline = False)\n embed.add_field(name = \"#4\", value = \"Finally, the detective will wake up and choose a person to accuse through dm. He/she would be informed if the person is the Mafia. If he/her investigates the suspect, then he/she will be informed that the suspect is the mafia.\", inline = False)\n embed.add_field(name = \"#5\", value = \"Everybody wakes up and the bot will inform you through the mafia channel who was killed. The group has a minute to discuss who is the Mafia.\", inline = False)\n embed.add_field(name = \"#6\", value = \"Everyone then nominate and vote on people to lynch. The most voted person will then be lynched.\")\n embed.add_field(name = \"#7\", value = \"The cycle continues until only if the number of mafias are greater than villagers, the mafia kills everyone, or all the mafia dies.\")\n embed.set_footer(text = \"For more information, type /helpR for roles, /helpC for commands, and /helpGame for setup.\")\n await self.bot.send_message(ctx.message.author, embed = embed)\n\n @commands.command(pass_context = True)\n async def helpR(self, ctx):\n embed = discord.Embed(title = \"Mafia Roles\", colour = discord.Colour.orange())\n embed.add_field(name = \"Mafia\", value = \"Side: Mafia. Your role is to kill everyone. And don't get caught.\", inline = False)\n embed.add_field(name = \"Doctor\", value = \"Side: Villager. Your role is to save people. 
You cannot save the same person twice in a row.\", inline = False)\n embed.add_field(name = \"Detective\", value = \"Side: Villager. Your role is to find the mafia and tell everyone.\", inline = False)\n embed.add_field(name = \"Suspect\", value = \"Side: Villager. When inspected by the detective, the suspect would return Mafia, even though the suspect is on the villager's side. The suspect won't know that he/she is a suspect. There must be at least 6 people to have a chance of gaining this role.\", inline = False)\n await self.bot.send_message(ctx.message.author, embed = embed)\n \n @commands.command(pass_context = True)\n async def helpC(self, ctx):\n embed = discord.Embed(title = \"Mafia Commands\", colour = discord.Colour.orange())\n embed.add_field(name = \"/joinP\", value = \"Joins the current mafia party.\", inline = False)\n embed.add_field(name = \"/leaveP\", value = \"Leaves the current mafia party.\", inline = False)\n embed.add_field(name = \"/party\", value = \"Displays current party.\", inline = False)\n embed.add_field(name = \"/setGame\", value = \"Sets up the game.(Must do before /start)\", inline = False)\n embed.add_field(name = \"/start\", value = \"Starts the game with the current people in the mafia party. Must do /setGame first.\", inline = False)\n await self.bot.send_message(ctx.message.author, embed = embed)\n \n @commands.command(pass_context = True)\n async def helpGame(self, ctx):\n embed = discord.Embed(title = \"Mafia Setup\", colour = discord.Colour.orange())\n embed.add_field(name = \"Requirement:\", value = \"There must be at least 5 people in the mafia party.\", inline = False)\n embed.add_field(name = \"Joining the Game:\", value = \"Everyone playing must enter /joinP to join the party. Type /leaveP to leave the party.\", inline = False)\n embed.add_field(name = \"Step 1\", value = \"Enter /setGame to set up and assign the roles for the game.\", inline = False)\n embed.add_field(name = \"Step 2\", value = \"Enter /start to start the game.\", inline = False)\n embed.add_field(name = \"Step 3\", value = \"Play\", inline = False)\n await self.bot.send_message(ctx.message.author, embed = embed)\n \n async def MafiaTurn(self, ctx):\n \n mafiaList = []\n for player, data in self.mafiaPlayers.items():\n if(data.roleName == 'mafia'):\n mafiaList.append(player)\n \n mafiaKillVote = {}\n for player in mafiaList:\n await self.bot.send_message(player, 'Vote for a player to kill! (The vote must be unanimous)')\n await self.bot.send_message(player, \"Who is your target? (Just type the name. Include any spaces and numbers.)\")\n embed = self.displayMember(ctx.message.channel, self.mafiaList)\n await self.bot.send_message(player, embed = embed)\n answer = await self.bot.wait_for_message(author = player, channel = player)\n while True:\n if answer.content.lower() in self.liveList:\n self.victim = answer.content()\n await self.bot.send_message(ctx.message.channel, \"Got it\")\n break\n else:\n await self.bot.send_message(player, \"Error. Make sure your spelling is correct and you include the whole name(including numbers). 
Also no /.\")\n answer = await self.bot.wait_for_message(author = player, channel = player)\n\n def findChannel(self, server):\n for item in server.channels:\n if item.name == 'mafia':\n return item\n \n def checkGame(self, mafias, status, mafiaV):\n num = 0\n if mafiaV == True:\n for player, data in mafias.items():\n if data.roleName == \"mafia\":\n if data.alive == status:\n num += 1\n else:\n for player, data in mafias.items():\n if data.roleName != \"mafia\":\n if data.alive == status:\n num += 1\n return num\n \n def checkWin(self, mafiaCount):\n self.mLive = self.checkGame(self.mafiaPlayers, True, True)\n self.mDead = self.checkGame(self.mafiaPlayers, False, True)\n self.vLive = self.checkGame(self.mafiaPlayers, True, False)\n self.vDead = self.checkGame(self.mafiaPlayers, False, False)\n if self.mLive >= self.vLive or (self.vLive ==1 and self.mLive >= 1):\n return \"m\"\n elif self.mDead == mafiaCount:\n return \"v\"\n else:\n return \"none\"\n def randInt(self, chance, whole):\n result = random.randint(chance, whole)\n if result <= chance:\n return True\n else:\n return False\n\n def setRoles(self, ctx, group, role):\n role = random.choice(group)\n group.remove(role)\n\n def displayMember(self, server, group):\n embed = discord.Embed(title = \"Targets\", colour = discord.Colour.purple())\n for item in group.keys():\n name = server.get_member(item)\n embed.add_field(name = \"{}\".format(name), value = \"Kill me!\", inline = False)\n return embed\n\ndef setup(bot):\n bot.add_cog(mafia(bot))","repo_name":"ernestLin0805/Ghoulbot","sub_path":"MAFIA/mafia.py","file_name":"mafia.py","file_ext":"py","file_size_in_byte":33209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"70531966209","text":"class Song:\n \"\"\"Class for representing information about a song\"\"\"\n def __init__(self, title, artist, album):\n self.title = title\n self.artist = artist\n self.album = album\n\n def output(self):\n print('Title: \"{0}\"').format(self.title)\n print('Artist: {0}').format(self.artist)\n print('Album: {0}\\n').format(self.album)\n\nif __name__ == \"__main__\":\n songs = []\n songs.append(Song(\"Between Two Points\", \"The Glitch Mob\", \"Drink the Sea\"))\n songs.append(Song(\"Ghosts 'n' Stuff (Sub Focus Remix)\", \"Deadmau5\", \"Ghosts Album\"))\n songs.append(Song(\"All the Cash\", \"Evil Nine\", \"All the Cash (Single)\"))\n\n for song in songs: song.output()","repo_name":"bcjordan/python","sub_path":"lab01/Song.py","file_name":"Song.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"9525828943","text":"import gymnasium as gym\nfrom typing import Tuple\nimport numpy as np\nimport os.path as osp\nimport pybullet\nfrom pybullet_utils import bullet_client\nimport time\n\nfrom legged_gym import LEGGED_GYM_ROOT_DIR\nfrom legged_gym.utils.gamepad import Gamepad\nfrom configs.definitions import DeploymentConfig, NormalizationConfig, CommandsConfig\nfrom robot_deployment.robots.motors import MotorControlMode\nfrom robot_deployment.robots.motors import MotorCommand\nfrom robot_deployment.robots import a1\nfrom robot_deployment.robots import a1_robot\n\n\nclass LocomotionGymEnv(gym.Env):\n \"\"\"The gym environment for the locomotion tasks.\"\"\"\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 100\n }\n pybullet_client: bullet_client.BulletClient\n\n def __init__(\n self,\n config: DeploymentConfig,\n 
sensors: Tuple[str, ...],\n obs_scales: NormalizationConfig.NormalizationObsScalesConfig,\n command_ranges: CommandsConfig.CommandRangesConfig\n ):\n # set instance variables from arguments\n self.config = config\n self.obs_scales = obs_scales\n self.use_real_robot = config.use_real_robot\n self.get_commands_from_joystick = config.get_commands_from_joystick\n self.command_ranges = command_ranges\n self.sensors = sensors\n self.hard_reset = True\n self.last_frame_time = 0.\n self.env_time_step = self.config.timestep * self.config.action_repeat\n\n self._setup_robot()\n self.default_motor_angles = self.robot.motor_group.init_positions\n self.observation_space = self._build_observation_space()\n self.action_space = self._build_action_space()\n\n if self.get_commands_from_joystick:\n self.gamepad = Gamepad(self.command_ranges)\n self.commands = np.array([0., 0., 0.])\n\n def _setup_robot(self):\n # make the simulator instance\n connection_mode = pybullet.GUI if self.config.render.show_gui and not self.use_real_robot else pybullet.DIRECT\n self.pybullet_client = bullet_client.BulletClient(connection_mode=connection_mode)\n self._reset_sim()\n\n # construct robot\n robot_ctor = a1_robot.A1Robot if self.use_real_robot else a1.A1\n self.robot = robot_ctor(\n pybullet_client=self.pybullet_client,\n sim_conf=self.config,\n motor_control_mode=MotorControlMode.POSITION\n )\n\n if self.config.render.show_gui and not self.use_real_robot:\n self.pybullet_client.configureDebugVisualizer(self.pybullet_client.COV_ENABLE_RENDERING, 1)\n\n self.clock = lambda: self.robot.time_since_reset\n self.last_action: np.ndarray = None\n self.timesteps = None\n\n def reset(self):\n if self.hard_reset:\n # clear the simulation world and rebuild the robot interface\n self._reset_sim()\n\n self.robot.reset(hard_reset=self.hard_reset)\n self.last_action = np.zeros(self.action_space.shape)\n self.timesteps = 0\n if self.config.render.show_gui and not self.use_real_robot:\n self.pybullet_client.configureDebugVisualizer(self.pybullet_client.COV_ENABLE_RENDERING, 1)\n\n self.current_time = time.time()\n return self.get_observation(), self.get_full_observation()\n\n def step(self, action):\n \"\"\"Step forward the environment, given the action.\n\n action: 12-dimensional NumPy array of desired motor angles\n \"\"\"\n clipped_action = np.clip(\n action,\n self.robot.motor_group.min_positions,\n self.robot.motor_group.max_positions\n )\n motor_action = MotorCommand(\n desired_position=clipped_action,\n kp=self.robot.motor_group.kps,\n kd=self.robot.motor_group.kds\n )\n self.robot.step(motor_action)\n if self.config.render.show_gui:\n if not self.use_real_robot:\n duration = time.time() - self.current_time\n if duration < self.robot.control_timestep:\n time.sleep(self.robot.control_timestep - duration)\n self.current_time = time.time()\n yaw = self.config.render.camera_yaw\n if not self.config.render.fix_camera_yaw:\n yaw += np.degrees(self.robot.base_orientation_rpy[2])\n self.pybullet_client.resetDebugVisualizerCamera(\n cameraDistance=self.config.render.camera_dist,\n cameraYaw=yaw,\n cameraPitch=self.config.render.camera_pitch,\n cameraTargetPosition=self.robot.base_position\n )\n\n terminated = not self.is_safe\n self.last_action = clipped_action\n self.timesteps += 1\n return self.get_observation(), 0, terminated, False, self.get_full_observation()\n\n def render(self):\n view_matrix = self.pybullet_client.computeViewMatrixFromYawPitchRoll(\n cameraTargetPosition=self.robot.base_position,\n 
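# third-person orbit camera centered on the robot base, per the render settings in the deployment config\n            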
distance=self.config.render.camera_dist,\n yaw=self.config.render.camera_yaw,\n pitch=self.config.render.camera_pitch,\n roll=0,\n upAxisIndex=2\n )\n projection_matrix = self.pybullet_client.computeProjectionMatrixFOV(\n fov=60,\n aspect=self.config.render.render_width/self.config.render.render_height,\n nearVal=0.1,\n farVal=100.\n )\n _, _, rgba, _, _ = self.pybullet_client.getCameraImage(\n width=self.config.render.render_width,\n height=self.config.render.render_height,\n renderer=self.pybullet_client.ER_BULLET_HARDWARE_OPENGL,\n viewMatrix=view_matrix,\n projectionMatrix=projection_matrix\n )\n rgb_array = np.array(rgba)[:, :, :3]\n return rgb_array\n\n def close(self):\n self.pybullet_client.disconnect()\n\n def get_observation(self):\n obs_list = []\n for sensor in self.sensors:\n if sensor == \"base_ang_vel\":\n obs_list.append(self.robot.base_angular_velocity_in_base_frame * self.obs_scales.ang_vel)\n elif sensor == \"yaw_rate\":\n obs_list.append(self.robot.base_angular_velocity_in_base_frame[[2]] * self.obs_scales.ang_vel)\n elif sensor == \"commands\":\n if self.get_commands_from_joystick:\n lin_vel_x, lin_vel_y, ang_vel, right_bump = self.gamepad.get_command()\n self.commands = np.array([lin_vel_x, lin_vel_y, ang_vel])\n else:\n raise ValueError(\"no joystick (or other input) available for commands\")\n multiplier = np.array([self.obs_scales.lin_vel, self.obs_scales.lin_vel, self.obs_scales.ang_vel])\n obs_list.append(self.commands * multiplier)\n elif sensor == \"motor_pos\":\n obs_list.append((self.robot.motor_angles - self.default_motor_angles) * self.obs_scales.dof_pos)\n elif sensor == \"motor_vel\":\n obs_list.append(self.robot.motor_velocities * self.obs_scales.dof_vel)\n elif sensor == \"projected_gravity\":\n _, inv_base_orientation = self.pybullet_client.invertTransform(\n [0, 0, 0], self.robot.base_orientation_quat\n )\n projected_gravity = self.pybullet_client.multiplyTransforms(\n [0, 0, 0], inv_base_orientation, [0, 0, -1], [0, 0, 0, 1]\n )[0]\n obs_list.append(projected_gravity)\n elif sensor == \"last_action\":\n obs_list.append((self.last_action - self.default_motor_angles) / self.config.action_scale * self.obs_scales.last_action)\n else:\n raise ValueError(f\"Sensor not recognized: {sensor}\")\n\n return np.concatenate(obs_list)\n\n def get_full_observation(self):\n obs_dict = dict(\n base_angular_velocity_in_base_frame=self.robot.base_angular_velocity_in_base_frame,\n base_position=self.robot.base_position,\n base_orientation_quat=self.robot.base_orientation_quat,\n base_orientation_rpy=self.robot.base_orientation_rpy,\n base_velocity=self.robot.base_velocity,\n base_velocity_in_base_frame=self.robot.base_velocity_in_base_frame,\n foot_contact=self.robot.foot_contacts,\n foot_contact_history=self.robot.foot_contact_history,\n foot_position=self.robot.foot_positions_in_base_frame,\n foot_velocity=self.robot.foot_velocities_in_base_frame,\n motor_angle=self.robot.motor_angles,\n motor_torque=self.robot.motor_torques,\n motor_temperature=self.robot.motor_temperatures,\n motor_velocity=self.robot.motor_velocities,\n )\n return obs_dict\n\n @property\n def is_safe(self):\n # done\n rot_mat = np.array(\n self.pybullet_client.getMatrixFromQuaternion(self.robot.base_orientation_quat)\n ).reshape((3, 3))\n up_vec = rot_mat[2, 2]\n base_height = self.robot.base_position[2]\n return up_vec > 0.5 and base_height > 0.05\n\n def _reset_sim(self):\n self.pybullet_client.configureDebugVisualizer(self.pybullet_client.COV_ENABLE_RENDERING, 0)\n 
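# rendering stays disabled while the world is rebuilt; it is re-enabled in _setup_robot() and reset()\n        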
self.pybullet_client.resetSimulation()\n self.pybullet_client.setAdditionalSearchPath(osp.join(LEGGED_GYM_ROOT_DIR, 'resources'))\n self.pybullet_client.setPhysicsEngineParameter(numSolverIterations=self.config.num_solver_iterations)\n self.pybullet_client.setTimeStep(self.config.timestep)\n self.pybullet_client.setGravity(0, 0, -9.81)\n self.pybullet_client.setPhysicsEngineParameter(enableConeFriction=0)\n\n # ground\n self.ground_id = self.pybullet_client.loadURDF('plane.urdf')\n #self.pybullet_client.changeDynamics(self.ground_id, -1, restitution=0.5)\n #self.pybullet_client.changeDynamics(self.ground_id, -1, lateralFriction=0.5)\n\n def _build_observation_space(self):\n # TODO\n pass\n\n def _build_action_space(self):\n \"\"\"Builds action space corresponding to joint position control\"\"\"\n return gym.spaces.Box(\n self.robot.motor_group.min_positions,\n self.robot.motor_group.max_positions\n )\n","repo_name":"UWRobotLearning/ground_control","sub_path":"robot_deployment/robot_deployment/envs/locomotion_gym_env.py","file_name":"locomotion_gym_env.py","file_ext":"py","file_size_in_byte":10336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"33287147704","text":"import collections\nimport functools\nimport io\nimport itertools\nimport os\nimport pathlib\nimport random\nfrom typing import Any, Iterator, Mapping, Optional, Tuple, Union\n\nimport numpy as np\nfrom sanpo_dataset.lib import common\nimport tensorflow as tf\n\n\nTRAIN_SPLITNAME = 'train'\nTEST_SPLITNAME = 'test'\n\n_SANPO_REAL_DIRNAME = 'sanpo-real'\n_SANPO_SYNTHETIC_DIRNAME = 'sanpo-synthetic'\n_TRAIN_SPLIT_FILENAME = 'splits/train_session_ids.txt'\n_TEST_SPLIT_FILENAME = 'splits/test_session_ids.txt'\n_DATA_SPLITNAMES = [TRAIN_SPLITNAME, TEST_SPLITNAME]\n_INSTANCE_ID_DIVISOR = 256.0\n\n\nclass SanpoDataset:\n \"\"\"Dataset builder for SANPO dataset.\"\"\"\n\n def __init__(\n self,\n dataset_path: str,\n builder_config: common.SanpoConfig,\n **builder_overrides,\n ) -> None:\n self.builder_config = builder_config.replace(**builder_overrides)\n self._dataset_path = dataset_path\n\n # Verify target sizes\n if self.builder_config.target_shape is not None:\n target_h, target_w = self.builder_config.target_shape\n if target_h > target_w:\n raise ValueError(\n 'target_shape should be [height,width], but you set it to '\n f'[{target_h}, {target_w}] which looks like [width,height].'\n )\n if abs(target_w * 9 / 16 - target_h) > 1:\n raise ValueError(\n f'The target shape [{target_h},{target_w}] aspect ratio must be'\n f' 16:9. 
Consider setting a target_shape of either [{target_h},'\n f' {int(target_h*16/9)}] or [{int(target_w*9/16)}, {target_w}],'\n ' which would preserve the image aspect ratio.\\n\\nSANPO does not'\n ' perform cropping or color augmentation for you because'\n ' preprocessing strategies can vary by application.'\n # TODO(kwilber): add a crop tool and uncomment the below lines\n # f'To crop the image, you can use the `common.crop_*` '\n # f'family of functions which properly adjust camera intrinsics.'\n )\n\n # TODO(kwilber): Verify the config.\n self._data_sessions = collections.defaultdict(list)\n if self.builder_config.include_real:\n real_dataset_path = os.path.join(dataset_path, _SANPO_REAL_DIRNAME)\n self._real_sessions_train_list = common.SanpoSessionList(\n real_dataset_path,\n session_ids_or_ids_file=os.path.join(\n real_dataset_path, _TRAIN_SPLIT_FILENAME\n ),\n config=self.builder_config,\n )\n self._data_sessions[TRAIN_SPLITNAME].extend(\n self._real_sessions_train_list.get_valid_sessions()\n )\n self._real_sessions_test_list = common.SanpoSessionList(\n real_dataset_path,\n session_ids_or_ids_file=os.path.join(\n real_dataset_path, _TEST_SPLIT_FILENAME\n ),\n config=self.builder_config,\n )\n self._data_sessions[TEST_SPLITNAME].extend(\n self._real_sessions_test_list.get_valid_sessions()\n )\n\n if self.builder_config.include_synthetic:\n synthetic_dataset_path = os.path.join(\n dataset_path, _SANPO_SYNTHETIC_DIRNAME\n )\n self._synthetic_sessions_train_list = common.SanpoSessionList(\n synthetic_dataset_path,\n session_ids_or_ids_file=os.path.join(\n synthetic_dataset_path, _TRAIN_SPLIT_FILENAME\n ),\n config=self.builder_config,\n )\n self._data_sessions[TRAIN_SPLITNAME].extend(\n self._synthetic_sessions_train_list.get_valid_sessions()\n )\n self._synthetic_sessions_test_list = common.SanpoSessionList(\n synthetic_dataset_path,\n session_ids_or_ids_file=os.path.join(\n synthetic_dataset_path, _TEST_SPLIT_FILENAME\n ),\n config=self.builder_config,\n )\n self._data_sessions[TEST_SPLITNAME].extend(\n self._synthetic_sessions_test_list.get_valid_sessions()\n )\n\n if not self._data_sessions[TRAIN_SPLITNAME]:\n raise ValueError('Train split is empty.')\n\n if not self._data_sessions[TEST_SPLITNAME]:\n raise ValueError('Test split is empty.')\n\n # Shuffle the train and test sessions ids.\n for _, sessions_list in self._data_sessions.items():\n random.shuffle(sessions_list)\n\n def _get_tensor_signature(self) -> Mapping[str, tf.TensorSpec]:\n \"\"\"Return output signature for tf.data.Dataset `from_generator`.\"\"\"\n\n signature = {\n common.FEATURE_SESSION_TYPE: tf.TensorSpec(shape=(), dtype=tf.string),\n common.FEATURE_IMAGE: tf.TensorSpec(shape=(), dtype=tf.string),\n common.FEATURE_FRAME_ID: tf.TensorSpec(shape=(), dtype=tf.string),\n common.FEATURE_CAMERA_BASELINE_IN_METERS: tf.TensorSpec(\n shape=(), dtype=tf.float32\n ),\n common.FEATURE_CAMERA_INTRINSICS: tf.TensorSpec(\n shape=(4,), dtype=tf.float32\n ),\n }\n if self.builder_config.dataset_view_mode.is_stereo_mode():\n signature[common.FEATURE_IMAGE_RIGHT] = tf.TensorSpec(\n shape=(), dtype=tf.string\n )\n signature[common.FEATURE_CAMERA_RIGHT_INTRINSICS] = tf.TensorSpec(\n shape=(4,), dtype=tf.float32\n )\n\n if self.builder_config.feature_metric_depth.to_include():\n signature[common.FEATURE_METRIC_DEPTH_LABEL] = tf.TensorSpec(\n shape=(), dtype=tf.string\n )\n signature[common.FEATURE_HAS_METRIC_DEPTH_LABEL] = tf.TensorSpec(\n shape=(), dtype=tf.bool\n )\n\n if self.builder_config.feature_metric_depth_zed.to_include():\n 
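# ZED depth labels are stored as gzipped float16 rasters, decoded later by _tf_load_float16_gzipped\n      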
signature[common.FEATURE_METRIC_DEPTH_ZED_LABEL] = tf.TensorSpec(\n shape=(), dtype=tf.string\n )\n signature[common.FEATURE_HAS_METRIC_DEPTH_ZED_LABEL] = tf.TensorSpec(\n shape=(), dtype=tf.bool\n )\n\n if self.builder_config.feature_panoptic_mask.to_include():\n signature[common.FEATURE_PANOPTIC_MASK_LABEL] = tf.TensorSpec(\n shape=(), dtype=tf.string\n )\n signature[common.FEATURE_HAS_PANOPTIC_MASK_LABEL] = tf.TensorSpec(\n shape=(), dtype=tf.bool\n )\n\n if self.builder_config.feature_camera_pose.to_include():\n signature[common.FEATURE_TRACKING_STATE] = tf.TensorSpec(\n shape=(), dtype=tf.bool\n )\n signature[common.FEATURE_CAMERA_TRANSLATIONS] = tf.TensorSpec(\n shape=(3,), dtype=tf.float32\n )\n signature[common.FEATURE_CAMERA_QUATERNIONS] = tf.TensorSpec(\n shape=(4,), dtype=tf.float32\n )\n\n return signature\n\n def _maybe_resize(\n self, tensor: tf.Tensor, *, use_nearest_neighbor: bool\n ) -> tf.Tensor:\n \"\"\"Optionally resize the input tensor.\"\"\"\n if self.builder_config.target_shape is None:\n return tensor\n target_h, target_w = self.builder_config.target_shape\n if use_nearest_neighbor:\n resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n else:\n resize_method = tf.image.ResizeMethod.BILINEAR\n return tf.image.resize(tensor, [target_h, target_w], method=resize_method)\n\n def _tf_decode_image(self, filename: tf.Tensor) -> tf.Tensor:\n # can't use tf.io.decode_image here because\n # https://github.com/tensorflow/tensorflow/issues/9356\n return tf.io.decode_png(\n tf.io.read_file(filename),\n channels=3,\n dtype=tf.uint8,\n )\n\n def _tf_load_image(self, filename: tf.Tensor) -> tf.Tensor:\n image = self._tf_decode_image(filename)\n return tf.image.convert_image_dtype(\n image, tf.float32, saturate=False, name=None\n )\n\n def _np_load_npz(self, filename: tf.Tensor) -> tf.Tensor:\n with common.wrapped_open(filename.numpy(), 'rb') as f:\n b = io.BytesIO(f.read())\n npz = np.load(b)\n assert len(npz.files) == 1\n array = np.expand_dims(npz[npz.files[0]], -1).astype(np.float32)\n return tf.convert_to_tensor(array, dtype=tf.float32)\n\n def _tf_load_npz(self, filename: tf.Tensor) -> tf.Tensor:\n arr = tf.py_function(self._np_load_npz, [filename], Tout=[tf.float32])[0]\n return tf.ensure_shape(arr, [None, None, 1])\n\n def _tf_load_float16_gzipped(self, filename: tf.Tensor) -> tf.Tensor:\n data_tensor = tf.io.decode_raw(\n tf.io.decode_compressed(tf.io.read_file(filename), 'GZIP'),\n tf.float16,\n little_endian=True,\n )\n height = data_tensor[0]\n width = data_tensor[1]\n x = tf.reshape(data_tensor[2:], [height, width, 1])\n return tf.ensure_shape(x, [None, None, 1])\n\n @tf.function\n def _tf_load_panoptic_labels(\n self,\n features: Mapping[str, tf.Tensor],\n image: tf.Tensor,\n ) -> Mapping[str, tf.Tensor]:\n \"\"\"Loads panoptic segmentation labels if they are included.\"\"\"\n if common.FEATURE_PANOPTIC_MASK_LABEL in features:\n if features[common.FEATURE_HAS_PANOPTIC_MASK_LABEL]:\n mask = tf.cast(\n self._tf_decode_image(features[common.FEATURE_PANOPTIC_MASK_LABEL]),\n dtype=tf.float32,\n )\n else:\n mask = tf.zeros_like(image, dtype=tf.float32)\n\n # We save panoptic label as a 3 channel image.\n # First channel contains the semantic label.\n # Instance id can be computed using the second and third channel\n # using the following formula:\n # `instance_id = mask[:,:,1] * 256 + mask[:,:,2].`\n # The panoptic label is computed using the following formula:\n # `panoptic_label = semantic_label * label_divisor + instance_id.`\n\n semantic_label = mask[:, :, 0]\n 
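# recover the instance id from channels 1 and 2: instance_id = mask[:,:,1] * 256 + mask[:,:,2]\n      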
instance_id = (\n tf.math.scalar_mul(_INSTANCE_ID_DIVISOR, mask[:, :, 1])\n + mask[:, :, 2]\n )\n semantic_label = tf.expand_dims(semantic_label, -1)\n instance_id = tf.expand_dims(instance_id, -1)\n semantic_label = self._maybe_resize(\n semantic_label, use_nearest_neighbor=True\n )\n instance_id = self._maybe_resize(instance_id, use_nearest_neighbor=True)\n return {\n common.FEATURE_SEMANTIC_LABEL: semantic_label,\n common.FEATURE_INSTANCE_ID: instance_id,\n common.FEATURE_HAS_PANOPTIC_MASK_LABEL: tf.convert_to_tensor(\n features[common.FEATURE_HAS_PANOPTIC_MASK_LABEL]\n ),\n }\n\n return {}\n\n @tf.function\n def _tf_load_camera_pose(\n self, features: Mapping[str, tf.Tensor]\n ) -> Mapping[str, tf.Tensor]:\n \"\"\"Returns camere pose tensors if they are included.\"\"\"\n if common.FEATURE_TRACKING_STATE in features:\n return {\n common.FEATURE_TRACKING_STATE: features[\n common.FEATURE_TRACKING_STATE\n ],\n common.FEATURE_CAMERA_TRANSLATIONS: features[\n common.FEATURE_CAMERA_TRANSLATIONS\n ],\n common.FEATURE_CAMERA_QUATERNIONS: features[\n common.FEATURE_CAMERA_QUATERNIONS\n ],\n }\n\n return {}\n\n @tf.function\n def _tf_load_depth(\n self,\n features: Mapping[str, tf.Tensor],\n image: tf.Tensor,\n ) -> Mapping[str, tf.Tensor]:\n \"\"\"Returns zed depth tensors if they are included.\"\"\"\n new_feats = {}\n if common.FEATURE_METRIC_DEPTH_ZED_LABEL in features:\n if features[common.FEATURE_HAS_METRIC_DEPTH_ZED_LABEL]:\n zed_depth = tf.cast(\n self._tf_load_float16_gzipped(\n features[common.FEATURE_METRIC_DEPTH_ZED_LABEL]\n ),\n tf.float32,\n )\n else:\n zed_depth = tf.zeros_like(image[:, :, 0], dtype=tf.float32)\n zed_depth = tf.expand_dims(zed_depth, -1)\n new_feats[common.FEATURE_HAS_METRIC_DEPTH_ZED_LABEL] = features[\n common.FEATURE_HAS_METRIC_DEPTH_ZED_LABEL\n ]\n zed_depth = self._maybe_resize(zed_depth, use_nearest_neighbor=True)\n new_feats[common.FEATURE_METRIC_DEPTH_ZED_LABEL] = zed_depth\n\n if common.FEATURE_METRIC_DEPTH_LABEL in features:\n if features[common.FEATURE_HAS_METRIC_DEPTH_LABEL]:\n metric_depth = tf.cast(\n self._tf_load_float16_gzipped(\n features[common.FEATURE_METRIC_DEPTH_LABEL]\n ),\n tf.float32,\n )\n else:\n metric_depth = tf.zeros_like(image[:, :, 0], dtype=tf.float32)\n metric_depth = tf.expand_dims(metric_depth, -1)\n new_feats[common.FEATURE_HAS_METRIC_DEPTH_LABEL] = features[\n common.FEATURE_HAS_METRIC_DEPTH_LABEL\n ]\n metric_depth = self._maybe_resize(metric_depth, use_nearest_neighbor=True)\n new_feats[common.FEATURE_METRIC_DEPTH_LABEL] = metric_depth\n\n return new_feats\n\n def _load_data_files(\n self, features: Mapping[str, tf.Tensor]\n ) -> Mapping[str, tf.Tensor]:\n \"\"\"Returns dictionary of loaded feature files.\"\"\"\n\n loaded_features = {}\n for passthrough_feature in [\n common.FEATURE_SESSION_TYPE,\n common.FEATURE_FRAME_ID,\n common.FEATURE_CAMERA_BASELINE_IN_METERS,\n common.FEATURE_CAMERA_INTRINSICS,\n common.FEATURE_CAMERA_RIGHT_INTRINSICS,\n ]:\n if passthrough_feature in features:\n loaded_features[passthrough_feature] = features[passthrough_feature]\n\n # load images\n for image_feature in [\n common.FEATURE_IMAGE,\n common.FEATURE_IMAGE_RIGHT,\n ]:\n if image_feature in features:\n loaded_features[image_feature] = self._maybe_resize(\n self._tf_load_image(features[image_feature]),\n use_nearest_neighbor=False,\n )\n\n # Load panoptic segmentation labels\n loaded_features.update(\n self._tf_load_panoptic_labels(\n features, loaded_features[common.FEATURE_IMAGE]\n )\n )\n # Load camera pose.\n 
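# (camera pose features are passthrough tensors; no file IO is needed for them)\n    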
loaded_features.update(self._tf_load_camera_pose(features))\n # Load zed depth.\n loaded_features.update(\n self._tf_load_depth(features, loaded_features[common.FEATURE_IMAGE])\n )\n\n return loaded_features\n\n def _samples_to_tf_frame_features(\n self, samples: Iterator[Mapping[str, Any]]\n ) -> Iterator[Mapping[str, tf.Tensor]]:\n \"\"\"Iterate over {feature_name: tensor, ...} frames in a session.\"\"\"\n for sample in samples:\n features = {}\n for feature_name, feature_value in sample.items():\n if isinstance(feature_value, pathlib.Path):\n feature_value = feature_value.as_posix()\n features[feature_name] = tf.convert_to_tensor(feature_value)\n yield features\n\n def _session_to_frame_dataset(\n self, session: common.SanpoSession\n ) -> tf.data.Dataset:\n \"\"\"Creates a tf.data.Dataset of individual frames from a SanpoSession.\"\"\"\n return self._samples_to_frame_dataset(session.all_frame_itersamples())\n\n def _samples_to_frame_dataset(\n self, samples: Iterator[Mapping[str, Any]]\n ) -> tf.data.Dataset:\n \"\"\"Creates a tf.data.Dataset of frames from the given samples.\"\"\"\n tf_features = functools.partial(\n self._samples_to_tf_frame_features, samples=samples\n )\n return tf.data.Dataset.from_generator(\n tf_features,\n output_signature=self._get_tensor_signature(),\n )\n\n def _session_to_video_datasets(\n self, session: common.SanpoSession\n ) -> Iterator[tf.data.Dataset]:\n \"\"\"Creates tf.data.Datasets of video clips from a SanpoSession.\"\"\"\n for video_samples in session.video_itersamples():\n ds = self._samples_to_frame_dataset(video_samples)\n\n if self.builder_config.video_frame_stride:\n ds = ds.shard(self.builder_config.video_frame_stride, 0)\n\n if not self.builder_config.num_video_frames:\n raise ValueError('num_video_frames must be specified')\n\n # Batch the frames into video clips of length num_video_frames. We must\n # use drop_remainder=True to ensure each video clip has the exact same\n # number of frames because the later mapping to load the data files\n # requires rebatching. This is done so that all the video clips across\n # multiple sessions can be shuffled together before performing the\n # expensive data loading step.\n ds = ds.batch(\n self.builder_config.num_video_frames,\n drop_remainder=True,\n )\n\n yield ds\n\n def to_tf_data(\n self,\n split_name: Optional[str] = None,\n ) -> Union[Tuple[tf.data.Dataset, tf.data.Dataset], tf.data.Dataset]:\n \"\"\"Creates tf.data.Datasets for train and test splits of the session.\n\n Args:\n split_name: If specified, returns a single dataset for this split. If\n None, a dataset for each train and test split is returned. 
Default: None\n\n Returns:\n A tuple containing the train and test datasets, or a single dataset if\n split_name was specified.\n \"\"\"\n result_datasets = []\n if split_name and split_name not in _DATA_SPLITNAMES:\n raise ValueError('split_name must be one of {}'.format(_DATA_SPLITNAMES))\n\n split_names = [split_name] if split_name else _DATA_SPLITNAMES\n for split_name in split_names:\n sessions = self._data_sessions[split_name]\n\n # Convert each session into a dataset.\n if self.builder_config.dataset_view_mode.is_video_mode():\n # For video mode the dataset will contain one or more 'video clips'\n # (batch of frames).\n session_datasets = itertools.chain.from_iterable(\n map(self._session_to_video_datasets, sessions)\n )\n else:\n # For frame mode the dataset will contain one sample for each video\n # frame.\n session_datasets = map(self._session_to_frame_dataset, sessions)\n\n # Concat all the session datasets together.\n dataset = next(session_datasets)\n for ds in session_datasets:\n dataset = dataset.concatenate(ds)\n\n # Cache the dataset before shuffling.\n dataset = dataset.cache()\n\n # The dataset features now contain filenames. Below we shuffle the\n # data together (frames or video clips) and then load the data from files.\n # Since loading the data is expensive and uses significant memory, it's\n # much more efficient to shuffle before loading the data.\n if self.builder_config.shuffle_buffer_size > 1:\n dataset = dataset.shuffle(\n self.builder_config.shuffle_buffer_size,\n reshuffle_each_iteration=True,\n )\n\n # For video mode, unbatch the video clips into individual frames so we can\n # apply the mapping function to load the data files.\n if self.builder_config.dataset_view_mode.is_video_mode():\n dataset = dataset.unbatch()\n\n # Map the frames by loading the underlying data files into memory.\n # For video mode this is done deterministicly to preserve the order of\n # frames for rebatching back into video clips.\n map_deterministic = self.builder_config.dataset_view_mode.is_video_mode()\n dataset = dataset.map(\n self._load_data_files,\n num_parallel_calls=tf.data.AUTOTUNE,\n deterministic=map_deterministic,\n )\n\n # For video mode, rebatch the frames into videos. 
Since the number of\n # frames in each video is fixed, this is guaranteed that each frame will\n # return to its correct corresponding video clip.\n if self.builder_config.dataset_view_mode.is_video_mode():\n dataset = dataset.batch(self.builder_config.num_video_frames)\n\n result_datasets.append(dataset)\n\n if len(result_datasets) == 1:\n return result_datasets[0]\n else:\n return tuple(result_datasets)\n","repo_name":"google-research-datasets/sanpo_dataset","sub_path":"sanpo_dataset/lib/tensorflow_dataset.py","file_name":"tensorflow_dataset.py","file_ext":"py","file_size_in_byte":19100,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"43"} +{"seq_id":"11387080409","text":"class Solution:\n def distinctNames(self, ideas: List[str]) -> int:\n word_map = collections.defaultdict(set)\n ans = 0\n\n for w in ideas:\n word_map[w[0]].add(w[1:])\n\n for ch1 in word_map:\n for ch2 in word_map:\n if ch1 == ch2:\n continue\n duplicate = 0\n for w in word_map[ch1]:\n if w in word_map[ch2]:\n duplicate += 1\n\n unique1 = len(word_map[ch1]) - duplicate\n unique2 = len(word_map[ch2]) - duplicate\n\n ans += unique1*unique2\n\n return ans\n","repo_name":"K-G-PRAJWAL/Leetcode","sub_path":"Python/2306-Naming-a-Company.py","file_name":"2306-Naming-a-Company.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"24701283797","text":"import blockpcaapi\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\n\n\nwith open(\"public_key.pub\", \"rb\") as key_file:\n public_key = serialization.load_pem_public_key(\n key_file.read(),\n backend=default_backend()\n )\n\nwith open(\"private_key.pem\", \"rb\") as key_file:\n private_key = serialization.load_pem_private_key(\n key_file.read(),\n password=b'test',\n backend=default_backend()\n )\n\n\n\nmedical_data_sign = private_key.sign(\n medical_data_hash,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n)\n\n\npublic_key_in_bytes = public_key.public_bytes(encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo)\n\nprint(\"medical_data_hash:\")\nprint(medical_data_hash)\nprint(\"medical_data_sign:\")\nprint(medical_data_sign)\nprint(\"public_key_in_bytes\")\nprint(public_key_in_bytes)\n\n","repo_name":"anwarcse12028/BlockchainSDN","sub_path":"test/blockpca-upload.py","file_name":"blockpca-upload.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"32159975921","text":"#!/usr/bin/python\nimport psutil\nimport time\nimport os\nimport sqlite3\nfrom sqlite3 import Error\nfrom playsound import playsound\nimport socket\nimport re\nimport time\nfrom IPy import IP\n\ndef validate_ip_address(address):\n parts = address.split(\".\")\n\n if len(parts) != 4:\n print(\"IP address {} is not valid\".format(address))\n return False\n\n for part in parts:\n if not isinstance(int(part), int):\n print(\"IP address {} is not valid\".format(address))\n return False\n\n if int(part) < 0 or int(part) > 255:\n print(\"IP address {} is not valid\".format(address))\n return False\n \n print(\"IP address {} is valid\".format(address))\n return True \n\ndef get_chrome():\n 
listOfProcessIds = findProcessIdByName(\"wexond\")\n if len(listOfProcessIds) > 0:\n\n for elem in listOfProcessIds:\n processID = elem['pid']\n processName = elem['name']\n processCreationTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(elem['create_time']))\n# print(str(elem['cmdline'][-1]))\n single_stander = psutil.Process(processID)\n # single_stander.terminate()\n # print(\"Process:\"+processName+\" terminated\")\n if single_stander.connections() is not \"\":\n #playsound(\"alert.mp3\")\n for cons in single_stander.connections():\n# radarcan = socket.gethostbyaddr(cons.raddr)\n #print(re.findall('\"([^']*)\"', str(cons.raddr)))\n stem = str(cons.raddr)\n pattern = \"addr(ip='\"\n fist_part = stem.split(pattern,maxsplit=1)\n ipvepirt = list(fist_part)[-1]\n outp = str(ipvepirt)\n raddr = outp.partition(\"'\")[0]\n rport = outp.partition(\"port=\")[-1][:-1]\n print(raddr+\":\"+rport)\n try:\n validate_ip_address(raddr)\n raddr = IP(raddr)\n raddr = raddr.reverseName()\n time.sleep(1)\n print(raddr)\n print(os.system(\"lsof -i | grep wexond\" ))\n# radarcan = socket.getaddrinfo(get_ip, get_port)\n # print(str(radarcan))\n database = r\"webapp/asskicker.db\"\n \n \n conn = create_connection(database)\n with conn:\n task_2 = (str(cons), str(raddr), str(processCreationTime), str(rport))\n\n # create tasks\n create_task(conn, task_2)\n except:\n pass\n\n \n\n\"copiedfuncs\"\n\n\"\"\"\nList all the ports opened by processes on the local machine\nto run it with sudo: sudo python port_processes.py\n\"\"\"\n\n\ndef create_connection(db_file):\n \"\"\" create a database connection to the SQLite database\n specified by db_file\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n\n return conn\ndef create_task(conn, task):\n \"\"\"\n Create a new task\n :param conn:\n :param task:\n :return:\n \"\"\"\n\n sql = ''' INSERT INTO browserhistory(wholeconn,raddr,Time,rport)\n VALUES(?,?,?,?) 
'''\n cur = conn.cursor()\n cur.execute(sql, task)\n conn.commit()\n\n return cur.lastrowid\n\ndef checkIfProcessRunning(processName):\n '''\n Check if there is any running process that contains the given name processName.\n '''\n #Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n # Check if process name contains the given name string.\n if processName.lower() in proc.name().lower():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False;\ndef findProcessIdByName(processName):\n '''\n Get a list of all the PIDs of a all the running process whose name contains\n the given string processName\n '''\n listOfProcessObjects = []\n #Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'name','cmdline', 'create_time'])\n # Check if process name contains the given name string.\n if processName.lower() in pinfo['name'].lower() :\n listOfProcessObjects.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass\n return listOfProcessObjects;\n\ndef sokaktadilen():\n\n wholetube = psutil.net_connections()\n lugochinese = []\n for pid in wholetube:\n lugochinese.append(pid.pid)\n mylist = list(dict.fromkeys(lugochinese))\n \n\n list_to_stand = ['NetworkManager','dhclient', 'wexond','tor','python3','flask']\n ovidoc = []\n listOfProcessIds = findProcessIdByName(\"wexond\")\n\n for listos in mylist:\n\n\n \n single_stander = psutil.Process(listos)\n ovidoc.append(single_stander.name())\n mylist = list(dict.fromkeys(ovidoc))\n for last_tpo_stand in mylist:\n if last_tpo_stand == \"python3\":\n listOfProcessIds = findProcessIdByName(\"python3\")\n if len(listOfProcessIds) > 0:\n\n for elem in listOfProcessIds:\n processID = elem['pid']\n processName = elem['name']\n processCreationTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(elem['create_time']))\n print(str(elem['cmdline'][-1]))\n if str(elem['cmdline'][-1]) == \"determinist.py\":\n pass\n elif str(elem['cmdline'][-1]) == \"deterics.py\":\n pass\n elif str(elem['cmdline'][-1]) == \"run\":\n pass\n elif str(elem['cmdline'][-1]) == \"deterist.py\":\n pass\n elif str(elem['cmdline'][-1]) == \"detergoogle.py\":\n pass\n\n else:\n print(str(elem['cmdline'][-1]))\n print('Tek olası revshell canum')\n print((processID ,processName,processCreationTime ))\n single_stander = psutil.Process(processID)\n if single_stander.connections() is not \"\":\n #playsound(\"alert.mp3\")\n for cons in single_stander.connections():\n database = r\"/data/data/com.termux/dar_kesim/webapp/asskicker.db\"\n \n \n conn = create_connection(database)\n with conn:\n task_2 = (str(processID), str(processName), str(processCreationTime), str(cons))\n\n # create tasks\n create_task(conn, task_2)\n watereye = open(\"/data/data/com.termux/dar_kesim/webapp/olupbiten.txt\",\"a\")\n\n watereye.write(\"IP:\"+str(processID)+ \"Port:\" + str(processName) + \"Time:\"+ str(processCreationTime))\n watereye.close()\n# single_stander.terminate()\n# playsound(\"alert.mp3\")\n# print(\"Process:\"+processName+\" terminated\")\n\n #database = r\"C:\\sqlite\\db\\pythonsqlite.db\"\n\n \n #conn = create_connection(database)\n #with conn:\n \n \n \n # task_2 = ('Confirm with user about the top requirements', 1, 1, project_id, '2015-01-03', '2015-01-05')\n\n \n # create_task(conn, task_1)\n \n# single_stander = psutil.Process(processID)\n # single_stander.terminate()\n # print(\"Process:\"+processName+\" 
terminated\")\n\n\n elif last_tpo_stand not in list_to_stand:\n listOfProcessIds = findProcessIdByName(last_tpo_stand)\n if len(listOfProcessIds) > 0:\n print('Process Exists | PID and other details are')\n for elem in listOfProcessIds:\n processID = elem['pid']\n processName = elem['name']\n processCreationTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(elem['create_time']))\n print((processID ,processName,processCreationTime ))\n \n if processName not in list_to_stand:\n \n single_stander = psutil.Process(processID)\n# single_stander.terminate()\n #print(\"Process:\"+processName+\" terminated\")\n if single_stander.connections() is not \"\":\n #playsound(\"alert.mp3\")\n for cons in single_stander.connections():\n database = r\"/data/data/com.termux/dar_kesim/webapp/asskicker.db\"\n\n \n conn = create_connection(database)\n with conn:\n task_2 = (str(processID), str(processName), str(processCreationTime), str(cons))\n\n # create tasks\n create_task(conn, task_2)\n watereye = open(\"/data/data/com.termux/dar_kesim/webapp/olupbiten.txt\",\"a\")\n \n watereye.write(\"Process:\"+str(processID)+ \" ProcessName:\" + str(processName) + \"Time:\"+ str(processCreationTime) + \"Connections:\" + str(cons) + \"\\n\")\n watereye.close()\n single_stander.terminate()\n #playsound(\"alert.mp3\")\n print(\"Process:\"+processName+\" terminated\")\n\n else:\n print('No Running Process found with given text')\n print('** Find running process by name using List comprehension **')\n\n\ndef whitelist():\n try:\n f=open(\"whitelist.domains\",\"r\") #Open external file to see what sites can pass our gateway\n filterlist=f.read()\n for line in filterlist.split():\n if(\";\" in line):\n print(\"Ignore comment\")\n else:\n try:\n os.system(\"bash whiter.sh \"+line) \n except:\n print (\"Can't load filter list\")\n# socket.inet_aton(line)\n # print(\"I'm an ipv4! \",line)\n #if i'm here cuz line is an ipv4 address\n # os.popen(\"iptables -I FORWARD -p ALL -m string --string \"+line+\" --algo kmp \"+timeout+\" -j ACCEPT\")\n # os.popen(\"iptables -I FORWARD -p ALL -m string --string \"+line+\" --algo kmp -j LOG --log-prefix 'WHITELIST-SDS'\")\n # except: # if i'm there cuz its not an ipv4 so a normal string\n # os.popen(\"iptables -I FORWARD -p tcp --match multiport --dports 80,443 -m string --string \"+line+\" --algo kmp \"+timeout+\" -j ACCEPT\")\n # os.popen(\"iptables -I FORWARD -p udp --dport 53 -m string --string \"+line+\" --algo kmp \"+timeout+\" -j ACCEPT\")\n # os.popen(\"iptables -I FORWARD -p ALL -m string --string \"+line+\" --algo kmp -j LOG --log-prefix 'WHITELIST-SDS'\")\n# print (\"added whitelist rule: \",line)\n while True:\n sokaktadilen()\n except:\n print (\"Can't load filter list\")\n\n\n\n\nif __name__ == \"__main__\":\n# whitelist()\n while True:\n get_chrome()\n","repo_name":"OgulcanUnveren/DarKesim","sub_path":"detergoogle.py","file_name":"detergoogle.py","file_ext":"py","file_size_in_byte":12365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13817476582","text":"import json\nimport openshift as oc\nimport time\nimport uuid\n\n# The Pod \"task_name\" is invalid: metadata.name: Invalid value: \"task_name\": a DNS-1123 subdomain must \n# consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric \n# character (e.g. 
'example.com', regex used for validation is \n# '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')]\n\n#task_name = \"task-name\"\ntask_name = str(uuid.uuid1())\n\n#Exception OpenShiftPythonException\nwith open('template-start-up.json') as json_file:\n template = json.load(json_file)\n template[\"metadata\"][\"name\"] = task_name\n template[\"metadata\"][\"labels\"][\"task\"] = task_name\n output = oc.create(template)\n #print(output)\n #if output['status'] != 0:\n # print(\"Error\")\n\nc = oc.selector('pods', labels={\"task\": task_name, \"app\": \"start-up-app\"})\nobj = c.objects()\n\n# Error in 'actions' (list) - 'err' for status 'status'\nif len(obj) == 0:\n raise Exception(\"Error\")\nelif len(obj) > 1:\n raise Exception(\"Error\")\nprint(\"Created successfully\")\ncontainer_info = obj[0].as_dict()\n\nn_tries = 0\ncomplete = False\nwhile n_tries < 20 and not complete:\n time.sleep(10)\n print(\"Another try\")\n obj[0].refresh()\n container_info = obj[0].as_dict() # re-read the pod state after refreshing, otherwise the check below sees stale data\n if 'status' in container_info and 'phase' in container_info['status'] and container_info['status']['phase'] == 'Succeeded':\n complete = True\n n_tries += 1\n\n# Validate the exit code; it should be\n#container_info['status']['containerStatuses'][0]['state']['terminated']['exitCode']\n# if container_info['status']['exitCode'] != 0:\n# raise Exception(\"test\")\n\nc.delete()","repo_name":"MaastrichtU-CDS/ncdc-memorabel","sub_path":"v6_cluster_wrapper/ncdc_maastricht_wrapper/testing/draft-connection.py","file_name":"draft-connection.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"2246245175","text":"#Runebooks MUST be dyed using the top option of the Blue Tab\r\n#Must Create Buy Agent list and include Blank Scrolls\r\nhome_runebook_serial = 0x4100F093\r\nscroll_bag = 0x422611DF\r\nbeetle = 0x0282C31D\r\ndragTime = 1200 # ms to wait after dismounting before dragging items\r\n\r\nblank_scrolls = 0x0EF3\r\nBuyAgent.Enable()\r\n\r\ndef scroll_dump():\r\n if Player.Weight > Player.MaxWeight - 200:\r\n home_runebook_properties = Items.GetPropStringList(home_runebook_serial)\r\n home_title = home_runebook_properties[5]\r\n home_title_list = home_title.split()\r\n home_rune = home_title_list[1]\r\n recall_response = 49 + int(home_rune)\r\n Items.UseItem(home_runebook_serial)\r\n Gumps.WaitForGump(89, 10000)\r\n Gumps.SendAction(89, recall_response)\r\n Misc.Pause(4000)\r\n for i in Player.Backpack.Contains:\r\n if i.ItemID == blank_scrolls:\r\n Items.Move(i, scroll_bag, 0)\r\n Misc.Pause(2000)\r\n\r\ndef overweight():\r\n if Player.Weight > Player.MaxWeight:\r\n for s in Player.Backpack.Contains:\r\n if s.ItemID == blank_scrolls:\r\n if Player.Mount:\r\n Mobiles.UseMobile(Player.Serial)\r\n Misc.Pause(dragTime)\r\n Items.Move(s, beetle, 0)\r\n Misc.Pause(1200)\r\n if not Player.Mount:\r\n Mobiles.UseMobile(beetle)\r\n Misc.Pause(1200)\r\n home_runebook_properties = Items.GetPropStringList(home_runebook_serial)\r\n home_title = home_runebook_properties[5]\r\n home_title_list = home_title.split()\r\n home_rune = home_title_list[1]\r\n recall_response = 49 + int(home_rune)\r\n Items.UseItem(home_runebook_serial)\r\n Gumps.WaitForGump(89, 10000)\r\n Gumps.SendAction(89, recall_response)\r\n Misc.Pause(4000)\r\n\r\nwhile Player.IsGhost == False:\r\n for i in Player.Backpack.Contains:\r\n if i.ItemID == 0x22C5 and i.Hue == 2122:\r\n book = Items.GetPropStringList(i)\r\n book_title = book[5]\r\n title_list = book_title.split()\r\n runes = title_list[1]\r\n recalls = 1\r\n gump_action = 50\r\n while recalls < 
int(runes):\r\n Journal.Clear()\r\n Items.UseItem(i)\r\n Gumps.WaitForGump(89, 5000)\r\n while Gumps.HasGump() == False:\r\n Items.UseItem(i)\r\n Gumps.WaitForGump(89, 5000)\r\n Gumps.SendAction(89, gump_action)\r\n Misc.Pause(4000)\r\n while Journal.GetLineText('blocking') or Journal.GetLineText('teleport'):\r\n recalls += 1\r\n gump_action += 1\r\n Journal.Clear()\r\n Items.UseItem(i)\r\n Gumps.WaitForGump(89, 5000)\r\n while Gumps.HasGump() == False:\r\n Items.UseItem(i)\r\n Gumps.WaitForGump(89, 5000)\r\n Gumps.SendAction(89, gump_action)\r\n Misc.Pause(4000)\r\n Player.ChatSay(1, 'Vendor Buy')\r\n recalls += 1\r\n gump_action+= 1\r\n Misc.Pause(1000)\r\n scroll_dump()\r\n if Player.Weight > Player.MaxWeight:\r\n Spells.CastMagery('Bless')\r\n Target.WaitForTarget(10000, False)\r\n Target.Self()\r\n Misc.Pause(1000)\r\n \r\n Misc.Pause(300000)","repo_name":"rduckey/enhanced_razor_scripts","sub_path":"Crafting/tools_recaller.py","file_name":"tools_recaller.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"72022079811","text":"\"\"\"\n-------------------------------------------------------\nclassify\na function that classifies non G - H wrt to H\n-------------------------------------------------------\nAuthor: Dallas Fraser\nID: 110242560\nEmail: fras2560@mylaurier.ca\nVersion: 2014-09-10\n-------------------------------------------------------\n\"\"\"\n\ndef classification(hole, graph):\n '''\n a function that classifies non G - H wrt to H\n Parameters:\n hole: a list of vertices that form a hole\n graph: a networkx graph\n Returns:\n groups: {x:{0:[], ...}, y:{0:[],...}, r:[], w:[], spoke=[]}\n '''\n x = {}\n y = {}\n for node in hole:\n x[node] = []\n y[node] = []\n r = []\n w = []\n spokes = []\n for node in graph.nodes():\n if node not in hole:\n count = 0 \n bros = []\n for neighbor in graph.neighbors(node):\n if neighbor in hole:\n count += 1\n bros.append(neighbor)\n if count == len(hole):\n w.append(node)\n elif count == 1:\n x[bros[0]].append(node)\n elif count == 0:\n r.append(node)\n elif count == 2:\n if bros[0] < bros[1]:\n y[bros[0]].append(node)\n else:\n y[bros[1]].append(node)\n else:\n spokes.append(node)\n return {'x': x,\n 'y': y,\n 'w': w,\n 'r': r,\n 'spokes': spokes}\n\nimport unittest\nfrom graph.helper import make_cycle\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.graph = make_cycle(5)\n x = 5\n y = 6\n w = 7\n self.graph.add_node(x)\n self.graph.add_node(y)\n self.graph.add_node(w)\n \n self.graph.add_edge(1, x)\n self.graph.add_edge(2, y)\n self.graph.add_edge(3, y)\n for i in range(0, 4):\n self.graph.add_edge(i, w)\n\n def tearDown(self):\n pass\n\n def testName(self):\n result = classification(make_cycle(5), self.graph)\n print(result)\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","repo_name":"fras2560/research","sub_path":"KiteEvenHoleFree/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"889266605","text":"from flask import Flask, render_template, request, redirect, session\n\n\napp = Flask(__name__)\napp.secret_key = '4d7c43d56bd44ced91c98799f6860a5e' # set a secret key for security purposes\n\n@app.route('/') #get: see form\ndef show_counter():\n return render_template('show.html') #show html pg (don't use for method response)\n #template: use 
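# [Illustrative usage sketch for the session-counter app above; the calls are
# standard Flask test-client APIs, and the asserted values assume a fresh session]
with app.test_client() as client:
    client.get('/count')                      # first visit: session["counter"] = 1
    client.get('/count')                      # second visit increments it
    with client.session_transaction() as sess:
        assert sess["counter"] == 2
    client.get('/reset')                      # clear(), then reset counter to 1
    with client.session_transaction() as sess:
        assert sess["counter"] == 1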
html in flask\n\n@app.route('/count') #post: process form, send data\ndef create_counter():\n if \"counter\" in session:\n session[\"counter\"] += 1\n else:\n session[\"counter\"] = 1\n return redirect('/') #render show.html template\n\n\n@app.route('/reset') #post: process form, send data\ndef reset():\n session.clear()\n session[\"counter\"] = 1\n return redirect('/')\n\n\n\nif __name__==\"__main__\":\n app.run(debug=True)\n\n\n\n","repo_name":"hillarychang/dojo","sub_path":"coding_dojo/flask_first/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"75064653888","text":"import libpf\n\nuukey, size = libpf.hash_and_size('framing_format.txt')\nassert uukey == 'e57d4fdb99841cb565edad94cc89af1c3691fd34e6f7a8223ba321824e9a2a471c2b0caa43a3ede6fa7e49cb43bb023ca8f1c71e0455e8c30dc9c70b214d93c2'\nassert size == 4634\n\ntest_str = 'Hello, world! This is a test string for libpf and SnappyFd. Test test test test.'\n\nsfd = libpf.SnappyFd('test.sz', libpf.MODE_COMPRESS)\nsfd.write(test_str)\nsfd.close()\n\nsfd = libpf.SnappyFd('test.sz', libpf.MODE_DECOMPRESS)\nread_str = sfd.read()\nsfd.close()\nassert read_str == test_str\n","repo_name":"fyhuang/permafreeze","sub_path":"libpf/libpf_test.py","file_name":"libpf_test.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36482192633","text":"# import random\n\n# Column1 = list(range(1,10))\n# Column2 = list(random.randrange(4,8,2) for _ in range(len(Column1)))\n\n\n# from xlsxwriter import Workbook\n# workbook = Workbook('Ecl.xlsx')\n# Report_Sheet=workbook.add_worksheet()\n# Report_Sheet.write(0, 0, 'Column1')\n# Report_Sheet.write(0, 1, 'Column2')\n\n# for row_ind, row_value in enumerate(zip(Column1, Column2)):\n# print (row_ind, row_value)\n# for col_ind, col_value in enumerate(row_value):\n# Report_Sheet.write(row_ind + 1, col_ind, col_value)\n\n# workbook.close()\n\n\"\"\"\n빵형 exel 파일 읽기\n\"\"\"\n\nfrom openpyxl import Workbook\nwb = Workbook() # 새 워크북 생성\n\nws = wb.create_sheet() # 새로운 Sheet 기본 이름으로 생성\nws.title = \"MySheet\" # Sheet 이름 변경\nws.sheet_properties.tabColor = \"ff66ff\" \n\nws1 = wb.create_sheet(\"YourSheet\")\nws2 = wb.create_sheet(\"NewSheet\", 2)\n\nnew_ws = wb[\"NewSheet\"] # Dict 형태로 Sheet에 접근\nprint(wb.sheetnames) # 모든 Sheet 이름 확인\n\n# Sheet 복사\nnew_ws[\"A1\"] = \"Test\"\ntarget = wb.copy_worksheet(new_ws)\ntarget.title = \"Copied Sheet\"\n\nwb.save(\"sample.xlsx\")\n","repo_name":"jdj2261/Programming-study","sub_path":"python/Excel/01_Nado_excel.py","file_name":"01_Nado_excel.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"43946681671","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 12 09:54:00 2023\r\n\r\n@author: user\r\n\"\"\"\r\n\r\nfrom flask import Flask, request, jsonify\r\nfrom flask_restful import Resource, Api\r\nimport requests\r\nfrom config import *\r\n\r\napp = Flask(__name__)\r\napi = Api(app)\r\n\r\nclass Cartographie(Resource):\r\n def get(self, start, end):\r\n url = f\"https://api.openrouteservice.org/v2/directions/driving-car?start={start}&end={end}\"\r\n headers = {\"Authorization\": OPEN_ROUTE_API_KEY}\r\n response = requests.get(url, headers=headers)\r\n data = response.json()\r\n distance = 
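# [Illustrative sketch, hypothetical helper mirroring the commented-out
# defensive variant later in this file] check the HTTP status before indexing
# into the JSON body, so upstream failures surface clearly:
import requests

def get_json(url, **kwargs):
    resp = requests.get(url, timeout=10, **kwargs)
    resp.raise_for_status()   # raise on 4xx/5xx instead of a KeyError later
    return resp.json()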
data[\"features\"][0][\"properties\"][\"segments\"][0][\"distance\"]\r\n return {\"distance\": distance}\r\n\r\nclass BornesRecharge(Resource):\r\n def get(self, lat, lon):\r\n url = BORNES_API_URL + f\"{lat},{lon},10000\" # Cherche les bornes dans un rayon de 10 km\r\n response = requests.get(url)\r\n data = response.json()\r\n bornes = [{\"nom\": record[\"fields\"][\"n_station\"], \r\n \"adresse\": record[\"fields\"][\"ad_station\"], \r\n \"lat\": record[\"fields\"][\"geom\"][\"coordinates\"][1], \r\n \"lon\": record[\"fields\"][\"geom\"][\"coordinates\"][0]} \r\n for record in data[\"records\"]]\r\n return bornes\r\n # def get(self, lat, lon):\r\n # try:\r\n # # Vérifiez si la latitude et la longitude sont valides\r\n # if not (-90 <= lat <= 90) or not (-180 <= lon <= 180):\r\n # return {\"error\": \"Invalid latitude or longitude\"}, 400\r\n\r\n # url = BORNES_API_URL + f\"{lat},{lon},10000\" # Cherche les bornes dans un rayon de 10 km\r\n # response = requests.get(url)\r\n\r\n # # Vérifiez si la requête a réussi\r\n # if response.status_code == 200:\r\n # data = response.json()\r\n # records = data.get('records', []) # obtenir la liste des enregistrements\r\n\r\n # bornes = [\r\n # {\r\n # \"nom\": record.get(\"fields\", {}).get(\"n_station\", \"N/A\"), \r\n # \"adresse\": record.get(\"fields\", {}).get(\"ad_station\", \"N/A\"), \r\n # \"lat\": record.get(\"fields\", {}).get(\"geom\", {}).get(\"coordinates\", [None, None])[1], \r\n # \"lon\": record.get(\"fields\", {}).get(\"geom\", {}).get(\"coordinates\", [None, None])[0]\r\n # } for record in records\r\n # ]\r\n\r\n # return bornes, 200 # le code de statut HTTP pour une réponse réussie\r\n # else:\r\n # return {\"error\": \"API request failed with status code \" + str(response.status_code)}, 500\r\n\r\n # except Exception as e:\r\n # return {\"error\": f\"An unexpected error occurred: {str(e)}\"}, 500\r\n\r\napi.add_resource(Cartographie, '/cartographie//')\r\napi.add_resource(BornesRecharge, '/bornes//')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, port=5001)\r\n","repo_name":"Bineta99/Vehicules","sub_path":"rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"1905045487","text":"\"\"\"\n@author: Zongyi Li\nThis file is the Fourier Neural Operator for 1D problem such as the (time-independent) Burgers equation discussed in Section 5.1 in the [paper](https://arxiv.org/pdf/2010.08895.pdf).\n\"\"\"\n\nfrom typing import Tuple\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n#from torch.nn.parameter import Parameter\n#import matplotlib.pyplot as plt\n\n#import operator\n#from functools import reduce\n#from functools import partial\n#from timeit import default_timer\nfrom .utilities3 import *\nfrom copy import deepcopy as dcpy\n\ntorch.manual_seed(0)\nnp.random.seed(0)\n\nfrom torch.utils.checkpoint import checkpoint\ncheckpointed = True\n\ndef ckpt(f,arg1,arg2=None,arg3=None,checkpointed = checkpointed):\n if checkpointed:\n if arg2 == None and arg3 == None:\n return checkpoint(f,arg1)\n elif arg3 == None:\n return checkpoint(f,arg1,arg2)\n else:\n return checkpoint(f,arg1,arg2,arg3)\n else:\n if arg2 == None and arg3 == None:\n return f(arg1)\n elif arg3 == None:\n return f(arg1,arg2)\n else:\n return f(arg1,arg2,arg3)\n\n################################################################\n# 1d fourier 
layer\n################################################################\nclass SpectralConv1d(nn.Module):\n def __init__(self, in_channels, out_channels, modes1):\n super(SpectralConv1d, self).__init__()\n\n \"\"\"\n 1D Fourier layer. It does FFT, linear transform, and Inverse FFT.\n \"\"\"\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1\n\n self.scale = (1 / (in_channels*out_channels))\n self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, dtype=torch.cfloat))\n\n # Complex multiplication\n def compl_mul1d(self, input, weights):\n # (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)\n return torch.einsum(\"bix,iox->box\", input, weights)\n\n def forward(self, x):\n batchsize = x.shape[0]\n #Compute Fourier coefficients up to factor of e^(- something constant)\n x_ft = ckpt(torch.fft.rfft, x)\n\n # Multiply relevant Fourier modes\n out_ft = torch.zeros(batchsize, self.out_channels, x.size(-1)//2 + 1, device=x.device, dtype=torch.cfloat)\n out_ft[:, :, :self.modes1] = ckpt(self.compl_mul1d, x_ft[:, :, :self.modes1], self.weights1)\n\n #Return to physical space\n x = torch.fft.irfft(out_ft, x.size(-1))\n return x\n\nclass FNO1d(nn.Module):\n def __init__(self, modes, width, inp_dim=2, out_dim=1, ffd_dim=128, transpose_req=True, num_layers=4):\n super(FNO1d, self).__init__()\n\n \"\"\"\n The overall network. It contains 'num_layers' layers of the Fourier layer.\n 1. Lift the input to the desired channel dimension by self.fc0 .\n 2. 'num_layers' layers of the integral operators u' = (W + K)(u).\n W defined by self.w; K defined by self.conv .\n 3. Project from the channel space to the output space by self.fc1 and self.fc2 .\n \n input: the solution of the initial condition and location (a(x), x)\n input shape: (batchsize, x=s, c=2)\n output: the solution of a later timestep\n output shape: (batchsize, x=s, c=1)\n \"\"\"\n\n self.transpose_req = transpose_req\n\n self.modes1 = modes\n self.width = width\n self.fc0 = nn.Linear(inp_dim, self.width)\n\n conv_block = SpectralConv1d(self.width, self.width, self.modes1)\n self.conv_layers = nn.ModuleList([dcpy(conv_block) for _ in range(num_layers)])\n w_block = nn.Conv1d(self.width, self.width, 1)\n self.w_layers = nn.ModuleList([dcpy(w_block) for _ in range(num_layers)])\n self.num_layers = num_layers\n\n self.fc1 = nn.Linear(self.width, ffd_dim)\n self.fc2 = nn.Linear(ffd_dim, out_dim)\n\n def forward(self, x):\n\n if not self.transpose_req:\n x = self.fc0(x.transpose(-1,-2))\n else:\n x = self.fc0(x)\n\n x = x.transpose(-1,-2)\n\n for i in range(self.num_layers):\n # spectral convolution plus pointwise convolution, then the nonlinearity\n x = ckpt(self.conv_layers[i], x) + ckpt(self.w_layers[i], x)\n x = F.relu(x)\n\n x = x.transpose(-1,-2)\n x = self.fc1(x)\n x = F.relu(x)\n\n if not self.transpose_req:\n x = self.fc2(x).transpose(-1,-2)\n else:\n x = self.fc2(x)\n\n return x\n","repo_name":"Vbansal21/Transformer-X","sub_path":"scripts/fourier_1d.py","file_name":"fourier_1d.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"25707031757","text":"#!/bin/python3\r\n\"\"\"\r\n# Description\r\nClient classes\r\n\r\n# Authors:\r\n- Seth Giovanetti\r\n\"\"\"\r\n\r\nimport byteutil\r\nimport crypto\r\nimport net\r\nimport socket\r\n\r\nfrom Codes import Code, printCodes\r\nfrom pprint import pprint\r\nimport prompt\r\n# import socketserver\r\nfrom listener import 
tcpListen\r\nimport threading\r\n\r\n\r\ndef formatChatMessage(id, msg, id2=\"\"):\r\n return \"{ident:>{size}} {msg}\".format(\r\n size=max(len(id), len(id2)),\r\n ident=\"[{id}]\".format(id=id),\r\n msg=(msg if msg and msg[-1] != \"\\n\" else msg[:-1])\r\n )\r\n\r\n\r\nclass BaseClient():\r\n\r\n \"\"\"A basic client with attributes\r\n\r\n Attributes:\r\n id (str): Unique client ID\r\n \"\"\"\r\n\r\n def __init__(self, id):\r\n self.id = id\r\n self._secret = None\r\n self.address = tuple()\r\n self.session_partner = None\r\n self.session_id = None\r\n\r\n @property\r\n def availible(self):\r\n return self.session_partner is None\r\n\r\n @property\r\n def secret(self):\r\n \"\"\"\r\n Returns:\r\n str: The client's secret key, if we know it. \r\n \"\"\"\r\n if self._secret is None:\r\n self.loadSecret()\r\n return self._secret\r\n\r\n def loadSecret(self, gen_on_fail=False):\r\n \"\"\"Load our stored secret key from crypto\r\n\r\n Args:\r\n gen_on_fail (bool, optional): Generate and save a new key if we don't already have one stored.\r\n\r\n Raises:\r\n KeyError: We can't generate a new key, and we don't have one stored.\r\n \"\"\"\r\n try:\r\n self._secret = crypto.getKey(self.id)\r\n print(\"Loaded stored key for ID \" + self.id)\r\n except KeyError as e:\r\n if gen_on_fail:\r\n self.genSecret()\r\n else:\r\n raise e\r\n\r\n def genSecret(self):\r\n \"\"\"Generate a new secret key and store it in crypto\r\n \"\"\"\r\n new_secret = crypto.cRandom(128)\r\n crypto.storeKey(new_secret, self.id)\r\n print(\"New key generated for client ID \" + self.id)\r\n self._secret = new_secret\r\n\r\n\r\nclass RunnableClient(BaseClient):\r\n\r\n \"\"\"A stateful client with user interaction\r\n\r\n Attributes:\r\n server (Server): A BaseServer to connect our client to\r\n server_tcp_port (int): The port number of our TCP connection\r\n tcp_socket (Socket): The TCP socket cooresponding to the TCP connection with the server\r\n token (str): Session token for authentication\r\n \"\"\"\r\n\r\n # def __init__(self, id):\r\n # prompt.Interactable.__init__(self, start=False)\r\n # # super().__init__(self)\r\n # BaseClient.__init__(self, id)\r\n\r\n # Connect, login, run\r\n\r\n def run(self, server):\r\n \"\"\"Run client interactively.\r\n Load our secret, generating if needed.\r\n Login, run prompt until user exits, then disconnect.\r\n\r\n Args:\r\n server (BaseServer): BaseServer to connect our client to\r\n \"\"\"\r\n self.loadSecret(gen_on_fail=True)\r\n self.login(server)\r\n self.prompt()\r\n self.disconnect()\r\n\r\n def sendTCP(self, message):\r\n assert self.tcp_socket\r\n return net.sendTCP(\r\n self.tcp_socket,\r\n byteutil.message2bytes(message)\r\n )\r\n\r\n def login(self, server):\r\n \"\"\"Attempt to login to the server and establish a TCP connection.\r\n This is the UDP handshake process.\r\n\r\n Args:\r\n server (BaseServer): Server target\r\n\r\n Raises:\r\n PermissionError: Authentication failure\r\n \"\"\"\r\n # Store our associated server\r\n self.server = server\r\n\r\n # Prepare UDP socket to send and recieve\r\n sock = net.newUDPSocket()\r\n\r\n # src_address = (net.getOwnIP(), 0,)\r\n # sock.bind(src_address)\r\n # print(\"Socket open on\", src_address)\r\n\r\n # Send UDP HELLO to server\r\n serv_address_udp = (self.server.ip, self.server.port_udp,)\r\n net.sendUDP(\r\n sock,\r\n byteutil.message2bytes([\r\n Code.HELLO,\r\n self.id\r\n ]),\r\n serv_address_udp\r\n )\r\n\r\n # Expect CHALLENGE from server\r\n print(\"Awaiting CHALLENGE from server\")\r\n response, serv_address_udp = 
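# [Illustrative analogue only; crypto.a3 is this repo's own construction]
# the same "answer a server nonce with a keyed hash" shape, written with the
# standard library:
import hashlib
import hmac

def respond_to_challenge(challenge: bytes, secret: bytes) -> str:
    return hmac.new(secret, challenge, hashlib.sha256).hexdigest()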
net.awaitUDP(sock, net.UDP_MSG_SIZE)\r\n code, rand = byteutil.bytes2message(response)\r\n\r\n assert code == Code.CHALLENGE.value, \"Got non-challenge code {}\".format(\r\n code)\r\n\r\n print(\"Challenge rand:\", rand)\r\n\r\n # Decrypt challenge with our secret\r\n response = crypto.a3(rand, self.secret)\r\n\r\n # Send RESPONSE to server\r\n net.sendUDP(\r\n sock,\r\n byteutil.message2bytes([\r\n Code.RESPONSE,\r\n self.id,\r\n response\r\n ]),\r\n serv_address_udp\r\n )\r\n\r\n # Expect AUTH_SUCCESS or AUTH_FAIL from server\r\n print(\"Awaiting AUTH result from server\")\r\n response, serv_address_udp = net.awaitUDP(sock, net.UDP_MSG_SIZE)\r\n # rest includes raw int data here, don't stringify\r\n code, *rest = byteutil.bytes2bytemsg(response)\r\n\r\n if code == Code.AUTH_FAIL.value:\r\n raise PermissionError(\r\n \"Server rejected key authentication with code\", code)\r\n\r\n assert code == Code.AUTH_SUCCESS.value, \"Got non-auth code {}\".format(\r\n code)\r\n (token, server_tcp_port) = rest\r\n\r\n print(\"Closing UDP socket\")\r\n sock.close()\r\n\r\n # Establish TCP Connection\r\n print(\"Establishing TCP connection with cookie\")\r\n self.token = token\r\n\r\n # Create a TCP address with our old IP and our new port\r\n (server_ip, udp_port) = serv_address_udp\r\n serv_address_tcp = (server_ip, int(server_tcp_port))\r\n\r\n # print(\"Starting up TCP listener\")\r\n # self.tcp_server = socketserver.TCPServer(src_address, TCPListener)\r\n # self.tcp_server.master = self\r\n # self.tcp_thread = threading.Thread(daemon=True, target=self.tcp_server.serve_forever)\r\n # self.tcp_thread.start()\r\n\r\n print(\"Connecting TCP socket\", serv_address_tcp)\r\n # self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n # serv_address_tcp = (server_ip, self.server_tcp_port)\r\n # self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n try:\r\n self.tcp_socket = net.newTCPSocket()\r\n self.tcp_socket.connect(serv_address_tcp)\r\n except ConnectionRefusedError as e:\r\n print(\"Could not connect!\")\r\n print(serv_address_tcp)\r\n raise\r\n\r\n self.sendTCP([\r\n Code.CONNECT,\r\n token\r\n ])\r\n # Expect CONNECTED\r\n\r\n message = net.awaitTCP(self.tcp_socket, 2**16)\r\n code, *rest = byteutil.bytes2message(message)\r\n assert code == Code.CONNECTED.value, \"Got non-connect code {}\".format(\r\n code)\r\n\r\n print(\"Logged in successfully.\")\r\n\r\n def _listenUntilExit():\r\n tcpListen(self.tcp_socket, self.onTCP)\r\n self.disconnect(None)\r\n\r\n tcp_thread = threading.Thread(\r\n daemon=True, target=_listenUntilExit)\r\n tcp_thread.start()\r\n\r\n def disconnect(self, *args):\r\n \"\"\"Disconnect from server and exit\r\n\r\n Args:\r\n *args: None\r\n\r\n Raises:\r\n KeyboardInterrupt: Disconnect signal\r\n \"\"\"\r\n self.sendTCP([\r\n Code.DISCONNECT\r\n ])\r\n self.p.cmd_exit(None)\r\n raise KeyboardInterrupt\r\n\r\n # TCP networking\r\n\r\n # def tcpListener(self, sock, callback):\r\n # \"\"\"Listen for TCP messages on a socket and pass messages to a callback function.\r\n # This is a blocking call in an infinite loop; run this in a thread.\r\n\r\n # Args:\r\n # sock (socket): TCP socket to listen\r\n # callback (func): Callback function with args (socket, code, args, source_address,)\r\n # \"\"\"\r\n # # self.listening = threading.Event()\r\n # sock.settimeout(None)\r\n # while True:\r\n # message = sock.recv(net.MSG_SIZE)\r\n # assert message\r\n\r\n # source_address = sock.getpeername()\r\n\r\n # print(\"┌ Recieved TCP message\")\r\n # print(\"│ 
Source: {}:{}\".format(*source_address))\r\n # print(\"│ ┌Message (bytes): {}\".format(message))\r\n # print(\"└ └Message (print): {}\".format(\r\n # byteutil.formatBytesMessage(message)))\r\n\r\n # code, *rest = byteutil.bytes2message(message)\r\n # callback(sock, code, rest, source_address)\r\n\r\n def onTCP(self, connection, code, args, source_address):\r\n \"\"\"Callback to handle TCP messages\r\n\r\n Args:\r\n connection (socket): TCP socket of incomming message\r\n code (Code): The protocol code of the message\r\n args (list): The non-code parts of the message\r\n source_address (ip, port): INET address of the message source\r\n \"\"\"\r\n #print(\"args\"+ \" \".join(str(x) for x in args));\r\n\r\n if code == Code.CHAT_STARTED.value:\r\n (sessid, clientid,) = args\r\n self.session_partner = clientid\r\n self.session_id = sessid\r\n\r\n print(\"Chat started with user\", clientid)\r\n\r\n # try:\r\n # self.ps.app.exit()\r\n # except AssertionError:\r\n # pass\r\n # print('Exited.')\r\n # self.promptChat()\r\n\r\n elif code == Code.END_NOTIF.value:\r\n (sessid,) = args\r\n self.session_partner = None\r\n self.session_id = None\r\n\r\n print(\"Chat terminated.\")\r\n\r\n # self.p.pstr = \"> \".format(self.id)\r\n\r\n # self.ps.app.exit()\r\n # self.prompt()\r\n\r\n elif code == Code.UNREACHABLE.value:\r\n print(\"Cannot connect.\")\r\n elif code == Code.CHAT.value:\r\n (message,) = args\r\n print(formatChatMessage(self.session_partner, message, self.id))\r\n elif code == Code.HISTORY_RESP.value:\r\n # print(\"client got args\" + repr(args))\r\n (client_id_b, message, *rest) = args\r\n # print(\"msg only: \"+ message)\r\n # print(\"msg split: \" + messagef)\r\n # print(\"final hist msg: \" + formatChatMessage(client_id_b, message))\r\n print(formatChatMessage(client_id_b, message))\r\n else:\r\n print(\"No behavior for TCP code\", code)\r\n\r\n def onChatInput(self, inp):\r\n if inp.lower() == \"end chat\":\r\n self.sendTCP([\r\n Code.END_REQUEST\r\n ])\r\n elif inp:\r\n self.sendTCP([\r\n Code.CHAT,\r\n inp\r\n ])\r\n print(formatChatMessage(self.id, inp, self.session_partner))\r\n # User interactivity\r\n\r\n def bottomToolbar(self):\r\n if self.session_partner:\r\n return \"[{}] Chatting with user '{}'. Send 'end chat' to disconnect.\".format(self.id, self.session_partner)\r\n else:\r\n return \"[{}] Type 'help' for help. 'chat [user]' to initiate chat.\".format(self.id)\r\n\r\n def prompt(self):\r\n \"\"\"Interactive prompt\r\n \"\"\"\r\n from prompt_toolkit import PromptSession\r\n # from prompt_toolkit.completion import WordCompleter\r\n from prompt_toolkit.patch_stdout import patch_stdout\r\n \r\n self.p = p = prompt.Prompt()\r\n p.pstr = \"{} > \".format(self.id)\r\n p.registerCommandsFromNamespace(self, \"cmd_\")\r\n p.registerCommand(\r\n \"codes\",\r\n printCodes,\r\n helpstr=\"Print protocol codes\"\r\n )\r\n p.registerCommand(\r\n \"vars\",\r\n lambda *a: pprint(vars(self)),\r\n helpstr=\"Show own variables\"\r\n )\r\n p.registerCommand(\r\n \"disconnect\",\r\n self.disconnect,\r\n helpstr=\"Disconnect session\"\r\n )\r\n\r\n # prompt_completer = WordCompleter(self.p.commands.keys())\r\n self.ps = PromptSession(\r\n # completer=prompt_completer,\r\n # reserve_space_for_menu=3,\r\n bottom_toolbar=self.bottomToolbar,\r\n erase_when_done=True\r\n )\r\n # self.prompt_event = threading.Event()\r\n\r\n # We implement our own prompt system that differentiates between\r\n # chat input and command input. 
\r\n try:\r\n while True:\r\n with patch_stdout():\r\n rawin = self.ps.prompt(self.p.pstr) # prompt(self.pstr)\r\n try:\r\n if self.session_partner:\r\n self.onChatInput(rawin)\r\n else:\r\n self.p.handleCommand(rawin)\r\n except BrokenPipeError as e:\r\n self.login(self.server)\r\n except (KeyboardInterrupt, EOFError) as e:\r\n # Catch Ctrl-C, Ctrl-D, and exit.\r\n print(\"User interrupt.\")\r\n finally:\r\n # Cleanup\r\n pass\r\n\r\n # def promptChat(self):\r\n # \"\"\"Interactive prompt\r\n # \"\"\"\r\n # from prompt_toolkit import PromptSession\r\n # from prompt_toolkit.patch_stdout import patch_stdout\r\n \r\n # self.ps = PromptSession(\r\n # bottom_toolbar=self.bottomToolbar\r\n # )\r\n # try:\r\n # while True:\r\n # with patch_stdout():\r\n # rawin = self.ps.prompt(self.p.pstr) # prompt(self.pstr)\r\n # self.onChatInput(rawin)\r\n # except (KeyboardInterrupt, EOFError) as e:\r\n # # Catch Ctrl-C, Ctrl-D, and exit.\r\n # print(\"User interrupt.\")\r\n # finally:\r\n # # Cleanup\r\n # pass\r\n\r\n def cmd_say(self, *args):\r\n \"\"\"Send a CHAT message\r\n\r\n Args:\r\n Message\r\n \"\"\"\r\n print(\"tcp say:\", args)\r\n self.sendTCP([\r\n Code.CHAT,\r\n \" \".join(args)\r\n ])\r\n\r\n def cmd_history(self, *args):\r\n \"\"\"Request chat history between you and another user.\r\n Args:\r\n Other user id\r\n \"\"\"\r\n if len(args) == 0:\r\n print(\"History: No user specified.\")\r\n return\r\n (client_id_b,) = args\r\n print(\"History for \" + client_id_b + \": \\n\")\r\n self.sendTCP([\r\n Code.HISTORY_REQ,\r\n client_id_b\r\n ])\r\n\r\n\r\n def cmd_chat(self, *args):\r\n \"\"\"Start a chat session with another user.\r\n\r\n Args: client-id\r\n \"\"\"\r\n (client_id_b,) = args\r\n self.sendTCP([\r\n Code.CHAT_REQUEST,\r\n client_id_b\r\n ])\r\n\r\n def cmd_panic(self, *args):\r\n \"\"\"\r\n Terminate without cleaning up.\r\n \"\"\"\r\n import os\r\n os.abort()\r\n","repo_name":"OscarC1/ServerBasedChat-CS4390","sub_path":"src/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":14899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19862705514","text":"from .forms import ContactForm\r\nfrom django.shortcuts import render, redirect\r\nfrom django.core.mail import EmailMessage, BadHeaderError\r\nfrom django.http import HttpResponse\r\nfrom django.template.loader import get_template\r\nfrom django.conf import settings\r\n\r\n\r\ndef home(request):\r\n form = ContactForm(request.POST or None)\r\n if request.method == \"POST\":\r\n if form.is_valid():\r\n name = form.cleaned_data['name']\r\n email = form.cleaned_data['email']\r\n phone_number = form.cleaned_data['phone_number']\r\n subject = form.cleaned_data['subject']\r\n message = form.cleaned_data['message']\r\n from_email = settings.DEFAULT_FROM_EMAIL\r\n to = settings.DEFAULT_TO_EMAIL\r\n template = get_template('portfolio/contact_template.txt')\r\n context = {\r\n 'name': name,\r\n 'phone_number': phone_number,\r\n 'email': email,\r\n 'message': message,\r\n }\r\n content = template.render(context)\r\n try:\r\n EmailMessage(subject, content, from_email, [to], headers={'Reply-To': email}).send()\r\n except BadHeaderError:\r\n return HttpResponse('Invalid header found.')\r\n form.save()\r\n return redirect('success')\r\n return render(request, \"portfolio/Home.html\", {'form': form})\r\n\r\n\r\ndef success_view(request):\r\n return render(request, \"portfolio/Success.html\", 
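# [Illustrative sketch with hypothetical addresses] Django's EmailMessage
# accepts a Reply-To header distinct from the authenticated sender, which is
# what the contact view above relies on:
from django.core.mail import EmailMessage

EmailMessage(
    subject="Contact form",
    body="rendered template text",
    from_email="noreply@example.com",
    to=["owner@example.com"],
    headers={"Reply-To": "visitor@example.com"},
).send()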
{})\r\n\r\n\r\n","repo_name":"bruce619/portfolioapp","sub_path":"portfolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"22910908568","text":"from __future__ import annotations\n\nimport json\nimport pathlib\n\nimport torch\n\n\ndef load_nn(path: pathlib.Path) -> torch.nn.Module:\n info = json.loads(path.read_text(encoding=\"utf-8\"))\n\n layers = []\n\n for layer_info in info[\"layers\"]:\n if layer_info[\"kind\"] == \"Linear\":\n layer = torch.nn.Linear(layer_info[\"inputSize\"], layer_info[\"outputSize\"])\n weights = torch.tensor(layer_info[\"weights\"])\n biases = torch.tensor(layer_info[\"biases\"])\n with torch.no_grad():\n layer.weight.copy_(weights)\n layer.bias.copy_(biases)\n else:\n assert layer_info[\"kind\"] == \"ReLU\"\n layer = torch.nn.ReLU()\n layers.append(layer)\n\n return torch.nn.Sequential(*layers)\n","repo_name":"udsdepend/cav22-mogym-artifact","sub_path":"mogym/load_nn.py","file_name":"load_nn.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"41461635491","text":"import sys\n\nN, K = map(int, sys.stdin.readline().split())\nOriginal = [i for i in range(1, N+1)]\nresult = list()\ngap = K - 1\n\nfor i in range(N) :\n if len(Original) > gap :\n result.append(Original.pop(gap))\n gap += K - 1\n\n elif len(Original) <= gap :\n gap = gap % len(Original)\n result.append(Original.pop(gap))\n gap += K - 1\n\nprint(\"<\", end = '') # end에 빈 문자열을 지정하면 다음 번 출력이 바로 뒤에 나옴\nfor i in result :\n if i == result[-1] : print(i, end = \"\")\n else : print(\"%d,\"%(i), end = ' ') # end에 공백을 한 칸 지정하며 다음 번 출력이 한 칸 띄어져서 나옴\nprint(\">\")\n\n","repo_name":"algorithm-study-sopt/segonPark","sub_path":"3주차/baekjoon1158.py","file_name":"baekjoon1158.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"29245656733","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.linear_model import LinearRegression\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split as tts\nfrom sklearn.metrics import classification_report, confusion_matrix, recall_score, f1_score, precision_score, \\\n roc_auc_score, roc_curve, auc\nfrom lightgbm.sklearn import LGBMClassifier\nimport six\nimport sys\nsys.modules['sklearn.externals.six'] = six\nfrom imblearn.over_sampling import SMOTENC, SMOTE, BorderlineSMOTE, ADASYN, SVMSMOTE\nfrom imblearn.ensemble import EasyEnsembleClassifier\nimport eli5\nfrom eli5.sklearn import PermutationImportance\nimport pickle\n\nmissing_values = [\"n/a\", \"na\", \"--\", \"NONE\", \"None\", \"none\", \"NA\", \"N/A\", 'inf', '-inf', '?', 'Null', 'NULL']\ntrain_data = pd.read_csv(\"E:\\chirag\\Datasets\\Job change\\Aug_train.csv\", na_values=missing_values)\ntrain_data.drop(['enrollee_id', 'city'], 1, inplace=True)\n\nprint(train_data.company_size.value_counts())\ntrain_data['company_size'] = train_data['company_size'].replace('10/49', np.nan)\nprint(\"==============================\")\nprint(train_data.company_size.value_counts())\n\nto_LabelEncode = train_data[['gender', 'relevent_experience',\n 'enrolled_university', 
'education_level', 'major_discipline',\n 'experience', 'company_size', 'company_type', 'last_new_job']]\n\nle = LabelEncoder()\ntrain_temp = to_LabelEncode.astype(\"str\").apply(le.fit_transform)\ntrain_Label_encode = train_temp.where(~to_LabelEncode.isna(), to_LabelEncode)\n\ntrain_data.drop(['gender', 'relevent_experience', 'enrolled_university', 'education_level',\n 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job'], 1, inplace=True)\n\ntrain_data = train_Label_encode.join(train_data)\nprint(train_data)\n\nlr = LinearRegression()\nmice_imputer = IterativeImputer(random_state=42, estimator=lr, max_iter=10, n_nearest_features=2,\n imputation_order='roman')\ncleaned_train_data = mice_imputer.fit_transform(train_data)\n\ncleaned_train_data = pd.DataFrame(cleaned_train_data)\ncleaned_train_data.columns = ['gender', 'relevent_experience', 'enrolled_university', 'education_level',\n 'major_discipline',\n 'experience', 'company_size', 'company_type', 'last_new_job', 'city_development_index',\n 'training_hours', 'target']\n\nprint(cleaned_train_data)\n\nX = cleaned_train_data.drop('target', 1)\ny = cleaned_train_data.target\n\nX_train, X_test, y_train, y_test = tts(X, y, test_size=0.25, random_state=42)\n\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\nsvm_smote = SVMSMOTE(sampling_strategy='minority', random_state=42, k_neighbors=5)\nX_svm_smote, y_svm_smote = svm_smote.fit_resample(X, y)\n\nX_train_svm, X_test_svm, y_train_svm, y_test_svm = tts(X_svm_smote, y_svm_smote, test_size=0.25, random_state=42)\n\nsc = StandardScaler()\nX_train_svm = sc.fit_transform(X_train_svm)\nX_test_svm = sc.transform(X_test_svm)\n\n\ndef evaluate(model, X_test, y_test):\n y_pred = model.predict(X_test)\n errors = abs(y_pred - y_test)\n print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))\n print(classification_report(y_test, y_pred))\n print(confusion_matrix(y_test, y_pred))\n print('Recall Score = ', recall_score(y_test, y_pred))\n print('Precision Score = ', precision_score(y_test, y_pred))\n print('F1 score = ', f1_score(y_test, y_pred))\n\n return evaluate\n\n\ndef train_auc_roc_curve(model, X_train, y_train):\n base_fpr, base_tpr, base_threshold = roc_curve(y_train, model.predict(X_train))\n plt.plot([0, 1])\n plt.plot(base_fpr, base_tpr)\n print(\"auc score :\", auc(base_fpr, base_tpr))\n\n return train_auc_roc_curve\n\n\neasy_lgbm = EasyEnsembleClassifier(base_estimator=LGBMClassifier(random_state=42), n_estimators=250, n_jobs=1,\n random_state=42, replacement=True,\n sampling_strategy='auto', verbose=0,\n warm_start=True)\neasy_lgbm.fit(X_train_svm, y_train_svm)\nevaluate(easy_lgbm, X_test_svm, y_test_svm)\n\nprint(classification_report(y_train_svm, easy_lgbm.predict(X_train_svm)))\nprint(confusion_matrix(y_train_svm, easy_lgbm.predict(X_train_svm)))\nprint('Recall Score = ', recall_score(y_train_svm, easy_lgbm.predict(X_train_svm)))\nprint('Precision Score = ', precision_score(y_train_svm, easy_lgbm.predict(X_train_svm)))\n\nprint(f1_score(y_train_svm, easy_lgbm.predict(X_train_svm)))\nprint(f1_score(y_test_svm, easy_lgbm.predict(X_test_svm)))\n\neli5_permutation = PermutationImportance(estimator=easy_lgbm, scoring='f1', random_state=42, n_iter=5)\neli5_permutation.fit(X_test_svm, y_test_svm)\neli5_permutation.feature_importances_.T.reshape(-1, 1)\n\nfeature_importance_with_eli5 = pd.DataFrame(np.hstack((np.array([X.columns[0:]]).T,\n eli5_permutation.feature_importances_.T.reshape(-1, 1))),\n columns=['feature', 
'importance'])\nfeature_importance_with_eli5['importance'] = pd.to_numeric(feature_importance_with_eli5['importance'])\nfeature_importance_with_eli5 = feature_importance_with_eli5.sort_values(by='importance', ascending=False)\n\nfig = plt.figure(figsize=(15, 8))\nplt.xticks(fontsize=15)\nplt.yticks(fontsize=15)\nsns.barplot(x='importance', y='feature', data=feature_importance_with_eli5,\n order=feature_importance_with_eli5.sort_values('importance', ascending=False).feature)\n\n\npickle.dump(easy_lgbm, open('EasyEnsembleClassifier_with_LGBMClassifier_as_base_estimator.pickle', 'wb'))\n","repo_name":"ChiragAgrawalDataScientist/Job-Change-of-Data-Scientist","sub_path":"HRmodel.py","file_name":"HRmodel.py","file_ext":"py","file_size_in_byte":5840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24163547334","text":"from django.shortcuts import render, redirect\nfrom .models import Persona\nfrom .models import Gato\nfrom .forms import PersonaForm, GatoForm\nfrom django.contrib import messages\n\n\n\n\n# Create your views here.\ndef home(request):\n\t# access the object that holds the database records;\n\t# the all() method fetches every row in the table, much like a SELECT\n\tpersonas = Persona.objects.all()\n\t# build a dictionary with the records and hand it to the template\n\t# through the 'datos' variable\n\treturn render(request, 'home.html', context={'datos': personas})\n\n\n\n\ndef index(request):\n return render(request, 'index.html')\n\ndef gale(request):\n return render(request, 'gale.html')\n\n\ndef login(request):\n return render(request, 'login.html')\n\ndef nosotros(request):\n return render(request, 'nosotros.html')\n\ndef mantenedor(request):\n return render(request, 'mantenedor.html')\n\ndef catalogo(request):\n gatos = Gato.objects.all()\n return render(request, 'catalogo.html', context={'datos': gatos})\n\ndef ver_gatos(request):\n gatos = Gato.objects.all()\n return render(request, 'core/ver_gatos.html', context={'gatos': gatos})\n\n\ndef ver_personas(request):\n personas = Persona.objects.all()\n return render(request, 'core/ver_personas.html', context={'personas': personas})\n\n\n#def form_vehiculo(request):\n#\treturn render(request,'core/form_vehiculo.html')\ndef form_persona(request):\n if request.method == 'POST':\n persona_form = PersonaForm(request.POST)\n if persona_form.is_valid():\n persona_form.save()\n return redirect('home')\n else:\n persona_form = PersonaForm()\n return render(request, 'core/form_persona.html', {'persona_form': persona_form})\n\n\n\ndef form_gato(request):\n if request.method == 'POST':\n gato_form = GatoForm(request.POST)\n if gato_form.is_valid():\n gato_form.save()\n return redirect('catalogo')\n else:\n gato_form = GatoForm()\n return render(request, 'core/form_gato.html', {'gato_form': gato_form})\n\n\ndef form_mod_gato(request, id):\n gato = Gato.objects.get(id_chip=id)\n\n datos = {\n 'form': GatoForm(instance=gato)\n }\n if request.method == 'POST':\n formulario = GatoForm(data=request.POST, instance=gato)\n if formulario.is_valid():\n formulario.save()\n messages.success(request, \"modificado exitosamente\")\n return redirect('ver_gatos')\n return render(request, 'core/form_mod_gato.html', datos)\n\n\ndef form_borrar_gato(request, id):\n gato = Gato.objects.get(id_chip=id)\n gato.delete()\n messages.success(request, \"eliminado exitosamente\")\n return redirect('ver_gatos')\n\n\ndef 
form_mod_persona(request, id):\n persona = Persona.objects.get(rut=id)\n\n datos = {\n 'form': PersonaForm(instance=persona)\n }\n if request.method == 'POST':\n formulario = PersonaForm(data=request.POST, instance=persona)\n if formulario.is_valid():\n formulario.save()\n messages.success(request, \"modificado exitosamente\")\n return redirect('ver_personas')\n return render(request, 'core/form_mod_persona.html', datos)\n\n\ndef form_borrar_persona(request, id):\n persona = Persona.objects.get(rut=id)\n persona.delete()\n messages.success(request, \"eliminado exitosamente\")\n return redirect('ver_personas')\n\n","repo_name":"Proyecto-Teamcats/TeamCatsUltimate","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37329023340","text":"# Write a Python Program to Get Numbers Divisible by Fifteen From a List Using an Anonymous Function.\r\n\r\nnum_list = [45, 55, 60, 37, 100, 105, 220]\r\n\r\n# Use Anonymous Function to Filter\r\nresult = list(filter(lambda x: (x % 15 == 0), num_list))\r\nprint('Numbers Divisible by 15 are', result)","repo_name":"sagargoswami2001/Python-Basic-Programs-2","sub_path":"Anonymous_Function.py","file_name":"Anonymous_Function.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"3789449791","text":"\n\n#Source: https://machinelearningmastery.com/how-to-reduce-overfitting-with-dropout-regularization-in-keras/\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\n\nfrom sklearn.metrics import mean_squared_error\n\nfrom matplotlib import pyplot\n\nimport pandas as pd\n\n\n# Read Data\n\nfeature_cols = ['month', 'day_name', 'time', 'TEMPERATURE']\ndf = pd.read_csv('./input.csv')\n\nX = df[feature_cols]\ny = df['TOTALDEMAND']\n\n# headroom for future values: scale by 1.1 (10%)\nmax_y = y.max() * 1.1\ny = y/max_y\n\nsize = len(df)\n\ndf_temperature = df['TEMPERATURE']\nmax_temperature = df_temperature.max() * 1.1\ndf_temperature = df_temperature/max_temperature\n\nprint('max demand: ', max_y)\nprint('max temperature: ', max_temperature)\nprint('number of records: ', size)\n\ndf_month = df['month']\ndf_time = df['time']\n\ndf_month_onehot = pd.get_dummies(df_month)\ndf_time_onehot = pd.get_dummies(df_time)\n\nX = pd.concat([df_month_onehot, df_time_onehot, df_temperature], axis=1)\nprint(X.head())\nprint(y.head())\n\nX = X.to_numpy()\ny = y.to_numpy()\n\nn_train = int(0.60 * size)\nprint(n_train)\ntrainX, testX = X[:n_train, :], X[n_train:, :]\ntrainy, testy = y[:n_train], y[n_train:]\n\n# Define model\nmodel = Sequential()\nmodel.add(Dense(100, input_dim=61, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(50, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='MSE', optimizer='adam', metrics=['mse'])\n\n# Fit model\nhistory = model.fit(trainX, trainy, validation_data=(testX, testy), epochs=100, verbose=0)\n\n# Evaluate the model (the reported metric is MSE, not accuracy)\n_, train_mse = model.evaluate(trainX, trainy, verbose=0)\n_, test_mse = model.evaluate(testX, testy, verbose=0)\nprint('Train: %.3f, Test: %.3f' % (train_mse, test_mse))\n\ny_predict = model.predict(testX, verbose=0)\n\n# Prediction Done\nprint('prediction done')\n\nrmse = mean_squared_error(y_predict, testy, squared = False)\nprint('raw', 
rmse)\n\n# Plot history\npyplot.plot(history.history['loss'], label='train')\npyplot.plot(history.history['val_loss'], label='test')\npyplot.legend()\npyplot.savefig('nodp.png')\n\ndf_actual = pd.DataFrame(testy, columns = ['Actual Demand'])\ndf_actual = df_actual * max_y\ndf_forecast = pd.DataFrame(y_predict, columns = ['Forecast Demand'])\ndf_forecast = df_forecast * max_y \n\n\ndf_out = pd.concat([df_actual, df_forecast], axis=1)\n\nrmse = mean_squared_error(df_out['Forecast Demand'], df_out['Actual Demand'], squared = False)\nprint('multiplied', rmse)\n\nprint(df_out.head())\ndf_out.to_csv('result.csv', index = False)\n\n","repo_name":"z5282651/UNSW-Project","sub_path":"scrap_pad/forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74171366209","text":"'''\nhttps://contest.yandex.ru/contest/8458/problems/B/\n'''\n\namount = int(input())\ncounter = 0\nmaxCounter = 0\n\nfor i in range(amount):\n x = int(input())\n if x == 1:\n counter += 1\n maxCounter = max(counter, maxCounter)\n else:\n counter = 0\nprint(maxCounter)\n","repo_name":"Uncle-Samvel/algorithms","sub_path":"48.py","file_name":"48.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"42876960161","text":"\n\nwith open('input.txt', 'r') as file_handle:\n\tdata = list(int(j[0]) for j in [i.rstrip().split(' ') for i in file_handle])\n\ndata = sorted(data)\n\ndef partOne(data):\n\tones_difference = []\n\tthrees_difference = []\n\toutlet = 0\n\tbuilt_in_device_rating = max(data) + 3\n\twhile True:\n\t\tif len(ones_difference) + len(threes_difference)*3 == built_in_device_rating:\n\t\t\treturn len(ones_difference)*len(threes_difference)\n\t\telse:\n\t\t\tfor i in range(len(data)-1):\n\t\t\t\tif (data[i]-outlet)==1:\n\t\t\t\t\tones_difference.append(data[i])\n\t\t\t\tif (data[i+1]-data[i])==3:\n\t\t\t\t\tthrees_difference.append(data[i+1])\n\t\t\t\telif ((data[i+1]-data[i])==1):\n\t\t\t\t\tones_difference.append(data[i+1])\n\t\t\tthrees_difference.append(built_in_device_rating)\n\treturn len(ones_difference)*len(threes_difference)\n\nprint(partOne(data))\n\n\"\"\"\nPart 2\n\"\"\"\npaths = [1]\ndef numberofPaths(n):\n\ts = 0\n\tfor i in range(n-1,-1,-1):\n\t\tprint(i)\n\t\tif(data[n]-data[i]<=3):\n\t\t\ts+=paths[i]\n\t\telse: break\n\treturn s\n\ndata = sorted(data)\ndata.append(data[-1]+3)\ndata = [0] + data\nfor i in range(1,len(data)):\n\tpaths.append(numberofPaths(i))\n\tprint(\"x: \", i, \"data\", data[i],\" \", numberofPaths(i))\n\nprint(paths)\nprint(paths[-1])","repo_name":"paras-B/adventofcode2020","sub_path":"Day10/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10488606840","text":"from circularQueue import CircularQueue\n\nclass TNode:\n def __init__(self, data, left, right):\n self.data = data\n self.left = left\n self.right = right\n\ndef count_node(Node):\n if Node is None:\n return 0\n else:\n return 1+count_node(Node.left)+count_node(Node.right)\n\ndef calc_height( Node):\n if Node is None:\n return 0\n hLeft = calc_height(Node.left)\n hRight = calc_height(Node.right)\n if(hLeft > hRight):\n return hLeft+1\n else:\n return hRight+1\n\ndef preorder( Node):\n if Node is not None:\n print(Node.data, end =\" \")\n preorder(Node.left)\n 
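# [Illustrative rewrite sketch of the part-one logic above, not the original
# author's code] with the adapters sorted, consecutive gaps are only ever 1 or
# 3 jolts, so one pass over the differences suffices:
def joltage_product(adapters):
    chain = [0] + sorted(adapters)
    chain.append(chain[-1] + 3)                      # built-in device adapter
    gaps = [b - a for a, b in zip(chain, chain[1:])]
    return gaps.count(1) * gaps.count(3)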
preorder(Node.right)\n\ndef inorder( Node):\n if Node is not None:\n inorder(Node.left)\n print(Node.data, end=\" \")\n inorder(Node.right)\n\ndef postorder( Node):\n if Node is not None:\n postorder(Node.left)\n postorder(Node.right)\n print(Node.data, end=\" \")\n\ndef count_leaf( Node):\n if Node is None:\n return 0\n elif Node.left is None and Node.right is None:\n return 1\n else:\n return count_leaf(Node.left) + count_leaf(Node.right)\n\ndef levelorder(root):\n queue = CircularQueue()\n queue.enqueue(root)\n while not queue.isEmpty():\n n = queue.dequeue()\n if n is not None:\n print(n.data, end=\" \")\n queue.enqueue(n.left)\n queue.enqueue(n.right)\n","repo_name":"KangHyup/Second_grade-First_semester","sub_path":"자료구조/10주차-트리/TreeClass.py","file_name":"TreeClass.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7974255776","text":"\"\"\"\r\nfile: genetester.py\r\ndescription: CSCI 603 hw6 Group4\r\nlanguage: python3\r\nauthor: Michael Lee ml3406@RIT.EDU\r\n\"\"\"\r\n\r\nimport unittest\r\n\r\n\r\nclass TestInitOfLinkedNode(unittest.TestCase):\r\n def test___init__(self):\r\n \"\"\"\r\n This function tests __init__ from LinkedNode.\r\n \"\"\"\r\n from hw6.dnalist import LinkedNode\r\n a = LinkedNode(1)\r\n # tests value\r\n self.assertEqual(a.value, 1)\r\n # tests link\r\n self.assertEqual(a.link, None)\r\n b = LinkedNode(2, a)\r\n # tests value\r\n self.assertEqual(b.value, 2)\r\n # tests link and value\r\n self.assertEqual(b.link.value, 1)\r\n\r\n\r\nclass TestInit(unittest.TestCase):\r\n def test___init__(self):\r\n \"\"\"\r\n This function tests __init__ from DNAList.\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n # The constructor works with valid argument\r\n self.assertTrue(isinstance(DNAList(), DNAList))\r\n\r\n # The constructor works with valid argument\r\n self.assertTrue(isinstance(DNAList(\"ACGT\"), DNAList))\r\n\r\n # The constructor works with invalid argument\r\n self.assertTrue(isinstance(DNAList(\"KK\"), DNAList))\r\n\r\n # The constructor works with invalid argument\r\n self.assertTrue(isinstance(DNAList(123), DNAList))\r\n\r\n\r\nclass TestEq(unittest.TestCase):\r\n def test___eq__(self):\r\n \"\"\"\r\n This function tests __eq__ from DNAList.\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n # empty DNALists are equal\r\n self.assertEqual(DNAList(), DNAList())\r\n\r\n a = DNAList('ACG')\r\n b = DNAList('ACG')\r\n # two DNALists with the same elements and order are equal\r\n self.assertTrue(a == b)\r\n\r\n c = DNAList('GAC')\r\n # two DNALists with the same elements but different order are not equal\r\n self.assertFalse(a == c)\r\n\r\n\r\nclass TestStr(unittest.TestCase):\r\n def test___str__(self):\r\n \"\"\"\r\n This function tests __str__ from DNAList.\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n # checks if __str__ returns expected result\r\n self.assertEqual(DNAList('ACGT').__str__(), 'ACGT')\r\n\r\n\r\nclass TestAppend(unittest.TestCase):\r\n def test_append(self):\r\n \"\"\"\r\n This function tests append from DNAList.\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n a = DNAList()\r\n a.append('A')\r\n # an empty DNAList appends a single character\r\n self.assertEqual(a, DNAList('A'))\r\n\r\n b = DNAList()\r\n b.append('a')\r\n # ignores case\r\n self.assertEqual(b, DNAList('A'))\r\n\r\n c = DNAList()\r\n c.append('A')\r\n c.append('C')\r\n # an empty DNAList appends a single character and then another\r\n 
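# [Illustrative alternative, assuming the same TNode shape; the assignment's
# CircularQueue works equally well] collections.deque yields the identical
# breadth-first order:
from collections import deque

def levelorder_deque(root):
    queue = deque([root])
    while queue:
        node = queue.popleft()
        if node is not None:
            print(node.data, end=" ")
            queue.extend((node.left, node.right))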
self.assertEqual(c, DNAList('AC'))\r\n\r\n d = DNAList()\r\n d.append('K')\r\n d.append(123)\r\n # an empty DNAList appends invalid objects is still an empty DNAList\r\n self.assertEqual(d, DNAList())\r\n\r\n\r\nclass TestJoin(unittest.TestCase):\r\n def test_join(self):\r\n \"\"\"\r\n This function tests join from DNAList.\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n a = DNAList()\r\n b = DNAList()\r\n a.join(b)\r\n # a DNAList remains the same while joined with an empty DNAList\r\n self.assertEqual(a, DNAList())\r\n\r\n c = DNAList('ACG')\r\n d = DNAList('T')\r\n c.join(d)\r\n # join functions well\r\n self.assertEqual(c, DNAList('ACGT'))\r\n\r\n\r\nclass TestSplice(unittest.TestCase):\r\n def test_splice(self):\r\n \"\"\"\r\n This function tests splice from DNAList.\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n a = DNAList()\r\n a.splice(0, DNAList('AC'))\r\n # an empty DNAList can not be spliced\r\n self.assertEqual(a, DNAList())\r\n\r\n b = DNAList('ACG')\r\n b.splice(0, DNAList('T'))\r\n # splice works at every position in a DNAList\r\n self.assertEqual(b, DNAList('ATCG'))\r\n\r\n c = DNAList('ACG')\r\n c.splice(2, DNAList('T'))\r\n # splice works at every position in a DNAList\r\n self.assertEqual(c, DNAList('ACGT'))\r\n\r\n d = DNAList('ACG')\r\n d.splice(-1, DNAList('T'))\r\n # splice works at every position in a DNAList (len(DNAList)-1)\r\n self.assertEqual(d, DNAList('ACGT'))\r\n\r\n e = DNAList('ACG')\r\n e.splice(-3, DNAList('T'))\r\n # splice works at every position in a DNAList (len(DNAList)-3)\r\n self.assertEqual(e, DNAList('ATCG'))\r\n\r\n f = DNAList('ACG')\r\n f.splice(0, DNAList())\r\n # a DNAList remains the same if spliced with an empty DNAList\r\n self.assertEqual(f, DNAList('ACG'))\r\n\r\n g = DNAList('ACG')\r\n g.splice('a', DNAList('T'))\r\n g.splice(0, 12)\r\n # a DNAList remains the same if the arguments are invalid\r\n self.assertEqual(g, DNAList('ACG'))\r\n\r\n\r\nclass TestSnip(unittest.TestCase):\r\n def test_snip(self):\r\n \"\"\"\r\n This function tests snip from DNAList.\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n a = DNAList()\r\n a.snip(0, 0)\r\n a.snip(0, 1)\r\n a.snip('s', 1)\r\n a.snip(0, 's')\r\n # a DNAList remains the same if the arguments are invalid\r\n self.assertEqual(a, DNAList())\r\n\r\n b = DNAList('AC')\r\n b.snip(0, 1)\r\n # snip works at every position in a DNAList\r\n self.assertEqual(b, DNAList('C'))\r\n\r\n c = DNAList('ACGT')\r\n c.snip(1, 3)\r\n # snip works at every position in a DNAList\r\n self.assertEqual(c, DNAList('AT'))\r\n\r\n\r\nclass TestReplace(unittest.TestCase):\r\n def test_replace(self):\r\n \"\"\"\r\n This function tests replace from DNAList.\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n a = DNAList()\r\n b = DNAList('T')\r\n a.replace('A', b)\r\n # an empty DNAList will not be replaced because there is no target sequence in it\r\n self.assertEqual(a, DNAList())\r\n\r\n c = DNAList('ACG')\r\n c.replace('A', b)\r\n # replace works with a single character\r\n self.assertEqual(c, DNAList('TCG'))\r\n\r\n d = DNAList('ACGTACGT')\r\n d.replace('C', b)\r\n # all target sequences will be replaced\r\n self.assertEqual(d, DNAList('ATGTATGT'))\r\n\r\n e = DNAList('ACGACG')\r\n f = DNAList('TCA')\r\n e.replace('AC', f)\r\n # replace works with two or more characters\r\n self.assertEqual(e, DNAList('TCAGTCAG'))\r\n\r\n g = DNAList('ACGT')\r\n g.replace('AC', 123)\r\n g.replace('AC', 'TA')\r\n g.replace(132, b)\r\n # a DNAList remains the same if the arguments are invalid\r\n 
self.assertEqual(g, DNAList('ACGT'))\r\n\r\n\r\nclass TestCopy(unittest.TestCase):\r\n def test_copy(self):\r\n \"\"\"\r\n This function tests copy from DNAList.\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n a = DNAList()\r\n b = a.copy()\r\n # checks if the original DNAList and the copied one are equal\r\n self.assertTrue(a == b)\r\n\r\n b.append('A')\r\n # checks if there are two DNALists which are equal or there are just two variable pointing to the same objects\r\n self.assertFalse(a == b)\r\n\r\n\r\nclass TestSizeAndSizeToEnd(unittest.TestCase):\r\n def test_sizeAnd_size_to_end(self):\r\n \"\"\"\r\n This function tests size and _size_to_end from DNAList\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n a = DNAList()\r\n # an empty DNAList should have a size of 0\r\n self.assertTrue(a.size() == 0)\r\n\r\n b = DNAList('ACG')\r\n # the size of a DNAList is the number of nodes in the DNAList\r\n self.assertTrue(b.size() == 3)\r\n\r\n\r\nclass TestEmpty(unittest.TestCase):\r\n def test_empty(self):\r\n \"\"\"\r\n This function tests empty from DNAList.\r\n :return:\r\n \"\"\"\r\n from hw6.dnalist import DNAList\r\n\r\n a = DNAList()\r\n # returns True if the DNAList is empty\r\n self.assertTrue(a.empty())\r\n\r\n b = DNAList('ACG')\r\n # returns False if the DNAList is not empty\r\n self.assertFalse(b.empty())\r\n\r\n\r\n\"\"\"\r\nmain conditional guard\r\nThe following condition checks whether we are running as a script.\r\nIf the file is being imported, don't run the test code.\r\n\"\"\"\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"mlee1120/Computational-Problem-Solving---Python","sub_path":"06/genetester.py","file_name":"genetester.py","file_ext":"py","file_size_in_byte":8417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"2544668397","text":"from PyQt5 import QtWidgets\nimport sys\nfrom modules.mainWindow import MainWindow\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWinExtras import QWinTaskbarButton, QWinTaskbarProgress\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n app.setWindowIcon(QtGui.QIcon('icon.ico'))\n window = MainWindow()\n window.setWindowIcon(QtGui.QIcon('icon.ico'))\n window.show()\n window.taskbar_button = QWinTaskbarButton()\n window.taskbar_button.setWindow(window.windowHandle())\n window.taskbar_button.setOverlayIcon(QtGui.QIcon('icon.ico'))\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"art3xa/keyboardtrainer","sub_path":"keyboardtrainer.py","file_name":"keyboardtrainer.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10936142231","text":"import time\n\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\n\n\ndef sum2020(data):\n print('start data scan')\n for i in data:\n for j in data:\n for k in data:\n if i + j + k == 2020:\n return i, j, k\n return None\n\n\nif __name__ == \"__main__\":\n start = current_milli_time()\n\n file1 = open('in', 'r')\n Lines = file1.readlines()\n\n data = [int(i) for i in Lines]\n\n data.sort()\n j, i, k = sum2020(data)\n print(i * j * k)\n end = current_milli_time() - start","repo_name":"BDafflon/adventofcode2020","sub_path":"day1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"14881051471","text":"import argparse\n\n\nclass 
KeyValueAction(argparse.Action):\n \"\"\"A custom action to parse arguments as key=value pairs\n\n Ensures that ``dest`` is a dict\n \"\"\"\n\n def __call__(self, parser, namespace, values, option_string=None):\n # Make sure we have an empty dict rather than None\n if getattr(namespace, self.dest, None) is None:\n setattr(namespace, self.dest, {})\n\n # Add value if an assignment else remove it\n if '=' in values:\n getattr(namespace, self.dest, {}).update([values.split('=', 1)])\n else:\n getattr(namespace, self.dest, {}).pop(values, None)\n\n\nclass RangeAction(argparse.Action):\n \"\"\"A custom action to parse a single value or a range of values\n\n Parses single integer values or a range of integer values delimited\n by a colon and returns a tuple of integers:\n '4' sets ``dest`` to (4, 4)\n '6:9' sets ``dest`` to (6, 9)\n \"\"\"\n\n def __call__(self, parser, namespace, values, option_string=None):\n range = values.split(':')\n if len(range) == 0:\n # Nothing passed, return a zero default\n setattr(namespace, self.dest, (0, 0))\n elif len(range) == 1:\n # Only a single value is present\n setattr(namespace, self.dest, (int(range[0]), int(range[0])))\n elif len(range) == 2:\n # Range of two values\n if int(range[0]) <= int(range[1]):\n setattr(namespace, self.dest, (int(range[0]), int(range[1])))\n else:\n msg = \"Invalid range, %s is not less than %s\" % \\\n (range[0], range[1])\n raise argparse.ArgumentError(self, msg)\n else:\n # Too many values\n msg = \"Invalid range, too many values\"\n raise argparse.ArgumentError(self, msg)\n","repo_name":"codybum/OpenStackInAction","sub_path":"scripts/icehouse/opt/stack/python-openstackclient/openstackclient/common/parseractions.py","file_name":"parseractions.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"43"} +{"seq_id":"32681257481","text":"from typing import List\n\n\nclass Solution:\n def numSimilarGroups(self, strs: List[str]) -> int:\n\n L = len(strs)\n\n if L==1:\n return 1 \n\n edges = [[0 for _ in range(L)] for _ in range(L) ] \n groups = [-1 for _ in range(L)]\n\n for i in range(L) :\n for j in range(i , L):\n if self._judge_sim( strs[i], strs[j] ) :\n edges[i][j], edges[j][i] = 1, 1 \n\n ret = 0\n for i in range(L) :\n\n if groups[i]==-1:\n self._dfs(edges, groups, i, ret)\n ret += 1 \n\n return ret \n\n def _dfs(self, edges, groups, i, ret):\n\n L = len(edges)\n groups[i] = ret \n\n for j in range(L):\n\n if edges[i][j]==1 and groups[j]==-1:\n\n self._dfs(edges, groups, j, ret)\n\n def _judge_sim(self, x:str, y:str) -> bool :\n\n N = len(x) \n temp_list = []\n\n for i in range(N):\n\n if x[i] != y[i] :\n temp_list.append(i) \n\n if len(temp_list) != 2:\n return False \n \n return True \n\ns = Solution()\nprint(s.numSimilarGroups( [\"omv\",\"ovm\"] ))","repo_name":"Tommote/leetcode_python","sub_path":"graph/SimilarStringGroups_839.py","file_name":"SimilarStringGroups_839.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"41883728652","text":"import os\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom citeminer.types import Author, Publication\n\n\nclass Markdown(object):\n def __init__(self) -> None:\n super().__init__()\n\n @property\n def stream(self) -> str:\n return \"\"\n\n def __str__(self) -> str:\n return self.stream\n\n def __repr__(self) -> str:\n return self.stream\n\n def transform(self, text: str) -> str:\n text = 
text.replace(\"*\", \"\\*\")\n text = text.replace(\"`\", \"\\`\")\n text = text.replace(\"_\", \"\\_\")\n text = text.replace(\"{\", \"\\{\")\n text = text.replace(\"}\", \"\\}\")\n text = text.replace(\"[\", \"\\[\")\n text = text.replace(\"]\", \"\\]\")\n text = text.replace(\"(\", \"\\(\")\n text = text.replace(\")\", \"\\)\")\n text = text.replace(\"#\", \"\\#\")\n text = text.replace(\"+\", \"\\+\")\n text = text.replace(\"-\", \"\\-\")\n text = text.replace(\"!\", \"\\!\")\n text = text.replace(\"&\", \"&\")\n text = text.replace(\"<\", \"<\")\n return text\n\n\nclass SingleLineBreak(Markdown):\n def __init__(self) -> None:\n super().__init__()\n\n @property\n def stream(self) -> str:\n return \" \\n\"\n\n\nclass DoubleLineBreak(Markdown):\n def __init__(self) -> None:\n super().__init__()\n\n @property\n def stream(self) -> str:\n return \"\\n \\n\"\n\n\nclass Space(Markdown):\n def __init__(self) -> None:\n super().__init__()\n\n @property\n def stream(self) -> str:\n return \" \"\n\n\nclass PlainText(Markdown):\n def __init__(self, text: str, endline: bool = True) -> None:\n super().__init__()\n self.text = text\n self.endline = endline\n\n @property\n def stream(self) -> str:\n if self.endline:\n return self.text + \"\\n\"\n else:\n return self.text\n\n\nclass Header(Markdown):\n def __init__(self, text: Union[str, Markdown], level: int = 2) -> None:\n super().__init__()\n if isinstance(text, Markdown):\n self.text = text.stream\n else:\n self.text = text\n self.level = level\n\n @property\n def stream(self):\n return \"#\" * self.level + \" \" + self.text + \"\\n\"\n\n\nclass Hyperlink(Markdown):\n def __init__(self, link_text: str, link_url: str) -> None:\n super().__init__()\n self.link_text = link_text\n self.link_url = link_url\n\n @property\n def stream(self):\n return \"[\" + self.link_text + \"]\" + \"(\" + self.link_url + \")\"\n\n\nclass Emphasis(Markdown):\n def __init__(self, text: str, endline=True) -> None:\n super().__init__()\n self.text = text\n self.endline = endline\n\n @property\n def stream(self) -> str:\n if self.endline:\n return \"*\" + self.text + \"*\" + \"\\n\"\n else:\n return \"*\" + self.text + \"*\"\n\n\nclass Strong(Markdown):\n def __init__(self, text: str, endline: bool = True) -> None:\n super().__init__()\n self.text = text\n self.endline = endline\n\n @property\n def stream(self) -> str:\n if self.endline:\n return \"**\" + self.text + \"**\" + \"\\n\"\n else:\n return \"**\" + self.text + \"**\"\n\n\nclass Sequence(Markdown):\n def __init__(self, text_list: List[Markdown]) -> None:\n super().__init__()\n self.text_list = text_list\n\n @property\n def stream(self) -> str:\n return \"\".join([item.stream for item in self.text_list])\n\n\nclass Blockquote(Markdown):\n def __init__(self, text: str, endline: bool = True) -> None:\n super().__init__()\n self.text = text\n self.endline = endline\n\n @property\n def stream(self) -> str:\n if self.endline:\n return \"> \" + self.text + \"\\n\"\n else:\n return \"> \" + self.text\n\n\nclass CitingPublication(Markdown):\n def __init__(self, publication: Publication, comments: List[str] = []) -> None:\n super().__init__()\n self.title = publication[\"bib\"][\"title\"]\n if publication[\"filled\"]:\n self.author = publication[\"bib\"][\"author\"]\n else:\n self.author = \", \".join(publication[\"bib\"][\"author\"])\n\n if \"journal\" in publication[\"bib\"].keys():\n self.journal_or_book = publication[\"bib\"][\"journal\"]\n self.in_journal = True\n elif \"booktitle\" in publication[\"bib\"].keys():\n 
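# conference papers provide a \"booktitle\" instead of a \"journal\"\n            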
self.journal_or_book = publication[\"bib\"][\"booktitle\"]\n self.in_journal = False\n else: # todo: fix it\n self.journal_or_book = \"Unknown\"\n self.in_journal = True\n\n self.abstract = publication[\"bib\"][\"abstract\"]\n\n self.pdf_link = publication[\"pub_url\"]\n\n if \"http\" not in self.pdf_link:\n self.pdf_link = os.path.abspath(self.pdf_link).replace(\" \", \"%20\")\n\n self.comments = comments\n\n @property\n def stream(self) -> str:\n quote_blocks: List[Markdown] = []\n for comment in self.comments:\n quote_blocks.append(Blockquote(comment))\n quote_blocks.append(DoubleLineBreak())\n\n return Sequence(\n [\n Header(\n Hyperlink(self.title, self.pdf_link)\n if self.pdf_link is not None\n else PlainText(self.title)\n ),\n SingleLineBreak(),\n Emphasis(\"Author:\", endline=False),\n Space(),\n PlainText(self.author),\n SingleLineBreak(),\n Emphasis(\n \"Journal:\" if self.in_journal else \"Booktitle:\", endline=False\n ),\n Space(),\n PlainText(self.journal_or_book),\n SingleLineBreak(),\n Emphasis(\"Abstruct:\"),\n Space(),\n SingleLineBreak(),\n PlainText(self.abstract),\n DoubleLineBreak(),\n *quote_blocks,\n ]\n ).stream\n\n\nclass CitingDocument(Markdown):\n def __init__(\n self,\n cited: Union[Publication, str] = \"\",\n publications: List[Dict[str, Any]] = [],\n document_path: str = \"summary.md\",\n ) -> None:\n super().__init__()\n self.publications = publications\n if isinstance(cited, str):\n self.cited_publication = cited\n else:\n self.cited_publication = cited[\"bib\"][\"title\"]\n self.path = document_path\n\n @property\n def stream(self) -> str:\n return Sequence(\n [\n Header(self.cited_publication, level=1),\n DoubleLineBreak(),\n *[CitingPublication(**pub) for pub in self.publications],\n ]\n ).stream\n\n def add_publication(self, pub: Dict[str, Any]) -> None:\n self.publications.append(pub)\n\n def save(self) -> None:\n with open(self.path, \"w\") as summary_file:\n summary_file.write(self.stream)\n\n\nif __name__ == \"__main__\":\n pub = {\n \"author_id\": [\"nRQi4O8AAAAJ\", \"bW6qGV0AAAAJ\", \"XqLiBQMAAAAJ\", \"nujTx04AAAAJ\"],\n \"bib\": {\n \"abstract\": \"This article is about a curious phenomenon. Suppose we \"\n \"have a data matrix, which is the superposition of a \"\n \"low-rank component and a sparse component. Can we \"\n \"recover each component individually? 
We prove that under \"\n \"some suitable assumptions, it is possible to\",\n \"author\": \"Cand{\\\\`e}s, Emmanuel J and Li, Xiaodong and Ma, Yi and \"\n \"Wright, John\",\n \"bib_id\": \"candes2011robust\",\n \"journal\": \"Journal of the ACM (JACM)\",\n \"number\": \"3\",\n \"pages\": \"1--37\",\n \"pub_type\": \"article\",\n \"pub_year\": \"2011\",\n \"publisher\": \"ACM New York, NY, USA\",\n \"title\": \"Robust principal component analysis?\",\n \"venue\": \"Journal of the ACM (JACM)\",\n \"volume\": \"58\",\n },\n \"citedby_url\": \"/scholar?cites=9000237782786002248&as_sdt=2005&sciodt=0,5&hl=en\",\n \"eprint_url\": \"https://arxiv.org/pdf/0912.3599\",\n \"filled\": True,\n \"gsrank\": 1,\n \"num_citations\": 5637,\n \"pub_url\": \"https://dl.acm.org/doi/abs/10.1145/1970392.1970395\",\n \"source\": \"PUBLICATION_SEARCH_SNIPPET\",\n \"url_add_sclib\": \"/citations?hl=en&xsrf=&continue=/scholar%3Fhl%3Den%26as_sdt%3D0,5%26sciodt%3D0,5%26cites%3D17633993505446855025%26scipsc%3D&citilm=1&json=&update_op=library_add&info=SE0CAZRE53wJ&ei=6tMUYNK0KI7MyAS7vZPQDA\",\n \"url_scholarbib\": \"/scholar?q=info:SE0CAZRE53wJ:scholar.google.com/&output=cite&scirp=0&hl=en\",\n }\n with open(\"test.md\", \"w\") as f:\n f.write(CitingPublication(pub).stream)\n\n print(\"hello\")\n","repo_name":"Phimos/Comments-Mining-System-for-Scholar-Citations","sub_path":"citeminer/utils/markdown_writer.py","file_name":"markdown_writer.py","file_ext":"py","file_size_in_byte":8723,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"70943977411","text":"#\n# @lc app=leetcode.cn id=25 lang=python3\n#\n# [25] K 个一组翻转链表\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def reverseKGroup(self, head: ListNode, k: int) -> ListNode:\n cur = head\n count = 0\n while cur and count != k:\n cur = cur.next\n count += 1\n if count == k:\n cur = self.reverseKGroup(cur,k);\n while count:\n tmp = head.next\n head.next = cur\n cur = head\n head = tmp\n count -= 1\n head = cur \n return head\n \n# @lc code=end\n\n","repo_name":"jackwener/leetcode","sub_path":"25.k-个一组翻转链表.py","file_name":"25.k-个一组翻转链表.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33125866181","text":"import argparse\nfrom datetime import datetime\nfrom kimai_util import Werkstudent\nfrom pathlib import Path\nimport logging\n\nTHIS_LOCATION = Path(__file__).parent.resolve()\n\n\ndef main():\n for ws in Werkstudent.get_all_active():\n print(ws._alias, ws.get_holiday_eligibility())\n return 0\n\n\nif __name__ == \"__main__\":\n thisfile = Path(__file__).resolve()\n logging.basicConfig(filename=str(thisfile.parent.parent / f\"kimai2_autohire_{thisfile.stem}.log\"), \n format=\"%(asctime)s %(levelname)s %(message)s\", level=logging.INFO)\n try:\n #parser = argparse.ArgumentParser(description=\"create available holidays report for working students or set holidays taken\")\n #parser.add_argument(\"--all\", action=\"store_true\", help=\"run the calculation for all students and send them emails about it\")\n #parser.add_argument(\"name\", action=\"store\", help=\"the name of the student to run the function for\")\n ret = main()\n except Exception as e:\n logging.exception(f\"Uncaught exception in from main! 
{e}\")\n        ret = -1\n    exit(ret)","repo_name":"synsi23b/kimai2-autohire","sub_path":"student_holiday.py","file_name":"student_holiday.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"10875617137","text":"from __future__ import annotations\r\nfrom typing import Union\r\nfrom typing import Dict\r\nfrom typing import Callable\r\nfrom typing import Optional\r\n\r\nimport logging\r\n\r\nfrom silx.gui import qt\r\nfrom silx.gui.widgets.LegendIconWidget import LegendIconWidget\r\nfrom silx.gui import colors as silx_colors\r\nfrom silx.gui import icons\r\nfrom silx.gui import utils\r\n\r\nfrom bliss.flint.model import flint_model\r\nfrom bliss.flint.model import plot_model\r\nfrom bliss.flint.model import plot_item_model\r\nfrom bliss.flint.model import scan_model\r\nfrom bliss.flint.model import style_model\r\nfrom bliss.flint.widgets.eye_check_box import EyeCheckBox\r\nfrom bliss.flint.helper import model_helper\r\nfrom bliss.flint.widgets.style_dialog import StyleDialogEditor\r\nfrom bliss.flint.widgets.style_dialog import FlintColormapDialog\r\n\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nPlotItemRole = qt.Qt.UserRole + 100\r\nVisibilityRole = qt.Qt.UserRole + 101\r\nRadioRole = qt.Qt.UserRole + 102\r\nCheckRole = qt.Qt.UserRole + 103\r\nFlintModelRole = qt.Qt.UserRole + 104\r\nObjectRole = qt.Qt.UserRole + 105\r\n\r\n\r\n_colormapPixmap: Dict[str, qt.QPixmap] = {}\r\n_COLORMAP_PIXMAP_SIZE = 32\r\n\r\n\r\nclass VisibilityPropertyItemDelegate(qt.QStyledItemDelegate):\r\n    def createEditor(self, parent, option, index):\r\n        if not index.isValid():\r\n            return super(VisibilityPropertyItemDelegate, self).createEditor(\r\n                parent, option, index\r\n            )\r\n\r\n        editor = EyeCheckBox(parent=parent)\r\n        editor.toggled.connect(self.__commitData)\r\n        state = index.data(VisibilityRole)\r\n        editor.setChecked(state == qt.Qt.Checked)\r\n        self.__updateEditorStyle(editor, state)\r\n        return editor\r\n\r\n    def __commitData(self):\r\n        editor = self.sender()\r\n        self.commitData.emit(editor)\r\n\r\n    def __updateEditorStyle(self, editor: qt.QCheckBox, state: qt.Qt.CheckState):\r\n        editor.setVisible(state is not None)\r\n\r\n    def setEditorData(self, editor, index):\r\n        state = index.data(VisibilityRole)\r\n        self.__updateEditorStyle(editor, state)\r\n\r\n    def setModelData(self, editor, model, index):\r\n        state = qt.Qt.Checked if editor.isChecked() else qt.Qt.Unchecked\r\n        model.setData(index, state, role=VisibilityRole)\r\n\r\n    def updateEditorGeometry(self, editor, option, index):\r\n        # Center the widget to the cell\r\n        size = editor.sizeHint()\r\n        half = size / 2\r\n        halfPoint = qt.QPoint(half.width(), half.height() - 1)\r\n        pos = option.rect.center() - halfPoint\r\n        editor.move(pos)\r\n\r\n\r\nclass CheckBoxItemDelegate(qt.QStyledItemDelegate):\r\n    \"\"\"CheckBox delegate to edit CheckStateRole only.\r\n\r\n    Without it, Qt cannot properly display a check box without\r\n    text on the side.\r\n\r\n    This also centers the check box and hides a bug that puts the default\r\n    check box hit box at the wrong location (caused by the custom cell margin).\r\n\r\n    A custom CheckRole is used to avoid displaying the default check box in\r\n    the background.\r\n    \"\"\"\r\n\r\n    def createEditor(self, parent, option, index):\r\n        if not index.isValid():\r\n            return super(CheckBoxItemDelegate, self).createEditor(parent, option, index)\r\n\r\n        # Create group to avoid interferences\r\n        editor = qt.QWidget(parent=parent)\r\n        editor.setContentsMargins(1, 1, 1, 1)\r\n        layout = qt.QHBoxLayout(editor)\r\n        layout.setContentsMargins(0, 0, 0, 0)\r\n        layout.setSpacing(1)\r\n\r\n        check = qt.QCheckBox(parent=editor)\r\n        
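# name the check box so setEditorData/setModelData can find it with findChildren\r\n        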
check.setObjectName(\"check\")\n check.toggled.connect(self.__commitData)\n check.setMinimumSize(check.minimumSizeHint())\n check.setMaximumSize(check.minimumSizeHint())\n layout.addWidget(check)\n\n self.setEditorData(editor, index)\n return editor\n\n def __commitData(self):\n editor = self.sender().parent()\n self.commitData.emit(editor)\n\n def setEditorData(self, editor, index):\n check = editor.findChildren(qt.QCheckBox, \"check\")[0]\n state = index.data(role=CheckRole)\n with utils.blockSignals(check):\n check.setVisible(state is not None)\n check.setChecked(state == qt.Qt.Checked)\n\n def setModelData(self, editor, model, index):\n check = editor.findChildren(qt.QCheckBox, \"check\")[0]\n state = qt.Qt.Checked if check.isChecked() else qt.Qt.Unchecked\n model.setData(index, state, role=CheckRole)\n\n def updateEditorGeometry(self, editor, option, index):\n # Center the widget to the cell\n size = editor.sizeHint()\n half = size / 2\n halfPoint = qt.QPoint(half.width(), half.height() - 1)\n pos = option.rect.center() - halfPoint\n editor.move(pos)\n\n\nclass StyleItemDelegate(qt.QStyledItemDelegate):\n \"\"\"Style delegate to edit item style.\n \"\"\"\n\n def __init__(self, parent=None, editable=True):\n qt.QStyledItemDelegate.__init__(self, parent=parent)\n self.__editable = editable\n\n def createEditor(self, parent, option, index):\n if not index.isValid():\n return super(StyleItemDelegate, self).createEditor(parent, option, index)\n\n editor = StylePropertyWidget(parent)\n editor.setEditable(self.__editable)\n editor.setMinimumSize(editor.sizeHint())\n self.__updateEditor(editor, index)\n return editor\n\n def __updateEditor(self, editor: qt.QWidget, index: qt.QModelIndex):\n plotItem = index.data(PlotItemRole)\n flintModel = index.data(FlintModelRole)\n editor.setPlotItem(plotItem)\n editor.setFlintModel(flintModel)\n\n def setEditorData(self, editor, index):\n self.__updateEditor(editor, index)\n\n def setModelData(self, editor, model, index):\n pass\n\n def updateEditorGeometry(self, editor, option, index):\n # Center the widget to the cell\n size = editor.sizeHint()\n half = size / 2\n halfPoint = qt.QPoint(half.width(), half.height() - 1)\n pos = option.rect.center() - halfPoint\n editor.move(pos)\n\n\nclass RemovePropertyItemDelegate(qt.QStyledItemDelegate):\n def __init__(self, parent):\n qt.QStyledItemDelegate.__init__(self, parent=parent)\n\n def createEditor(self, parent, option, index):\n if not index.isValid():\n return super(RemovePropertyItemDelegate, self).createEditor(\n parent, option, index\n )\n editor = RemovePlotItemButton(parent=parent)\n plotItem = self.getPlotItem(index)\n editor.setVisible(plotItem is not None)\n return editor\n\n def getPlotItem(self, index) -> Union[None, plot_model.Item]:\n plotItem = index.data(PlotItemRole)\n if not isinstance(plotItem, plot_model.Item):\n return None\n return plotItem\n\n def setEditorData(self, editor, index):\n plotItem = self.getPlotItem(index)\n editor.setVisible(plotItem is not None)\n editor.setPlotItem(plotItem)\n\n def setModelData(self, editor, model, index):\n pass\n\n\nclass ScanStyleDelegate(qt.QStyledItemDelegate):\n \"\"\"Style delegate to display scan style.\n \"\"\"\n\n EDITOR_ALWAYS_OPEN = True\n\n def __init__(self, parent=None, editable=False):\n qt.QStyledItemDelegate.__init__(self, parent=parent)\n self.__editable = editable\n\n def createEditor(self, parent, option, index):\n if not index.isValid():\n return super(ScanStyleDelegate, self).createEditor(parent, option, index)\n\n editor = 
StylePropertyWidget(parent)\n editor.setEditable(self.__editable)\n editor.setMinimumSize(editor.sizeHint())\n editor.setCheckItemValidity(True)\n self.__updateEditor(editor, index)\n return editor\n\n def __getFirstItem(self, plotModel: plot_model.Plot):\n if plotModel is None:\n return None\n for item in plotModel.items():\n if isinstance(item, plot_item_model.ScanItem):\n pass\n elif isinstance(item, plot_model.ComputableMixIn):\n pass\n else:\n return item\n return None\n\n def __updateEditor(self, editor: qt.QWidget, index: qt.QModelIndex):\n scanItem = index.data(ObjectRole)\n plotItem = scanItem.plotItem\n if plotItem is None:\n plotItem = self.__getFirstItem(scanItem.plotModel)\n editor.setScan(scanItem.scan)\n editor.setPlotItem(plotItem)\n\n def setEditorData(self, editor, index):\n self.__updateEditor(editor, index)\n\n def setModelData(self, editor, model, index):\n pass\n\n def updateEditorGeometry(self, editor, option, index):\n # Center the widget to the cell\n size = editor.sizeHint()\n half = size / 2\n halfPoint = qt.QPoint(half.width(), half.height() - 1)\n pos = option.rect.center() - halfPoint\n editor.move(pos)\n\n\nclass RemovePlotItemButton(qt.QToolButton):\n def __init__(self, parent: qt.QWidget = None):\n super(RemovePlotItemButton, self).__init__(parent=parent)\n self.__plotItem: Optional[plot_model.Item] = None\n self.clicked.connect(self.__requestRemoveItem)\n icon = icons.getQIcon(\"flint:icons/remove-item\")\n self.setIcon(icon)\n self.setAutoRaise(True)\n\n def __requestRemoveItem(self):\n plotItem = self.__plotItem\n plotModel = plotItem.plot()\n if plotModel is not None:\n model_helper.removeItemAndKeepAxes(plotModel, plotItem)\n # FIXME: It would be better to make it part of the model\n plotModel.tagUserEditTime()\n\n def setPlotItem(self, plotItem: plot_model.Item):\n self.__plotItem = plotItem\n\n\nclass RemoveScanButton(qt.QToolButton):\n def __init__(self, parent: qt.QWidget = None):\n super(RemoveScanButton, self).__init__(parent=parent)\n self.__scan: Optional[scan_model.Scan] = None\n self.__plotWidget: Optional[qt.QWidget] = None\n self.clicked.connect(self.__requestRemoveScan)\n icon = icons.getQIcon(\"flint:icons/remove-item\")\n self.setIcon(icon)\n self.setAutoRaise(True)\n\n def __requestRemoveScan(self):\n widget = self.__plotWidget\n widget.removeScan(self.__scan)\n\n def setScan(self, scan: scan_model.Scan):\n self.__scan = scan\n\n def setPlotWidget(self, plotWidget: qt.QWidget):\n self.__plotWidget = plotWidget\n\n\nclass RemoveScanDelegate(qt.QStyledItemDelegate):\n EDITOR_ALWAYS_OPEN = True\n\n def createEditor(self, parent, option, index):\n if not index.isValid():\n return super(RemoveScanDelegate, self).createEditor(parent, option, index)\n editor = RemoveScanButton(parent=parent)\n self.setEditorData(editor, index)\n return editor\n\n def setEditorData(self, editor, index):\n scanItem = index.data(ObjectRole)\n editor.setScan(scanItem.scan)\n editor.setPlotWidget(scanItem.curveWidget)\n isRemovable = True\n if scanItem.scan is None or scanItem.curveWidget is None:\n isRemovable = False\n else:\n if scanItem.scan is scanItem.curveWidget.scan():\n isRemovable = False\n editor.setToolTip(\"The active scan can't be removed\")\n editor.setEnabled(isRemovable)\n\n def setModelData(self, editor, model, index):\n pass\n\n\nclass ScanNumberDelegate(qt.QStyledItemDelegate):\n def initStyleOption(self, option: qt.QStyleOptionViewItem, index: qt.QModelIndex):\n scanItem = index.data(ObjectRole)\n scanInfo = scanItem.scan.scanInfo()\n scanNb = 
scanInfo.get(\"scan_nb\", None)\r\n        if scanNb is None:\r\n            scanNb = \"\"\r\n        else:\r\n            scanNb = f\"#{scanNb}\"\r\n        option.text = scanNb\r\n\r\n    def sizeHint(self, option: qt.QStyleOptionViewItem, index: qt.QModelIndex):\r\n        size = option.fontMetrics.size(qt.Qt.TextSingleLine, \"####\")\r\n        return size\r\n\r\n\r\nclass ScanTitleDelegate(qt.QStyledItemDelegate):\r\n    def initStyleOption(self, option: qt.QStyleOptionViewItem, index: qt.QModelIndex):\r\n        scanItem = index.data(ObjectRole)\r\n        scanInfo = scanItem.scan.scanInfo()\r\n        scanTitle = scanInfo.get(\"title\", None)\r\n        if scanTitle is None:\r\n            scanTitle = scanInfo.get(\"type\", None)\r\n        if scanTitle is None:\r\n            scanTitle = \"\"\r\n        option.text = scanTitle\r\n\r\n\r\nclass ScanStartTimeDelegate(qt.QStyledItemDelegate):\r\n    def __toStartTimeText(self, scan: scan_model.Scan) -> str:\r\n        value = scan.startTime()\r\n        if value is None:\r\n            return \"\"\r\n        return value.strftime(\"%H:%M\")\r\n\r\n    def initStyleOption(self, option: qt.QStyleOptionViewItem, index: qt.QModelIndex):\r\n        scanItem = index.data(ObjectRole)\r\n        text = self.__toStartTimeText(scanItem.scan)\r\n        option.text = text\r\n\r\n    def sizeHint(self, option: qt.QStyleOptionViewItem, index: qt.QModelIndex):\r\n        size = option.fontMetrics.size(qt.Qt.TextSingleLine, \"##:##\")\r\n        return size\r\n\r\n\r\nclass StylePropertyWidget(qt.QWidget):\r\n    def __init__(self, parent):\r\n        super(StylePropertyWidget, self).__init__(parent=parent)\r\n        layout = qt.QHBoxLayout(self)\r\n        layout.setContentsMargins(0, 0, 0, 0)\r\n        layout.setSpacing(0)\r\n        self.setLayout(layout)\r\n\r\n        self.__legend = LegendIconWidget(self)\r\n        self.__legend.setFixedWidth(30)\r\n        layout.addWidget(self.__legend)\r\n        layout.addSpacing(2)\r\n\r\n        self.__displayContrast = False\r\n\r\n        self.__buttonStyle: Optional[qt.QToolButton] = None\r\n        self.__buttonContrast: Optional[qt.QToolButton] = None\r\n\r\n        self.__flintModel: Optional[flint_model.FlintState] = None\r\n        self.__plotItem: Optional[plot_model.Item] = None\r\n        self.__plotModel: Optional[plot_model.Plot] = None\r\n        \"\"\"Holds the item plot to avoid an early release of the plot\"\"\"\r\n        self.__scan: Union[None, scan_model.Scan] = None\r\n        self.__checkItem = False\r\n\r\n    def setCheckItemValidity(self, check: bool):\r\n        self.__checkItem = check\r\n        self.__update()\r\n\r\n    def setEditable(self, isEditable):\r\n        \"\"\"Set the widget editable.\r\n\r\n        A button is displayed to allow editing the style and propagating it to\r\n        the item.\r\n        \"\"\"\r\n        style = self.style()\r\n        w = style.pixelMetric(qt.QStyle.PM_ExclusiveIndicatorWidth)\r\n        h = style.pixelMetric(qt.QStyle.PM_ExclusiveIndicatorHeight)\r\n        indicatorSize = qt.QSize(w, h) + qt.QSize(4, 4)\r\n\r\n        layout = self.layout()\r\n\r\n        if self.__buttonStyle is not None:\r\n            self.__buttonStyle.setVisible(isEditable)\r\n        elif isEditable:\r\n            icon = icons.getQIcon(\"flint:icons/style\")\r\n            self.__buttonStyle = qt.QToolButton(self)\r\n            self.__buttonStyle.setToolTip(\"Edit the style of this item\")\r\n            self.__buttonStyle.setIcon(icon)\r\n            self.__buttonStyle.setAutoRaise(True)\r\n            self.__buttonStyle.clicked.connect(self.__editStyle)\r\n            self.__buttonStyle.setFixedSize(indicatorSize)\r\n            layout.addWidget(self.__buttonStyle)\r\n\r\n        if self.__buttonContrast is not None:\r\n            self.__buttonContrast.setVisible(isEditable)\r\n        elif isEditable and self.__displayContrast:\r\n            icon = icons.getQIcon(\"flint:icons/contrast\")\r\n            self.__buttonContrast = qt.QToolButton(self)\r\n            self.__buttonContrast.setToolTip(\"Edit the contrast of this item\")\r\n            self.__buttonContrast.setIcon(icon)\r\n            self.__buttonContrast.setAutoRaise(True)\r\n            self.__buttonContrast.clicked.connect(self.__editConstrast)\r\n            
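# keep the tool button the same size as the style's radio indicator\r\n            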
self.__buttonContrast.setFixedSize(indicatorSize)\n layout.addWidget(self.__buttonContrast)\n self.__updateEditButton()\n\n def __updateEditButton(self):\n if self.__buttonContrast is not None:\n visible = self.__plotItem is not None and isinstance(\n self.__plotItem,\n (plot_item_model.ImageItem, plot_item_model.ScatterItem),\n )\n self.__buttonContrast.setVisible(visible)\n\n def __editStyle(self):\n if self.__plotItem is None:\n return\n dialog = StyleDialogEditor(self)\n dialog.setPlotItem(self.__plotItem)\n dialog.setFlintModel(self.__flintModel)\n result = dialog.exec_()\n if result:\n style = dialog.selectedStyle()\n self.__plotItem.setCustomStyle(style)\n\n def __editConstrast(self):\n if self.__plotItem is None:\n return\n\n scan = self.__scan\n item = self.__plotItem\n item.customStyle()\n\n style = item.getStyle(scan)\n colormap = model_helper.getColormapFromItem(item, style)\n\n saveCustomStyle = item.customStyle()\n saveColormap = item.colormap().copy()\n\n def updateCustomStyle():\n style = item.getStyle(scan)\n style = style_model.Style(colormapLut=colormap.getName(), style=style)\n item.setCustomStyle(style)\n\n colormap.sigChanged.connect(updateCustomStyle)\n\n dialog = FlintColormapDialog(self)\n dialog.setModal(True)\n dialog.setPlotItem(item, scan)\n dialog.setColormap(colormap)\n result = dialog.exec_()\n if result:\n style = item.customStyle()\n style = style_model.Style(colormapLut=colormap.getName(), style=style)\n self.__plotItem.setCustomStyle(style)\n else:\n item.setCustomStyle(saveCustomStyle)\n item.colormap().setFromColormap(saveColormap)\n\n def setPlotItem(self, plotItem: plot_model.Item):\n if self.__plotItem is not None:\n self.__plotItem.valueChanged.disconnect(self.__plotItemChanged)\n self.__plotItem = plotItem\n if self.__plotItem is not None:\n self.__plotModel = self.__plotItem.plot()\n self.__plotItem.valueChanged.connect(self.__plotItemChanged)\n self.__plotItemStyleChanged()\n else:\n self.__plotModel = None\n self.__updateEditButton()\n\n def setFlintModel(self, flintModel: flint_model.FlintState = None):\n if self.__flintModel is not None:\n self.__flintModel.currentScanChanged.disconnect(self.__currentScanChanged)\n self.__setScan(None)\n self.__flintModel = flintModel\n if self.__flintModel is not None:\n self.__flintModel.currentScanChanged.connect(self.__currentScanChanged)\n self.__setScan(self.__flintModel.currentScan())\n\n def __currentScanChanged(self):\n self.__setScan(self.__flintModel.currentScan())\n\n def setScan(self, scan: Union[None, scan_model.Scan]):\n self.__scan = scan\n self.__update()\n\n def __setScan(self, scan: Union[None, scan_model.Scan]):\n self.__scan = scan\n self.__update()\n\n def __plotItemChanged(self, eventType):\n if eventType == plot_model.ChangeEventType.CUSTOM_STYLE:\n self.__plotItemStyleChanged()\n\n def __plotItemStyleChanged(self):\n self.__update()\n\n def getQColor(self, color):\n if color is None:\n return qt.QColor()\n return silx_colors.asQColor(color)\n\n def __updateScatter(self, style: plot_model.Style):\n pointBased = True\n if style.fillStyle is not style_model.FillStyle.NO_FILL:\n if not isinstance(style.fillStyle, str):\n pointBased = False\n self.__legend.setColormap(style.colormapLut)\n else:\n self.__legend.setColormap(None)\n else:\n self.__legend.setColormap(None)\n\n if style.lineStyle is not style_model.LineStyle.NO_LINE:\n self.__legend.setLineStyle(\"-\")\n color = self.getQColor(style.lineColor)\n self.__legend.setLineColor(color)\n self.__legend.setLineWidth(1.5)\n else:\n 
self.__legend.setLineStyle(\" \")\n\n if pointBased:\n if style.symbolStyle == style_model.SymbolStyle.NO_SYMBOL:\n symbolStyle = \"o\"\n else:\n symbolStyle = style_model.symbol_to_silx(style.symbolStyle)\n self.__legend.setSymbol(symbolStyle)\n self.__legend.setSymbolColormap(style.colormapLut)\n self.__legend.setSymbolColor(None)\n elif style.symbolStyle is not style_model.SymbolStyle.NO_SYMBOL:\n symbolStyle = style_model.symbol_to_silx(style.symbolStyle)\n self.__legend.setSymbol(symbolStyle)\n color = self.getQColor(style.symbolColor)\n self.__legend.setSymbolColor(color)\n self.__legend.setSymbolColormap(None)\n else:\n self.__legend.setSymbol(\" \")\n\n def __isItemAvailable(self, item: plot_model.Item, scan: scan_model.Scan):\n if not self.__checkItem:\n return True\n if scan is None:\n return False\n return item.isAvailableInScan(scan)\n\n def __update(self):\n plotItem = self.__plotItem\n if plotItem is None:\n self.__legend.setLineColor(\"red\")\n self.__legend.setLineStyle(\":\")\n self.__legend.setLineWidth(1.5)\n else:\n scan = self.__scan\n try:\n if not self.__isItemAvailable(plotItem, scan):\n self.__legend.setSymbol(\"x\")\n self.__legend.setSymbolColor(\"red\")\n self.setToolTip(\"Not available in this scan\")\n elif isinstance(plotItem, plot_item_model.ScatterItem):\n self.setToolTip(\"\")\n style = plotItem.getStyle(scan)\n self.__updateScatter(style)\n else:\n self.setToolTip(\"\")\n style = plotItem.getStyle(scan)\n color = self.getQColor(style.lineColor)\n if style.symbolStyle is not style_model.SymbolStyle.NO_SYMBOL:\n symbolStyle = style_model.symbol_to_silx(style.symbolStyle)\n self.__legend.setSymbol(symbolStyle)\n if style.symbolColor is None:\n self.__legend.setSymbolColor(qt.QColor(0xE0, 0xE0, 0xE0))\n else:\n symbolColor = self.getQColor(style.symbolColor)\n self.__legend.setSymbolColor(symbolColor)\n self.__legend.setSymbolColormap(style.colormapLut)\n if isinstance(style.lineStyle, str):\n lineStyle = style.lineStyle\n elif style.lineStyle == style_model.LineStyle.NO_LINE:\n lineStyle = \" \"\n elif style.lineStyle == style_model.LineStyle.SCATTER_SEQUENCE:\n lineStyle = \"-\"\n self.__legend.setLineColor(color)\n self.__legend.setLineStyle(lineStyle)\n self.__legend.setLineWidth(1.5)\n except Exception as e:\n self.setToolTip(f\"Unknown error
{str(e)}\")\n self.__legend.setLineColor(\"grey\")\n self.__legend.setLineStyle(\":\")\n self.__legend.setLineWidth(1.5)\n self.__legend.update()\n\n\nclass HookedStandardItem(qt.QStandardItem):\n def __init__(self, text: str):\n qt.QStandardItem.__init__(self, text)\n self.modelUpdated: Optional[Callable[[qt.QStandardItem], None]] = None\n\n def setData(self, value, role=qt.Qt.UserRole + 1):\n qt.QStandardItem.setData(self, value, role)\n if self.modelUpdated is None:\n return\n method = self.modelUpdated()\n if method is not None:\n method(self)\n\n\nclass RadioPropertyItemDelegate(qt.QStyledItemDelegate):\n def __init__(self, parent):\n qt.QStyledItemDelegate.__init__(self, parent=parent)\n\n def createEditor(self, parent, option, index):\n if not index.isValid():\n return super(RadioPropertyItemDelegate, self).createEditor(\n parent, option, index\n )\n\n # Create group to avoid interferences\n editor = qt.QWidget(parent=parent)\n editor.setContentsMargins(1, 1, 1, 1)\n layout = qt.QHBoxLayout(editor)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setSpacing(1)\n\n radio = qt.QRadioButton(parent=editor)\n radio.setObjectName(\"radio\")\n radio.setAutoExclusive(False)\n radio.clicked.connect(self.__editorsChanged)\n radio.setMinimumSize(radio.minimumSizeHint())\n radio.setMaximumSize(radio.minimumSizeHint())\n layout.addWidget(radio)\n\n editor.setSizePolicy(qt.QSizePolicy.Fixed, qt.QSizePolicy.Fixed)\n self.setEditorData(editor, index)\n return editor\n\n def __editorsChanged(self):\n editor = self.sender().parent()\n self.commitData.emit(editor)\n\n def setEditorData(self, editor: qt.QWidget, index):\n radio = editor.findChildren(qt.QRadioButton, \"radio\")[0]\n data = index.data(role=RadioRole)\n with utils.blockSignals(radio):\n if data is None:\n radio.setVisible(False)\n elif data == qt.Qt.Checked:\n radio.setVisible(True)\n radio.setChecked(True)\n elif data == qt.Qt.Unchecked:\n radio.setVisible(True)\n radio.setChecked(False)\n else:\n _logger.warning(\"Unsupported data %s\", data)\n\n def setModelData(self, editor, model, index):\n radio = editor.findChildren(qt.QRadioButton, \"radio\")[0]\n data = qt.Qt.Checked if radio.isChecked() else qt.Qt.Unchecked\n model.setData(index, data, role=RadioRole)\n\n def updateEditorGeometry(self, editor, option, index):\n # Center the widget to the cell\n size = editor.sizeHint()\n half = size / 2\n halfPoint = qt.QPoint(half.width(), half.height() - 1)\n pos = option.rect.center() - halfPoint\n editor.move(pos)\n","repo_name":"marc2332/bliss","sub_path":"bliss/flint/widgets/delegates.py","file_name":"delegates.py","file_ext":"py","file_size_in_byte":25833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"17680208983","text":"\nimport os\nimport sys\nimport yaml\nimport tempfile\nimport subprocess\n\nfrom quarto.quarto import find_quarto\n\ndef render(input,\n output_format = None,\n output_file = None,\n execute = True,\n execute_params = None,\n execute_dir = None,\n cache = None,\n cache_refresh = False,\n kernel_keepalive = None,\n kernel_restart = False,\n debug = False,\n quiet = False,\n pandoc_args = None):\n\n # params file to remove after render\n params_file = None\n\n # build args\n args = [\"render\", input]\n \n if output_format is not None:\n args.extend([\"--to\", output_format])\n \n if output_file is not None:\n args.extend([\"--output\", output_file])\n \n if execute is not None:\n if execute is True:\n args.append(\"--execute\")\n elif execute is False:\n 
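# an explicit False emits --no-execute; leaving execute as None keeps the default behaviour\n            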
args.append(\"--no-execute\")\n \n if execute_params is not None:\n params_file = tempfile.NamedTemporaryFile(mode = 'w',\n prefix=\"quarto-params\", \n suffix=\".yml\",\n delete=False)\n yaml.dump(execute_params, params_file)\n params_file.close()\n args.extend([\"--execute-params\", params_file.name]) \n\n if execute_dir is not None:\n args.extend([\"--execute-dir\", execute_dir])\n\n if cache is not None:\n if cache is True:\n args.append(\"--cache\")\n elif cache is False:\n args.append(\"--no-cache\")\n \n if cache_refresh is True:\n args.append(\"--cache-refresh\")\n\n if kernel_keepalive is not None:\n args.extend([\"--kernel-keepalive\", str(kernel_keepalive)])\n \n if kernel_restart is True:\n args.append(\"--kernel-restart\")\n\n if debug is True:\n args.append(\"--debug\")\n \n if quiet is True:\n args.append(\"--quiet\")\n \n # run process\n try:\n process = subprocess.Popen([find_quarto()] + args)\n process.wait()\n finally:\n if params_file is not None:\n os.remove(params_file.name)\n\n","repo_name":"quarto-dev/quarto-python","sub_path":"quarto/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"43"} +{"seq_id":"42818339556","text":"import math\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass LinearClassifier(nn.Module):\n\n def __init__(self, args):\n super(LinearClassifier, self).__init__()\n self.device = args.device\n self.batch_size = args.batch_size\n self.input_dimension = args.input_dimension\n self.hidden_dimension = args.hidden_dimension\n self.output_dimension = args.output_dimension\n\n # Classifier\n self.linear_1 = nn.Linear(self.input_dimension, self.hidden_dimension)\n self.linear_2 = nn.Linear(self.hidden_dimension, self.output_dimension)\n\n def forward(self, x):\n # Pass through layers\n x1 = F.relu(self.linear_1(x))\n x2 = self.linear_2(x1)\n\n return x2\n\n\nclass ConvolutionalClassifier(nn.Module):\n\n def __init__(self, args):\n super(ConvolutionalClassifier, self).__init__()\n self.device = args.device\n self.batch_size = args.batch_size\n self.input_dimension = args.input_dimension\n self.hidden_dimension = args.hidden_dimension\n self.output_dimension = args.output_dimension\n\n # Calculate parameters\n self.image_dimension = int(math.sqrt(self.input_dimension))\n\n # Classifier\n self.conv_1 = nn.Conv2d(1, 8, kernel_size=5)\n self.conv_2 = nn.Conv2d(8, 16, kernel_size=5)\n self.linear_1 = nn.Linear(16 * (self.image_dimension - 8) ** 2,\n self.output_dimension)\n\n def forward(self, x):\n # Reshape into image\n x = x.view(self.batch_size, 1,\n self.image_dimension,\n self.image_dimension)\n\n # Pass through layers\n x1 = F.relu(self.conv_1(x))\n x2 = F.relu(self.conv_2(x1))\n\n # Flatten\n x2 = x2.view(self.batch_size, -1)\n x3 = self.linear_1(x2)\n\n return x3\n","repo_name":"franciscovalentecastro/Proyecto-Optimizacion","sub_path":"net-mnist/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"28579661233","text":"def math_process(x,y):\n try:\n result = x/y * (3+x) / (4)\n if result < 0:\n raise Exception\n except ZeroDivisionError:\n return \"You can't divide by zero\"\n except Exception:\n return \"Something went wrong with the result\"\n\n\nif __name__ == \"__main__\":\n print(math_process(-2, 
5))","repo_name":"suhaib079/amman-python-401d4","sub_path":"class-04/code_review/exceptions_review.py","file_name":"exceptions_review.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"43"} +{"seq_id":"20271340684","text":"import PyQt5.QtCore as qtc\nimport PyQt5.QtGui as qtg\nimport PyQt5.QtWidgets as qtw\n\nfrom application_gui.common_gui_functions import CLabel, CHorizontalSeparator\n\nfrom application_gui.manage_trackers.functions import TrackerManagerFunctions\n\n##-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\\n## WINDOW FOR READING METADATA\n##-/-/-/-/-/-/-/-/-/-/-/-/-/-/\n\nclass TrackerManagerWindow(qtw.QMainWindow, TrackerManagerFunctions):\n def __init__(self, parent):\n super(TrackerManagerWindow, self).__init__(parent)\n\n # Initialise the subwindow\n self.parent = parent\n self.setWindowModality(qtc.Qt.ApplicationModal)\n\n # Generate the window\n self.mainWidget = qtw.QWidget()\n self.mainLayout = qtw.QVBoxLayout(self.mainWidget)\n self.setWindowTitle(\"Manage Trackers\")\n\n # Populate the panel\n self.createTableWidget(self.mainLayout)\n #self.mainLayout.addWidget( CHorizontalSeparator() )\n self.createUserActions(self.mainLayout)\n\n # Display the panel\n self.mainWidget.setLayout(self.mainLayout)\n self.setCentralWidget(self.mainWidget)\n self.show()\n self.setFixedSize(350,275)\n\n # ---------------------------------------------------\n # Reinitialise the display when the window is closed\n def closeEvent(self, event=None):\n event.accept()\n self.parent.subWindows['tracker_manager'] = None\n\n ##-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\\n ## GENERATE THE DISPLAY\n ##-/-/-/-/-/-/-/-/-/-/\n\n # -------------------------------------------\n # Generate the table display for the trackers\n def createTableWidget(self, parentWidget):\n\n # Generate the widget\n self.trackerSettingsWidget = qtw.QWidget()\n self.trackerSettingsLayout = qtw.QVBoxLayout(self.trackerSettingsWidget)\n\n # Generate the table of servers\n self.trackersTable = qtw.QTableWidget(0, 4)\n self.trackersTable.setHorizontalHeaderLabels( ['', 'Name', '', ''] )\n\n self.trackersTable.setSelectionMode(qtw.QAbstractItemView.NoSelection)\n self.trackersTable.setEditTriggers(qtw.QAbstractItemView.NoEditTriggers)\n\n self.trackersTable.setShowGrid(False)\n self.trackersTable.setMinimumHeight(100)\n self.trackerSettingsLayout.addWidget(self.trackersTable)\n\n # Populate the widget\n self.fillTrackerTable()\n\n # Display the widget\n self.trackerSettingsWidget.setLayout(self.trackerSettingsLayout)\n parentWidget.addWidget(self.trackerSettingsWidget)\n\n # ----------------------------------\n # Generate the controls for the user\n def createUserActions(self, parentWidget):\n\n # Generate the widget\n self.userActionWidget = qtw.QWidget()\n self.userActionLayout = qtw.QGridLayout(self.userActionWidget)\n\n # Add the button to import a tracker\n current_row = 0\n self.importButton = qtw.QPushButton(\"Import\")\n self.importButton.clicked.connect(self.importTracker)\n self.importButton.setStatusTip(\"Import a tracker from a file.\")\n self.importButton.setFixedWidth(125)\n self.userActionLayout.addWidget(self.importButton, current_row, 0)\n\n # Add the button to create a new tracker\n self.newButton = qtw.QPushButton(\"New\")\n self.newButton.clicked.connect(self.makeNewTracker)\n self.newButton.setStatusTip(\"Create a new tracker.\")\n self.newButton.setFixedWidth(125)\n self.userActionLayout.addWidget(self.newButton, current_row, 1)\n\n # Add the button to 
export a tracker\n current_row += 1\n self.exportButton = qtw.QPushButton(\"Export\")\n self.exportButton.clicked.connect(self.exportTracker)\n self.exportButton.setStatusTip(\"Export a tracker to a file.\")\n self.exportButton.setFixedWidth(125)\n self.userActionLayout.addWidget(self.exportButton, current_row, 0)\n\n # Add the button to close\n self.closeButton = qtw.QPushButton(\"Close\")\n self.closeButton.clicked.connect(self.close)\n self.closeButton.setStatusTip(\"Close the current window.\")\n self.closeButton.setFixedWidth(125)\n self.userActionLayout.addWidget(self.closeButton, current_row, 1)\n\n # Display the widget\n self.userActionWidget.setLayout(self.userActionLayout)\n parentWidget.addWidget(self.userActionWidget)\n","repo_name":"vivien-walter/iscan","sub_path":"source/src/main/python/application_gui/manage_trackers/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"36637445977","text":"import tkinter as tk\nimport os\nfrom openpyxl import *\n\nwindow=tk.Tk()\n\nwindow.title(\"Klasör Güncelle\")\nwindow.geometry(\"650x450\")\n\nlabel1=tk.Label(window,text=\"Proje excel dosyalarının bulunduğu dizini giriniz\")\nlabel1.pack()\n\nentry1=tk.Entry(window,bd=5)\nentry1.pack()\n\nlabel2=tk.Label(window,text=\"Proje kodunu giriniz\")\nlabel2.pack()\n\nentry2=tk.Entry(window,bd=5)\nentry2.pack()\n\nlist_harfler=[\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"İ\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"R\",\"S\",\"T\",\"U\",\"V\",\"Y\",\"Z\"]\n\ndef buton():\n wb1=Workbook()\n filepath=entry1.get() + \"\\\\\" + \"Sipariş_Çalışma_Hazırlığı_\" + entry2.get() + \".xlsx\" #Proje kodu bilerek sonda.For dönerken veri aktarılacak excel dosyası almaması için. 
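(in English: the project code is deliberately placed last so the for loop below does not pick up the output excel file itself)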
\n wb1.save(filepath)\n wb1.close()\n\n wb4=load_workbook(filepath)\n sheet=wb4.active\n sheet.append((\"Ürün Kodu\",\"İndis\",\"Malzeme Tanım \",\"Kalınlık\",\"Boya\",\"En\",\"Boy\",\"Parça (metrekare) \",\"Boşaltma (metrekare) \",\"Hammadde Cinsi\",\"Büküm\",\"Saç Plaka En\",\"Saç Plaka Boy\",\"Yerleşim Sayısı\",\"İşleme Süresi\",\"Hurda Ağırlık\"))\n wb4.save(filepath)\n wb4.close()\n\n os.chdir(entry1.get())\n dosyalar_list=os.listdir(entry1.get())\n for i in dosyalar_list:\n dosya_split=i.split(\"_\")\n if dosya_split[0]== entry2.get():\n wb2=load_workbook(entry1.get() + \"\\\\\" + i)\n ws=wb2.active\n sütun_urun_kodu= ws[\"B11\"].value\n sütun_urun_kodu_new=sütun_urun_kodu[0:7]\n \n if sütun_urun_kodu[7] in list_harfler:\n sütun_indis=sütun_urun_kodu[7]\n else:\n sütun_indis=\"BOŞ\"\n\n sütun_kalınlık=ws[\"B3\"].value\n sütun_kalınlık_new=sütun_kalınlık.split(\" \")\n sütun_kalınlık_new_1=sütun_kalınlık_new[1][1:]\n sütun_en=ws[\"F11\"].value\n sütun_boy=ws[\"E11\"].value\n sütun_hammadde_cinsi=ws[\"B3\"].value\n sütun_sac_plaka_en=ws[\"B4\"].value\n sütun_sac_plaka_en_new=sütun_sac_plaka_en.split(\" \")\n sütun_sac_plaka_en_new_1=sütun_sac_plaka_en_new[2]\n sütun_sac_plaka_boy=sütun_sac_plaka_en_new[0] \n sütun_yerlesim_sayısı=ws[\"H11\"].value\n sütun_isleme_süresi=ws[\"B5\"].value\n sütun_hurda_agırlık=ws[\"D7\"].value\n wb2.close()\n\n wb3=load_workbook(filepath)\n sheet=wb3.active\n sheet.append((sütun_urun_kodu_new,sütun_indis,\" \",sütun_kalınlık_new_1,\" \",sütun_en,sütun_boy,\" \",\" \",sütun_hammadde_cinsi,\" \",sütun_sac_plaka_en_new_1,sütun_sac_plaka_boy,sütun_yerlesim_sayısı,sütun_isleme_süresi,sütun_hurda_agırlık))\n wb3.save(filepath)\n wb3.close()\n\nbutton=tk.Button(window,text=\"Gönder\",command=buton)\nbutton.pack()\n\nwindow.mainloop()","repo_name":"bilalgelincik/excel_project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35397307524","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport logging\nfrom IntentClassification.Parameters import logger\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import f1_score\nfrom IntentClassification import Parameters\nimport json\nfrom IntentClassification.Parameters import logger\n\n# To create train test split and save to file\ndef createTestData():\n logger.info ('Starting training')\n xl = pd.ExcelFile( Parameters.data_Path + Parameters.dataFileName)\n\n Training_Matrix = []\n Training_Label = []\n Training_Label_Str = []\n \n logger.info ('Reading training data')\n \n for sheet in xl.sheet_names:\n## Load a sheet into a DataFrame by name: df\n df = xl.parse(sheet)\n for value in df.values:\n Training_Matrix.append(str(value))\n Training_Label_Str.append(sheet)\n\n le = LabelEncoder()\n Training_Label = list(le.fit_transform(Training_Label_Str))\n logger.info (len (Training_Matrix))\n logger.info (len (Training_Label))\n\n x_train = np.array (Training_Matrix)\n y_train = np.array (Training_Label)\n y_train_str = np.array (Training_Label_Str)\n \n logger.info ('Split training data')\n sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=0)\n for 
train_index, test_index in sss.split(x_train, y_train):\n logger.debug(\"TRAIN:\", train_index, \"TEST:\", test_index)\n X_train_new, X_test_new = x_train[train_index], x_train[test_index]\n y_train_new, y_test_new = y_train[train_index], y_train[test_index]\n y_train_str, y_test_str = y_train_str[train_index], y_train_str[test_index]\n\n logger.info ('Save training data')\n df = pd.DataFrame(\n {Parameters.dataColumn: X_train_new,\n Parameters.labelColumn: y_train_new,\n Parameters.labelStrColumn: y_train_str\n })\n \n df.to_excel (Parameters.data_Path + Parameters.trainFileName)\n\n logger.info ('Save testing data')\n df = pd.DataFrame(\n {Parameters.dataColumn: X_test_new,\n Parameters.labelColumn: y_test_new,\n Parameters.labelStrColumn: y_test_str\n })\n df.to_excel (Parameters.data_Path + Parameters.testFileName)\n\n logger.info ('Completed create TestData')\n\n# To download model\ndef downLoad_NNLM_Model():\n\n os.environ['http_proxy'] = Parameters.http_proxy \n os.environ['HTTP_PROXY'] = Parameters.http_proxy\n os.environ['https_proxy'] = Parameters.https_proxy\n os.environ['HTTPS_PROXY'] = Parameters.https_proxy\n\n embed = hub.Module(Parameters.module_spec_Url)\n\n# To get predictions\ndef get_predictions(estimator, input_fn):\n return [x[\"class_ids\"][0] for x in estimator.predict(input_fn=input_fn)]\n\n# To train model\ndef trainModel (): \n#\n# Reduce logging output.\n tf.logging.set_verbosity(tf.logging.DEBUG)\n\n log = logging.getLogger('tensorflow')\n log.setLevel(logging.DEBUG)\n\n# create formatter and add it to the handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# create file handler which logs even debug messages\n fh = logging.FileHandler(Parameters.logpath + Parameters.tensorFlowLogFile)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n log.addHandler(fh)\n\n\n train_data = pd.read_excel(Parameters.data_Path + Parameters.trainFileName)\n train_x = pd.DataFrame(train_data [Parameters.dataColumn])\n train_y = pd.to_numeric(train_data [Parameters.labelColumn])\n \n\n sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)\n for train_index, test_index in sss.split(train_x, train_y):\n log.debug(\"TRAIN:\" + str(train_index) + \"TEST:\" + str( test_index))\n X_train_new, X_test_new = train_x.iloc[train_index], train_x.iloc[test_index]\n y_train_new, y_test_new = train_y.iloc[train_index], train_y.iloc[test_index]\n \n # Training input on the whole training set with no limit on training epochs.\n train_input_fn = tf.estimator.inputs.pandas_input_fn(\n X_train_new, y_train_new, num_epochs=None, shuffle=True)\n\n# Prediction on the whole training set.\n eval_train_input_fn = tf.estimator.inputs.pandas_input_fn(\n X_train_new, y_train_new, shuffle=False)\n# Prediction on the test set.\n eval_test_input_fn = tf.estimator.inputs.pandas_input_fn(\n X_test_new, y_test_new, shuffle=False)\n\n embedded_text_feature_column = hub.text_embedding_column(\n key=Parameters.dataColumn, \n module_spec=Parameters.module_Spec_Path)\n\n\n estimator = tf.estimator.DNNClassifier(\n hidden_units=[500, 100],\n feature_columns=[embedded_text_feature_column],\n n_classes=Parameters.n_classes,\n optimizer=tf.train.AdagradOptimizer(learning_rate=0.003), model_dir=Parameters.model_dir)\n\n# Training for 1,000 steps means 128,000 training examples with the default\n# batch size. 
This is roughly equivalent to 5 epochs since the training dataset\n# contains 25,000 examples.\n log.info ('Training chatbot')\n estimator.train(input_fn=train_input_fn, steps=3000);\n\n log.info ('Evaluating chatbot')\n\n evaluateModel (eval_train_input_fn, eval_test_input_fn, y_test_new, estimator);\n\n testModel (estimator)\n\n return estimator\n\n# To evaluate model\ndef evaluateModel (eval_train_input_fn, eval_test_input_fn, y_test_new, estimator):\n train_eval_result = estimator.evaluate(input_fn=eval_train_input_fn)\n test_eval_result = estimator.evaluate(input_fn=eval_test_input_fn)\n\n logger.info( \"Training set accuracy: {accuracy}\".format(**train_eval_result))\n logger.info( \"Test set accuracy: {accuracy}\".format(**test_eval_result))\n\n with tf.Graph().as_default():\n \n pred = get_predictions(estimator, eval_test_input_fn)\n cm = tf.confusion_matrix(y_test_new, pred)\n \n with tf.Session() as session:\n cm_out = session.run(cm)\n\n# Normalize the confusion matrix so that each row sums to 1.\n cm_out = cm_out.astype(float) / cm_out.sum(axis=1)[:, np.newaxis]\n\n sns.heatmap(cm_out, annot=True, xticklabels=Parameters.LABELS, yticklabels=Parameters.LABELS);\n plt.xlabel(\"Predicted\");\n plt.ylabel(\"True\");\n\n logger.info (\"\\nPrecision: ->\" + str(precision_score(y_test_new, pred, average='micro')))\n logger.info (\"\\nRecall: ->\" + str(recall_score(y_test_new, pred, average='micro')))\n logger.info (\"\\nf1_score: ->\"+ str(f1_score(y_test_new, pred, average='micro')))\n\n\n# To test model\ndef testModel (estimator):\n\n test_data = pd.read_excel(Parameters.data_Path + Parameters.testFileName)\n test_x = pd.DataFrame(test_data [Parameters.dataColumn])\n test_y = pd.DataFrame(test_data [Parameters.labelColumn])\n\n# Prediction on the test set.\n predict_test_input_fn = tf.estimator.inputs.pandas_input_fn(\n test_x, test_y, shuffle=False)\n\n\n test_pred_result = estimator.evaluate(input_fn=predict_test_input_fn)\n logger.info( \"Test set accuracy: {accuracy}\".format(**test_pred_result))\n\n test_pred_result = estimator.predict(input_fn=predict_test_input_fn)\n\n pred = []\n for result in test_pred_result:\n pred.append (result ['class_ids'])\n\n pred_y = pd.DataFrame(pred)\n\n result_df = pd.concat([test_x, test_y, pred_y], axis=1, ignore_index=True)\n \n i = 0\n for index, row in result_df.iterrows():\n if int ( (row[1])) != int ( (row[2])) :\n logger.debug ( str(row[0]) + ' : ' + str(row[2]))\n i = i +1\n\n logger.info ('Incorrect predictions:' + str(i))\n\n# To create estimator object\ndef make_estimator(model_dir):\n\n logger.info ('make_estimator')\n config = tf.estimator.RunConfig (model_dir=model_dir)\n embedded_text_feature_column = hub.text_embedding_column(\n key=Parameters.dataColumn, \n module_spec=Parameters.module_Spec_Path)\n logger.info (embedded_text_feature_column)\n\n estimator = tf.estimator.DNNClassifier(config = config,\n hidden_units=[500, 100],\n feature_columns=[embedded_text_feature_column],\n n_classes=Parameters.n_classes,\n optimizer=tf.train.AdagradOptimizer(learning_rate=0.003))\n \n\n return estimator\n\n# To predict user input class\ndef predictInput ( inputString):\n \n result = None\n from IntentClassification.apps import IntentClassificationConfig\n estimator = IntentClassificationConfig.get_EstimatorObject()\n \n predict_test_input_fn = tf.estimator.inputs.pandas_input_fn(\n pd.DataFrame([inputString], columns = [Parameters.dataColumn]), shuffle=False)\n predictions = estimator.predict(input_fn=predict_test_input_fn)\n logger.info 
(predictions)\n for prediction in predictions:\n logger.info( prediction)\n logger.info (np.max(prediction[Parameters.prob]))\n result = json.dumps ({Parameters.Label : Parameters.LABELS[prediction [Parameters.class_id][0]], Parameters.probability: str(np.max(prediction[Parameters.prob]))})\n logger.info('Prediction:'+ result)\n\n return result;\n","repo_name":"gauravghid/IntentIdentifcation","sub_path":"IntentClassification/Text_Classify_tfhub.py","file_name":"Text_Classify_tfhub.py","file_ext":"py","file_size_in_byte":8944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11946834324","text":"\nimport numpy\nimport scipy,scipy.io\n\nmodel_name = 'places/CNN1/'\nw2vfile = '.../GoogleNews-vectors-negative300.bin'\n\nspellcheck_path = ''\ncaption_path = ''\n\npath_in = ''\npath_out = '' \n\n###############################################################################\n # Calling Embeddings #\n############################################################################### \n\nfilename = 'sorted_dist_top.npy'\nsorted_dist_top = numpy.load(path_in + filename )\n\nfilename = 'sorted_dist_worst.npy'\nsorted_dist_worst = numpy.load(path_in + filename ) \n\nfilename = 'sorted_dist_rand.npy' \nsorted_dist_rand = numpy.load(path_in + filename )\n\n###############################################################################\n # captions #\n############################################################################### \n \nfilename = 'places_testset_spellcheck.mat' \ndata = scipy.io.loadmat(spellcheck_path + filename,\n variable_names=['ind_correct']) \n\nind_correct = data['ind_correct'][0] \n\n################################################################################\n# # Selected wav files + captions #\n################################################################################\n\nmetadata_file = caption_path + 'places_testset_captions.mat'\ndata = scipy.io.loadmat(metadata_file) \ncaptions = data['captions']\n\ncaptions = captions[ind_correct]\n\ndef extract_captions (target):\n unit_list = []\n for utterance in target:\n utterance = (utterance.strip()) \n unit_list.append((utterance)) \n return unit_list \n\ncaption_list = extract_captions(captions)\n\n\n###############################################################################\n # removing first element from top list #\n###############################################################################\n \nsorted_dist_top = sorted_dist_top [:, 1:]\n\n###############################################################################\n # Finding top similar utterances based on Embedded layer #\n###############################################################################\ntop_counts = 50\ndict_top = []\nfor reference_ind in range(len(sorted_dist_top)):\n reference_utterance = caption_list [reference_ind]\n dict_reference = []\n dict_reference.append(reference_utterance)\n\n for j in range(top_counts):\n candidate_utterance = caption_list [sorted_dist_top [reference_ind,j]]\n dict_reference.append(candidate_utterance)\n\n\n dict_top.append(dict_reference) \n\nlow_counts = 50\ndict_low = []\nfor reference_ind in range(len(sorted_dist_worst)):\n reference_utterance = caption_list [reference_ind]\n dict_reference = []\n dict_reference.append(reference_utterance)\n\n for j in range(low_counts):\n candidate_utterance = caption_list [sorted_dist_worst [reference_ind,j]]\n dict_reference.append(candidate_utterance)\n\n\n dict_low.append(dict_reference) \n \nrand_counts = 
50\ndict_rand = []\nfor reference_ind in range(len(sorted_dist_rand)):\n reference_utterance = caption_list [reference_ind]\n dict_reference = []\n dict_reference.append(reference_utterance)\n\n for j in range(rand_counts):\n candidate_utterance = caption_list [sorted_dist_rand [reference_ind,j]]\n dict_reference.append(candidate_utterance)\n\n\n dict_rand.append(dict_reference) \n \n\n###############################################################################\n # Saving Results as Dictionaries #\n###############################################################################\n \nfilename = 'dict_top' \nnumpy.save( path_out + filename , dict_top )\n\nfilename = 'dict_low' \nnumpy.save( path_out + filename , dict_low )\n\n\nfilename = 'dict_rand' \nnumpy.save( path_out + filename , dict_rand )\n\n\n\n\n\n\n\n \n","repo_name":"SPEECHCOG/VGS_XSL","sub_path":"sr_analyses/2_save_sorted_utt.py","file_name":"2_save_sorted_utt.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"16064678252","text":"#!/usr/bin/env python3\nimport os\nimport re\n\nimport requests\nfrom bs4 import BeautifulSoup as BS\nfrom typing import Dict\n\n\nclass Publication():\n def __init__(self, pmid: str, configuration: Dict[str, str]) -> None:\n \"\"\"\n Publication storing object.\n\n Args:\n pmid (str): PMID of the publication.\n configuration (Dict[str, str]): Global configuration.\n \"\"\"\n # Store configuration\n self.configuration = configuration\n # Get publication types\n self.pth = self.configuration[\"has\"][\"publication_type\"].split(\";\")\n self.pmid = pmid\n\n def get_data(self) -> None:\n \"\"\"\n Retrieve all fields to get a complete publication object.\n \"\"\"\n api = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&retmode=xml&id={}'.format(\n self.pmid)\n data = requests.get(api).text\n data = BS(data, 'lxml')\n # Get DOI\n try:\n self.doi = data.find('articleid', attrs={'idtype': 'doi'}).text\n except AttributeError:\n self.doi = None\n # Get journal name\n try:\n self.journal = data.isoabbreviation.text\n except AttributeError:\n self.journal = None\n # Get year\n try:\n self.year = data.pubdate.year.text\n except AttributeError:\n try:\n self.year = data.datecompleted.year.text\n except AttributeError:\n try:\n self.year = data.articledate.year.text\n except AttributeError:\n self.year = None\n # Get month\n try:\n self.month = data.pubdate.month.text\n except AttributeError:\n try:\n self.month = data.datecompleted.month.text\n except AttributeError:\n try:\n self.month = data.articledate.month.text\n except AttributeError:\n self.month = None\n # Get day\n try:\n self.day = data.pubdate.day.text\n except AttributeError:\n try:\n self.day = data.datecompleted.day.text\n except AttributeError:\n try:\n self.day = data.articledate.day.text\n except AttributeError:\n self.day = None\n # Format pubdate\n self.pubdate = '{}/{}/{}'.format(self.year, self.month, self.day)\n # Get title\n self.title = data.articletitle.text\n # Parse authors\n try:\n authors = data.find_all('author', attrs={'validyn': 'Y'})\n self.authors = ', '.join([\n '{} {}'.format(author.lastname.text, author.initials.text)\n for author in authors\n ])\n self.first_author = '{} {}: {}'.format(\n authors[0].lastname.text, authors[0].initials.text,\n authors[0].affiliationinfo.affiliation.text)\n del authors\n except (AttributeError, IndexError):\n self.authors = None\n self.first_author = None\n # Get URL\n 
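# The year/month/day blocks in Publication.get_data() above repeat one
# pattern: try a sequence of tag paths and keep the first that exists. A
# hypothetical helper (not part of the original file) that captures the idea:
def first_text(root, paths):
    # paths is e.g. [('pubdate', 'year'), ('datecompleted', 'year')];
    # BeautifulSoup yields None for a missing subtag, so each path is walked
    # defensively and the first .text found is returned.
    for path in paths:
        node = root
        for name in path:
            node = getattr(node, name, None)
            if node is None:
                break
        if node is not None:
            return node.text
    return None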
self.url = 'https://www.ncbi.nlm.nih.gov/pubmed/{}'.format(self.pmid)\n # Get pubtype\n pub_types = data.find_all('publicationtype')\n self.publication_type = ', '.join([\n pub_type.text for pub_type in pub_types\n if pub_type.text in self.pth\n ])\n del pub_types\n # Get associated MeSH terms\n meshs = data.find_all('meshheading')\n self.mesh_majors = list()\n self.mesh_all = list()\n for mesh in meshs:\n try:\n self.mesh_all.append(mesh.descriptorname.text)\n if mesh.descriptorname['majortopicyn'] == 'Y':\n self.mesh_majors.append(mesh.descriptorname.text)\n except Exception:\n continue\n self.mesh_majors = '; '.join(self.mesh_majors)\n self.mesh_all = '; '.join(self.mesh_all)\n del meshs\n # Get abstract\n try:\n self.abstract = (re.sub('\\n', '', data.abstract.text))\n except AttributeError:\n self.abstract = None\n\n def write_data(self, file_path: str) -> None:\n \"\"\"\n Write the publication object as a line in output file.\n\n Args:\n filename (str): Path to output file.\n \"\"\"\n with open(file_path, 'a') as h:\n h.write(\n 'https://www.ncbi.nlm.nih.gov/pubmed/{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'\n .format(self.pmid, self.mesh_majors, self.title, self.abstract,\n self.authors, self.first_author, self.publication_type,\n self.year, self.pubdate, self.journal, ''))\n","repo_name":"MrMimic/MedHSS_datalake","sub_path":"src/main/python/publication/publication.py","file_name":"publication.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"42367969005","text":"import asyncio\nimport json\n\nfrom starlette.endpoints import HTTPEndpoint, WebSocketEndpoint\nfrom starlette.responses import FileResponse\n\n\nclass Homepage(HTTPEndpoint):\n async def get(self, request):\n return FileResponse(\"static/index.html\")\n\n\nclass Websocket(WebSocketEndpoint):\n encoding = \"json\"\n\n async def on_connect(self, ws):\n app = ws.app\n logger = app.state.logger\n cache = app.state.cache\n\n # add redis stuff\n pubsub = app.state.redis.pubsub()\n ws.state.channel = ws.query_params[\"channel\"]\n await pubsub.subscribe(**{ws.state.channel: get_handler(ws)})\n ws.state.pubsub = pubsub\n\n # create pub sub task\n ws.state.pubsub_task = asyncio.create_task(\n ws.state.pubsub.run(poll_timeout=0.01)\n )\n\n await ws.accept()\n\n messages = await cache.get(ws.state.channel)\n if messages:\n for msg in messages:\n await ws.send_text(json.dumps(msg))\n await cache.delete(ws.state.channel)\n\n async def on_receive(self, ws, data):\n app = ws.app\n logger = app.state.logger\n redis = app.state.redis\n cache = app.state.cache\n\n to = data.pop(\"to\")\n data[\"from\"] = ws.state.channel\n\n numsub = await redis.pubsub_numsub(to)\n if numsub[0][1]:\n await redis.publish(to, json.dumps(data))\n\n else:\n messages = await cache.get(to) or []\n messages.append(data)\n await cache.set(to, messages)\n\n async def on_disconnect(self, ws, close_code):\n ws.state.pubsub_task.cancel()\n await ws.state.pubsub.unsubscribe(ws.state.channel)\n # await ws.close(code=close_code)\n\n\ndef get_handler(ws):\n async def handler(message):\n wsmsg = message[\"data\"].decode()\n await ws.send_text(wsmsg)\n\n return handler\n","repo_name":"rykroon/webrtc-signal-server","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"43"} +{"seq_id":"41789188685","text":"import logging\nimport 
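# A minimal client-side sketch for the Starlette/Redis relay above; the URL,
# port, and route are assumptions, since the route table is not part of that
# file. It uses the third-party `websockets` package:
import asyncio
import json
import websockets

async def send_hello(url='ws://localhost:8000/ws?channel=alice'):
    async with websockets.connect(url) as ws:
        # The server pops 'to', tags the message with the sender's channel as
        # 'from', and either publishes it or parks it in the cache.
        await ws.send(json.dumps({'to': 'bob', 'text': 'hello'}))
        print(await ws.recv())

# asyncio.run(send_hello())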
boto3\nfrom botocore.exceptions import ClientError\n\ns3_client = boto3.client('s3')\n\n\ndef lambda_handler(event, context):\n bucket_name = \"fajjarnr-report\"\n object_name = \"report.html\"\n expiration_in_seconds = 120\n\n try:\n presigned_url_str = s3_client.generate_presigned_url('get_object', Params={\n 'Bucket': bucket_name, 'Key': object_name}, ExpiresIn=expiration_in_seconds)\n response = {\"presigned_url_str\": presigned_url_str}\n except ClientError as e:\n logging.error(e)\n return None\n\n # The response contains the presigned URL\n return response\n","repo_name":"fajjarnr/myskincare","sub_path":"lambda/generate_presign_url.py","file_name":"generate_presign_url.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74486731008","text":"import string\n\n\"\"\"\n12.12.12.12\n12.122.12.12\n12.12.122.12\n12.12.12.122\n122.12.12.12\n122.122.12.12\n122.122.122.12\n122.122.122.122\n\"\"\"\n\ndef invalid_ip():\n\tprint(\"[!] invalid Addr\")\n\ndef IPv4_validator():\n\tsetter = False\n\twhile setter != True:\n\t\tusr_inpt = input(\"@set-IPv4: \")\n\t\tis_greater = False\n\t\t# check to ensure all octets are less than length of 4\n\t\tfor octet in usr_inpt.split(\".\"):\n\t\t\tif len(octet) > 3:\n\t\t\t\tis_greater = True\n\t\t\t\tbreak\n\t\tif is_greater == True:\n\t\t\tinvalid_ip()\n\t\t\tcontinue\n\t\tif usr_inpt[0] in string.digits:\n\t\t\tif not \".\" in usr_inpt:\n\t\t\t\tinvalid_ip()\n\t\t\telif \"..\" in usr_inpt or \"...\" in usr_inpt:\n\t\t\t\tinvalid_ip()\n\t\t\telse:\n\t\t\t\tif usr_inpt.count(\".\") != 3:\n\t\t\t\t\tinvalid_ip()\n\t\t\t\t\tcontinue\n\t\t\t\tif usr_inpt[0] == \".\" or usr_inpt[1] == \".\":\n\t\t\t\t\tinvalid_ip()\n\t\t\t\telif usr_inpt[2] == \".\":\t# at this stage can only go upto 10\n\t\t\t\t\tif usr_inpt[4] == \".\" or usr_inpt[5] == \".\" or usr_inpt[6] == \".\":\n\t\t\t\t\t\tif usr_inpt[6] == \".\" or usr_inpt[7] == \".\" or usr_inpt[8] == \".\" or usr_inpt[9] == \".\" or usr_inpt[10] == \".\":\n\t\t\t\t\t\t\tsetter = True\n\t\t\t\t\t\t\treturn(usr_inpt)\n\t\t\t\t\t\telse: print(\"invalid point (octet 3)\")\n\t\t\t\t\telse: print(\"invalid point (octet 2)\")\n\t\t\t\telif usr_inpt[3] == \".\":\n\t\t\t\t\tif usr_inpt[5] == \".\" or usr_inpt[6] == \".\" or usr_inpt[7] == \".\":\n\t\t\t\t\t\tif usr_inpt[7] == \".\" or usr_inpt[8] == \".\" or usr_inpt[9] == \".\" or usr_inpt[10] == \".\" or usr_inpt[11] == \".\":\n\t\t\t\t\t\t\tsetter = True\n\t\t\t\t\t\t\treturn(usr_inpt)\n\t\t\t\t\t\telse: invalid_ip()\n\t\t\t\t\telse: invalid_ip()\n\t\t\t\telse:\n\t\t\t\t\tinvalid_ip()\n\t\telse:\n\t\t\tinvalid_ip()\n\ndef invalid_url(*args):\n\tif args:\n\t\tprint(\"[!] invalid URL: {}\".format(args[0]))\n\telse:\n\t\tprint(\"[!] 
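# A short sketch of consuming the Lambda response above: fetch the JSON, pull
# out the presigned URL, and download the report before the 120-second
# expiry. The API Gateway URL is hypothetical:
import requests

def download_report(api_url='https://example.invalid/prod/report-url'):
    payload = requests.get(api_url, timeout=10).json()
    # The presigned URL embeds temporary credentials, so a plain GET works.
    return requests.get(payload['presigned_url_str'], timeout=10).text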
invalid URL\")\n\ndef read_domains():\n\tvalid_list = []\n\twith open(\"domainList.txt\", \"r\") as f:\n\t\tfor ex in f.readlines():\n\t\t\tvalid_list.append(ex.lower().strip(\"\\n\"))\n\treturn valid_list\n\ndef URL_validator():\n\t# set valid_extensions\n\tvalid_extensions = read_domains()\n\tsetter = False\n\twhile setter != True:\n\t\tusr_inpt = input(\"@set-URL: \")\n\t\tif \".\" not in usr_inpt:\n\t\t\tinvalid_url()\n\t\t\tcontinue\n\t\tif \", \" in usr_inpt:\n\t\t\ttargets = usr_inpt.split(\", \")\n\t\t\tprint(targets)\n\t\t\tfor t in targets:\n\t\t\t\tif \".\" not in t:\n\t\t\t\t\tinvalid_url(t)\n\t\t\t\t\tcontinue\n\t\t\t\tif t.split(\".\")[2] not in valid_extensions:\n\t\t\t\t\tinvalid_url(t)\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tprint(t.split(\".\")[2])\nURL_validator()\n\n","repo_name":"aslamadmani1337/CWAF-2","sub_path":"testingZone/ip_validator.py","file_name":"ip_validator.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"69873387971","text":"# challenge hackerrank url\n# https://www.hackerrank.com/challenges/repeated-string/problem\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the repeatedString function below.\n\n\ndef repeatedString(s, n):\n\n # finding the total no. of a's in the string s\n a_in_s = s.count('a')\n\n # length of string s\n s_len = len(s)\n # if n is divisible by the length of string s\n if n % s_len == 0:\n total_as = a_in_s * (n/s_len)\n return int(total_as)\n # if n is not divisible by the length of string s\n elif n % s_len != 0:\n total_as_1 = a_in_s * (n//s_len)\n # now to calculate the no. of a's in the remaining string\n rem = n % s_len\n total_as_1 += s[:rem].count('a')\n return total_as_1\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n n = int(input())\n\n result = repeatedString(s, n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"dxganta/Algorithmic-Problems","sub_path":"repeated_string.py","file_name":"repeated_string.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11266996085","text":"# LeetCode 946. 
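# The branchy counting in repeatedString() above collapses into a single
# expression with divmod; a sketch with the same behaviour:
def repeated_string_compact(s, n):
    q, r = divmod(n, len(s))
    return s.count('a') * q + s[:r].count('a')

# repeated_string_compact('aba', 10) -> 7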
Validate Stack Sequences\n\n\"\"\"\nGiven two integer arrays pushed and popped each with distinct values, return true if this could have been the result of a sequence of push and pop operations on an initially empty stack, or false otherwise.\n\nExample 1:\nInput: pushed = [1,2,3,4,5], popped = [4,5,3,2,1]\nOutput: true\n\nExplanation: We might do the following sequence:\npush(1), push(2), push(3), push(4),\npop() -> 4,\npush(5),\npop() -> 5, pop() -> 3, pop() -> 2, pop() -> 1\n\"\"\"\nfrom typing import List\n\ndef validateStackSequences(pushed: List[int], popped: List[int]) -> bool:\n stack = []\n j = 0\n \n for i in pushed:\n stack.append(i)\n while(len(stack)>0 and stack[-1] == popped[j]):\n stack.pop()\n j+=1\n \n return len(stack) == 0\n\n# Space-optimised variant that reuses the input list as the stack\ndef validateStackSequencesInPlace(pushed: List[int], popped: List[int]) -> bool:\n i = 0\n j = 0\n \n for val in pushed:\n\n pushed[i] = val\n i+=1\n while(i > 0 and pushed[i - 1] == popped[j]):\n i-=1\n j+=1\n \n return i == 0\n\t\n","repo_name":"vedant115/LeetCode","sub_path":"Daily LeetCode Challenge/March/Validate_Stack_Sequences.py","file_name":"Validate_Stack_Sequences.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37746140237","text":"from bs4 import BeautifulSoup\nimport requests\nimport os, sys, re, math\n\n# sorry, I had to use yours, Bryan :'( idk how to create directories or...what this is\nheaders= {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}\nroot_url = \"http://vbpl.vn\"\nargs = [arg for arg in sys.argv[1:] if not arg.startswith(\"--\")] #should be max only 1 arg, relative_dir\nopts = [opt for opt in sys.argv[1:] if opt.startswith(\"--\")]\n\ndef get_dir(args):\n file_dir = os.getcwd()\n if \"--dir\" in opts:\n file_dir += args[0]\n\n #check if given directory exists\n if os.path.isdir(file_dir):\n print(\"Chosen file directory:\", file_dir)\n return file_dir\n else:\n print(\"You have entered an invalid directory. 
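# Two quick checks for the stack-based validator above, mirroring the
# problem's published examples:
def _validate(pushed, popped):
    stack, j = [], 0
    for x in pushed:
        stack.append(x)
        while stack and stack[-1] == popped[j]:
            stack.pop()
            j += 1
    return not stack

assert _validate([1, 2, 3, 4, 5], [4, 5, 3, 2, 1])
assert not _validate([1, 2, 3, 4, 5], [4, 3, 5, 1, 2])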
Please try again.\")\n raise SystemExit()\n\ndef make_dirs(dir_list):\n for i in dir_list:\n if not os.path.exists(i):\n os.mkdir(i)\n return\n\nfile_dir = get_dir(args)\nparallel_dir = file_dir + '/Parallel'\nviet_dir = file_dir + '/Vietnamese_Only'\nmake_dirs([parallel_dir, viet_dir])\npar_eng_count, par_viet_count, only_viet_count = 0, 0, 0\n\ndef scrape():\n source = requests.get('http://vbpl.vn/TW/Pages/Home.aspx').text\n soup = BeautifulSoup(source, 'html5lib')\n\n # by promulgator\n categories = soup.find('ul', class_='category', id = \"capCQ\")\n category = categories.find_all('li')\n links = []\n for i in category:\n cat_src = i.find('a')['href']\n cat_id = cat_src.split('/')[3]\n cat_id = cat_id.split('?')[1]\n cat_link = f'http://vbpl.vn/TW/Pages/vanbanTA.aspx?{cat_id}'\n links.append(cat_link)\n\n # by type of documents\n categories = soup.find('ul', class_='category', id = \"loaiVB\")\n category = categories.find_all('li')\n doc_links = []\n for i in category:\n cat_src = i.find('a')['href']\n cat_id = cat_src.split('/')[3]\n cat_id = cat_id.split('?')[1]\n cat_link = f'http://vbpl.vn/TW/Pages/vanbanTA.aspx?{cat_id}'\n doc_links.append(cat_link)\n\n # check if only vn or also got en\n en = soup.find('b', class_='history')\n if en.text == 'VB tiếng anh':\n print('VB tiếng anh') # just to check but im getting an error\nscrape()\n","repo_name":"BryanTZY/law-tests","sub_path":"i_tried.py","file_name":"i_tried.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"3336157856","text":"import utils\r\nimport const\r\nfrom PIL import ImageDraw,ImageFont\r\nimport PIL\r\nfrom wand.image import Image as WandImage\r\nfrom wand.font import Font as WandFont\r\nimport numpy as np\r\n \r\n\r\ndef create_token_backup(name,img,text):\r\n # font_space_frac = 5/16\r\n font_space_frac = 7/16\r\n\r\n # put image on background\r\n res = utils.add_img_to_bg(img,const.TOKEN_BG(),vertical_offset_frac=0.2)#,vertical_space_frac=font_space_frac)\r\n W,H = res.size\r\n \r\n \r\n draw = ImageDraw.Draw(res)\r\n # add ability text\r\n font = ImageFont.truetype(font=const.FONT_PATH(), size=20)\r\n s = font.getsize(name)\r\n # draw.multiline_text(((W-s[0])//2,50),text,font=font,align='right',fill='black',)\r\n font = get_optimal_multiline_font(const.FONT_PATH(),text,H*font_space_frac,im_size=H)\r\n font = ImageFont.truetype(font=const.FONT_PATH(), size=25)\r\n draw_multiline(draw,res.size,font,text)\r\n # add name\r\n # font = ImageFont.truetype(font='dorian2.ttf', size=50)\r\n font = get_optimal_font(const.FONT_PATH(),name,W*7/12)\r\n s = font.getsize(name)\r\n if const.DIRECTION=='rtl':\r\n name = utils.change_dir(name)\r\n draw.text(((W-s[0])//2,int(512*19/24)),name,font=font,align='right' if const.DIRECTION=='rtl' else 'left',fill='black')\r\n\r\n return res\r\ndef add_leaves(character, img,group=None):\r\n firstNight = len(character.get('firstNightReminder',''))>0\r\n\r\n otherNight = len(character.get('otherNightReminder',''))>0\r\n\r\n ability = character.get('ability','')\r\n changeSetup = '[' in ability and ']' in ability\r\n changeSetup = character.get('setup',changeSetup)\r\n \r\n n_reminders = len(character.get('reminders',[]))\r\n\r\n\r\n if firstNight:\r\n left_leaf = const.LEFT_LEAF(group)\r\n img.paste(left_leaf,(0,0),left_leaf)\r\n \r\n if otherNight:\r\n right_leaf = const.RIGHT_LEAF(group)\r\n img.paste(right_leaf,(0,0),right_leaf)\r\n \r\n if changeSetup:\r\n orange_leaf = 
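# The category loops above rebuild links by chaining str.split('/') and
# str.split('?'); urllib.parse recovers the query string more robustly. A
# hedged equivalent of that extraction:
from urllib.parse import urlsplit

def category_link(href, base='http://vbpl.vn/TW/Pages/vanbanTA.aspx'):
    query = urlsplit(href).query  # text after '?', or '' if absent
    return f'{base}?{query}' if query else base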
const.ORANGE_LEAF(group)\r\n img.paste(orange_leaf,(0,0),orange_leaf)\r\n \r\n if n_reminders>0:\r\n top_leaf = const.TOP_LEAF(n_reminders,group)\r\n img.paste(top_leaf,(0,0),top_leaf)\r\n return img\r\n\r\ndef add_name_to_img(name,img,color='black',mode='token',font_path=None):\r\n if font_path is None:\r\n font_path = const.NAME_FONT_PATH()\r\n font = ImageFont.truetype(font_path,size=60)\r\n font_width = font.getsize(name)[0]\r\n r = img.size[0]//2\r\n # degs_per_let = 12\r\n # rmax,rmin,cx,cy,angmin,angmax = 0.8*r,0.55*r,r,r,-degs_per_let*len(name)/2,degs_per_let*len(name)/2\r\n if mode=='token':\r\n let_width = const.LETTER_WIDTH(mode)\r\n rmax,rmin,cx,cy,angmin,angmax = 0.8*r,0.55*r,r,r,-let_width*font_width/2,let_width*font_width/2\r\n elif mode=='reminder':\r\n let_width = const.LETTER_WIDTH(mode)\r\n rmax,rmin,cx,cy,angmin,angmax = 0.95*r,0.7*r,r,r,-let_width*font_width/2,let_width*font_width/2\r\n else:\r\n raise Exception()\r\n with WandImage(width=2*r,height=2*r) as wand_img:\r\n wand_img.background_color = 'transparent'\r\n wand_img.font = WandFont(font_path,60)\r\n wand_img.font_color = color\r\n wand_img.read(filename='label: {} '.format(name))\r\n wand_img.resize(2*r,2*r)\r\n wand_img.virtual_pixel = 'transparent'\r\n # 360 degree arc, rotated -90 degrees\r\n wand_img.distort('polar', (rmax,rmin,cx,cy,angmin,angmax))\r\n\r\n # img.save(filename='arc_text.png')\r\n wand_img.format = 'png'\r\n # display(img)\r\n # img_buffer = np.asarray(bytearray(img.make_blob()), dtype='uint8')\r\n # bytesio = BytesIO(img_buffer)\r\n # img = skimage.io.imread(bytesio)\r\n npa = np.array(wand_img)\r\n # text = wand2pil(img)\r\n # bg.show()\r\n text = PIL.Image.fromarray(npa)\r\n\r\n img.paste(text,(0,0),text)\r\n return img\r\ndef create_token(character,img,group=None):\r\n # font_space_frac = 5/16\r\n # font_space_frac = 7/16\r\n name, text = character['name'],character['ability']\r\n # put image on background\r\n res = utils.add_img_to_bg(img,const.TOKEN_BG(group))#,vertical_space_frac=font_space_frac)\r\n res = add_leaves(character, res, group)\r\n W,H = res.size\r\n \r\n \r\n draw = ImageDraw.Draw(res)\r\n # add ability text\r\n # font = ImageFont.truetype(font=const.FONT_PATH(), size=20)\r\n # s = font.getsize(name)\r\n # draw.multiline_text(((W-s[0])//2,50),text,font=font,align='right',fill='black',)\r\n font = get_optimal_multiline_font(const.FONT_PATH(),text,int((H//2)*13/16),im_size=H)\r\n # font = ImageFont.truetype(font=const.FONT_PATH(), size=const.FONT_SIZE())\r\n draw_multiline(draw,res.size,font,text)\r\n # add name\r\n # font = ImageFont.truetype(font='dorian2.ttf', size=50)\r\n \r\n name = name.upper()\r\n\r\n # font = get_optimal_font(const.NAME_FONT_PATH(),name,W*5/12)\r\n # s = font.getsize(name)\r\n # if const.DIRECTION=='rtl':\r\n # name = utils.change_dir(name)\r\n # draw.text(((W-s[0])//2,int(H*.8)),name,font=font,align='right' if const.DIRECTION=='rtl' else 'left',fill='black')\r\n add_name_to_img(name,res,mode='token')\r\n return res\r\ndef create_token_ch(character,group=None):\r\n if character['id']=='_meta':\r\n return None\r\n \r\n edition = character.get('edition')\r\n is_fabled = character.get('team') == 'fabled'\r\n if is_fabled:\r\n group = 'fabled'\r\n elif edition in const.groups:\r\n group = edition\r\n\r\n \r\n copies = character.get('copies',1)\r\n images = character['image']\r\n if isinstance(images,list):\r\n images = [utils.url_to_image(image).resize((const.IMG_SIZE,const.IMG_SIZE)) for image in images]\r\n tokens = [create_token(character,im,group) 
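# On add_name_to_img() above: Wand's 'polar' distort maps the straight label
# rendered on the square canvas onto an arc. Its six arguments are
# (radius_max, radius_min, center_x, center_y, angle_start, angle_end); the
# angle span here scales with the measured pixel width of the name, so
# const.LETTER_WIDTH acts as degrees of arc per pixel of text and longer
# names sweep wider arcs at roughly constant letter spacing.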
for i,im in enumerate(images)]\r\n return [x for x in tokens for i in range(copies)]\r\n image = utils.url_to_image(images).resize((const.IMG_SIZE,const.IMG_SIZE))\r\n token = create_token(character,image,group)\r\n return [token for i in range(copies)]\r\n# def cv2_to_PIL(img):\r\n# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n# im_pil = Image.fromarray(img)\r\n# return im_pil\r\ndef create_reminder(image,reminder):\r\n bg = const.REMINDER_BG()\r\n W,H = bg.size\r\n box = ((W*1)//2,(H*1)//2)\r\n res = utils.add_img_to_bg(image,bg,box=box,from_height=int(H*0.2))#,vertical_space_frac=font_space_frac)\r\n \r\n \r\n draw = ImageDraw.Draw(res)\r\n # add ability text\r\n # s = font.getsize(name)\r\n # draw.multiline_text(((W-s[0])//2,50),text,font=font,align='right',fill='black',)\r\n # font = get_optimal_multiline_font(const.FONT_PATH(),text,H*font_space_frac,im_size=H)\r\n # font = ImageFont.truetype(font=const.FONT_PATH(), size=const.REMINDER_FONT_SIZE())\r\n # draw_multiline(draw,res.size,font,reminder,min_height=H*0.8,color='white')\r\n add_name_to_img(reminder,res,color='white',mode='reminder',font_path=const.FONT_PATH())\r\n # add name\r\n # font = ImageFont.truetype(font='dorian2.ttf', size=50)\r\n \r\n # font = ImageFont.truetype(font=const.FONT_PATH(), size=20)\r\n # name = name.upper()\r\n\r\n # font = get_optimal_font(const.NAME_FONT_PATH(),name,W*5/12)\r\n # s = font.getsize(name)\r\n # if const.DIRECTION=='rtl':\r\n # name = utils.change_dir(name)\r\n # draw.text(((W-s[0])//2,int(H*.8)),name,font=font,align='right' if const.DIRECTION=='rtl' else 'left',fill='black')\r\n # add_name_to_img(name,res)\r\n return res\r\ndef create_reminders_ch(character):\r\n if character['id']=='_meta':\r\n return None\r\n \r\n # edition = character.get('edition')\r\n # is_fabled = character.get('team') == 'fabled'\r\n # if is_fabled:\r\n # group = 'fabled'\r\n # elif edition in const.groups:\r\n # group = edition\r\n\r\n image = character['image']\r\n if isinstance(image,list):\r\n image = image[0]\r\n image = utils.url_to_image(image).resize((const.REMINDER_SIZE,const.REMINDER_SIZE))\r\n reminders = character.get('reminders',[])\r\n reminders = [(x,1) if isinstance(x,str) else tuple(x) for x in reminders]\r\n # if isinstance(reminders,list):\r\n reminders = [[create_reminder(image,reminder)]*copies for reminder,copies in reminders]\r\n # tokens = [create_token(character,im) for i,im in enumerate(reminders)]\r\n return reminders\r\n # image = utils.url_to_image(reminders).resize((const.IMG_SIZE,const.IMG_SIZE))\r\n # token = create_token(character,image,group)\r\n # return [token for i in range(copies)]\r\ndef get_optimal_font_size(font_path,text,width,is_width=True):\r\n for size in range(50,1,-1):\r\n font = ImageFont.truetype(font=font_path, size=size)\r\n s = font.getsize(text)\r\n \r\n if s[is_width] < width:\r\n return size\r\n return 1\r\ndef get_optimal_font(font_path,text,width,is_height=False):\r\n for size in range(50,1,-1):\r\n font = ImageFont.truetype(font=font_path, size=size)\r\n s = font.getsize(text)\r\n \r\n if s[int(is_height)] < width:\r\n break\r\n return font\r\ndef get_optimal_multiline_size(font_path,text,max_height,min_height=60,im_size=512):\r\n for size in range(50,1,-1):\r\n font = ImageFont.truetype(font=font_path, size=size)\r\n if check_multiline_font(font,text,max_height,min_height,im_size):\r\n return size\r\n return 1\r\ndef get_optimal_multiline_font(font_path,text,max_height,min_height=90,im_size=512,max_font_size=25):\r\n for size in 
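# get_optimal_font() above scans point sizes linearly from the maximum down.
# Rendered width grows monotonically with size, so a binary search finds the
# same answer in O(log n) font loads; a sketch:
from PIL import ImageFont

def optimal_font_size(font_path, text, max_width, lo=1, hi=50):
    while lo < hi:
        mid = (lo + hi + 1) // 2
        if ImageFont.truetype(font=font_path, size=mid).getsize(text)[0] < max_width:
            lo = mid       # still fits -- try larger
        else:
            hi = mid - 1   # too wide -- shrink
    return lo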
range(max_font_size,1,-1):\r\n font = ImageFont.truetype(font=font_path, size=size)\r\n if check_multiline_font(font,text,max_height,min_height,im_size):\r\n break\r\n return font\r\ndef draw_multiline(draw,size,font,text,min_height=90,color='black'):\r\n W,H = size\r\n # R = get_diam_for_size(W)\r\n R = W//2\r\n h = min_height\r\n later = text\r\n while later is not None:\r\n now, later = split_by_width(font,width4height(h,R),later)\r\n s = font.getsize(now)\r\n if const.DIRECTION=='rtl':\r\n now = utils.change_dir(now)\r\n draw.text(((W-s[0])//2,h),now,fill=color,font=font)\r\n h+=font.getsize(now)[1]\r\n return draw\r\ndef check_multiline_font(font,text,max_height,min_height=90,im_size=512):\r\n h = min_height\r\n later = text\r\n # R = get_diam_for_size(im_size)\r\n R = im_size//2\r\n while later is not None:\r\n now, later = split_by_width(font,width4height(h,R),later)\r\n h+=font.getsize(now)[1]\r\n return h<=max_height\r\ndef split_by_width(font,width,text):\r\n words = text.split(' ')\r\n if len(words)<=1:\r\n return text,None\r\n check = ''\r\n for i in range(len(words)+1):\r\n if i==len(words):\r\n break\r\n check += words[i]\r\n if font.getsize(check)[0]>width:\r\n break\r\n if i==0:\r\n return words[0], ' '.join(words[1:])\r\n return ' '.join(words[:i]),' '.join(words[i:])\r\ndef get_diam_for_size(size,perc=5):\r\n return size*(1-2*perc/100)\r\ndef width4height(height,R,p=5):\r\n '''\r\n allowed width for given height in circle with diameter diam\r\n '''\r\n p = 1-5*p/100\r\n h = height - R*(1-p)\r\n delta = (p**2-1)*R**2+2*R*height-height**2\r\n if delta<0:\r\n return 0\r\n\r\n return 2*(delta)**0.5","repo_name":"taloy42/BotC-Scripts","sub_path":"Tokens/token_creation.py","file_name":"token_creation.py","file_ext":"py","file_size_in_byte":10511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34819527722","text":"import random\n\n# Prices and shares for each stock item\nfb_price = 180\nfb_shares = 0\ngoog_price = 1285\ngoog_shares = 0\nmsft_price = 161\nmsft_shares = 0\ntsla_price = 702\ntsla_shares = 0\n\n# Starting money and day\nmoney = 10000\nday = 1\n\n# Run the game for 365 days or until the player goes bankrupt\nwhile day <= 365 and money >= 100:\n # Clear the screen and show the current status\n print(chr(27) + \"[2J\")\n print(\"Day\", day)\n print(\"Company Name Price Shares Owned\")\n print(\"--------------------------------------\")\n print(\"1. Facebook ${} {}\".format(fb_price, fb_shares))\n print(\"2. Google ${} {}\".format(goog_price, goog_shares))\n print(\"3. Microsoft ${} {}\".format(msft_price, msft_shares))\n print(\"4. Tesla ${} {}\".format(tsla_price, tsla_shares))\n print(\"Total value of all shares: ${}\".format(fb_price*fb_shares + goog_price*goog_shares + msft_price*msft_shares + tsla_price*tsla_shares))\n print(\"Total cash on hand: ${}\".format(money))\n \n # Show the menu and get the player's choice\n print(\"\\n1. Buy\\n2. Sell\\n3. End the day\")\n choice = input(\"What would you like to do? \")\n \n # Buy stocks\n if choice == \"1\":\n stock_choice = input(\"Which stock would you like to buy (1-4)? \")\n shares = int(input(\"How many shares would you like to buy? 
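# On width4height() above: the expression under the square root simplifies to
# (p*R)**2 - (R - height)**2 (after the percentage argument is mapped to the
# factor p = 1 - 5*p/100, i.e. 0.75 by default), so the usable text width at
# a given height is the chord of a circle of radius p*R at vertical offset
# |R - height| from the centre. A direct restatement:
def chord_width(height, R, p=0.75):
    d = (p * R) ** 2 - (R - height) ** 2
    return 0.0 if d < 0 else 2.0 * d ** 0.5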
\"))\n if stock_choice == \"1\":\n if money >= fb_price * shares:\n fb_shares += shares\n money -= fb_price * shares\n elif stock_choice == \"2\":\n if money >= goog_price * shares:\n goog_shares += shares\n money -= goog_price * shares\n elif stock_choice == \"3\":\n if money >= msft_price * shares:\n msft_shares += shares\n money -= msft_price * shares\n elif stock_choice == \"4\":\n if money >= tsla_price * shares:\n tsla_shares += shares\n money -= tsla_price * shares\n \n # Sell stocks\n elif choice == \"2\":\n stock_choice = input(\"Which stock would you like to sell (1-4)? \")\n shares = int(input(\"How many shares would you like to sell? \"))\n if stock_choice == \"1\":\n if fb_shares >= shares:\n fb_shares -= shares\n money += fb_price * shares * 0.99\n elif stock_choice == \"2\":\n if goog_shares >= shares:\n goog_shares -= shares\n money += goog_price * shares * 0.99\n elif stock_choice == \"3\":\n if msft_shares >= shares:\n msft_shares -= shares\n money += msft_price * shares * 0.99\n elif stock_choice == \"4\":\n if tsla_shares >= shares:\n tsla_shares -= shares\n money += tsla_price * shares * 0.99\n \n# End the day and change stock prices\n elif choice == \"3\":\n day += 1\n print(\"End of day\", day)\n old_prices = [fb_price, goog_price, msft_price, tsla_price]\n for i in range(len(old_prices)):\n old_price = old_prices[i]\n new_price = max(0.01, round(random.uniform(0.9, 1.1) * old_price, 2))\n print([\"Facebook\", \"Google\", \"Microsoft\", \"Tesla\"][i], \"old price:\", old_price, \"new price:\", new_price)\n if new_price > old_price:\n print(f\"The price of {['Facebook', 'Google', 'Microsoft', 'Tesla'][i]} went up from ${old_price} to ${new_price}\")\n elif new_price < old_price:\n print(f\"The price of {['Facebook', 'Google', 'Microsoft', 'Tesla'][i]} went down from ${old_price} to ${new_price}\")\n else:\n print(f\"The price of {['Facebook', 'Google', 'Microsoft', 'Tesla'][i]} stayed the same at ${new_price}\")\n if i == 0:\n fb_price = new_price\n elif i == 1:\n goog_price = new_price\n elif i == 2:\n msft_price = new_price\n else:\n tsla_price = new_price\n print(f\"Day {day} is over. You have ${money} in cash.\")\n \n # Invalid choice\n else:\n print(\"Invalid choice. 
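# The per-ticker if/elif ladders above repeat identical logic; a dict keyed
# by ticker removes them. A sketch of the same end-of-day price update:
import random

def end_of_day(prices):
    # New price is the old one scaled by +/-10%, floored at one cent.
    return {t: max(0.01, round(random.uniform(0.9, 1.1) * p, 2))
            for t, p in prices.items()}

# prices = {'FB': 180, 'GOOG': 1285, 'MSFT': 161, 'TSLA': 702}
# prices = end_of_day(prices)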
Please try again.\") ","repo_name":"m4ns0or/stock-market","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"15455616605","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 27 14:20:51 2021\n\n@author: tudor\n\"\"\"\n\nimport torch\nimport torch.nn.functional as F\nfrom utils import sinFunction\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom trainModel import TrainModel\n\nimport myModel\n\n\n# we load the model\n\n# visualise the parameters for the ann (aka weights and biases)\n# for name, param in ann.named_parameters():\n# if param.requires_grad:\n# print (name, param.data)\n\n\nclass UI:\n def __init__(self, filePath=\"myNet.pt\"):\n self._filePath = filePath\n self.ann = myModel.Net()\n\n # self.ann.load_state_dict(torch.load(filePath))\n # self.ann.eval()\n\n self.commands = {\n '1': (self.visualizeParameters, 'Visualize parameters'),\n '2': (self.inputValues, 'Input values'),\n '3': (self.plot, 'Plot values'),\n '4': (self.train, 'Train'),\n '5': (self.loadAnn, 'Load ann')\n }\n\n def visualizeParameters(self):\n for name, param in self.ann.named_parameters():\n if param.requires_grad:\n print(name, param.data)\n\n def train(self):\n start = time.time()\n train = TrainModel()\n print(f\"Time to load: {time.time() - start}\")\n start = time.time()\n train.train()\n print(f\"Time to train: {time.time() - start}\")\n self.ann = train.ann\n plt.plot(train.averages)\n plt.show()\n\n def loadAnn(self):\n self.ann = myModel.Net()\n\n self.ann.load_state_dict(torch.load(self._filePath))\n self.ann.eval()\n\n def inputValues(self):\n while True:\n x = input(\"x = \")\n if x == 'x':\n return\n y = input(\"y = \")\n if y == 'x':\n return\n\n x, y = torch.tensor(float(x)), torch.tensor(float(y))\n print(f\"Computed Value: {self.ann(torch.tensor([x, y])).tolist()[0]}\")\n print(f\"Actual value: {sinFunction(x, y)}\\n\")\n\n def plot(self):\n points = set()\n size = int(input(\"Size: \"))\n while len(points) != size:\n points.add(\n tuple(np.random.uniform(-10, 10, 2))\n )\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n\n prediction = self.ann(torch.tensor(torch.tensor([(float(x), float(y)) for x, y in points]))).detach().numpy()\n\n x, y, z, zTrue = [], [], prediction, []\n for xCoord, yCoord in points:\n x.append(xCoord)\n y.append(yCoord)\n zTrue.append(sinFunction(torch.tensor(xCoord), torch.tensor(yCoord)).numpy())\n ax.scatter(x, y, zTrue, color='red')\n ax.scatter(x, y, z, color='blue')\n plt.show()\n\n def printMenu(self):\n for key, (_, text) in self.commands.items():\n print(f\"{key}. {text}\")\n\n def start(self):\n while True:\n self.printMenu()\n command = input(\">> \")\n if command == 'x':\n return\n if command in self.commands:\n self.commands[command][0]()\n else:\n print('Bad command')\n print()\n\n\nUI().start()\n","repo_name":"ComanacDragos/University","sub_path":"Semester 4/AI/Lab/Lab7/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"31824887998","text":"import numpy as np\nimport mcutils, sputils\n\n\ndef isocdata_to_cmd(isoc_dat, color, mag):\n \"\"\"Make a CMD from isochrone data.\n\n :param isoc_data:\n The isochrone data, as returned by\n StellarPopulation.isochrones(). 
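# The UI class above uses the standard PyTorch checkpoint pattern: persist
# only the state_dict, then rebuild the module and switch to eval mode before
# inference. Condensed (net_cls would be e.g. myModel.Net):
import torch

def load_for_inference(path, net_cls):
    model = net_cls()
    model.load_state_dict(torch.load(path))  # weights only, not the class
    model.eval()                             # freeze dropout/batch-norm
    return model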
It should be prefiltered by\n age and perhaps stellar type (phase).\n\n :param color:\n A tuple giving the bandnames and bin edges for the color. It\n should have the form ``('band1', 'band2', bins)`` where\n ``bins`` is ndarray of bin edges and ``'band1'`` and\n ``'band2'`` are the names of the FSPS filters that form color\n 'band1-band2'.\n \n :param mag:\n A tuple of absolute magnitude bins of the form ``('band',bins)``\n where bins is an ndarray of bin edges and `band' is the filter.\n\n :returns cmd:\n A 2-d numpy array of shape (nc, nm) giving the color magnitude\n diagram\n \"\"\"\n c = isoc_dat[color[0]] - isoc_dat[color[1]]\n m = isoc_dat[mag[0]]\n cmd, _, _ = np.histogram2d(c, m, bins=[color[2], mag[1]],\n weights=10**isoc_dat['log(weight)'])\n return cmd\n\ndef partial_cmds(isoc, color, mag):\n \"\"\"Make a partial CMDs (i.e. a series of CMDs of SSPSs) from\n isochrone data.\n\n :param isoc_data:\n The isochrone data, as returned by\n StellarPopulation.isochrones(). It should be prefiltered by\n stellar type (phase) if you want only the cmds for particular\n stellar types.\n\n :param color:\n A tuple giving the bandnames and bin edges for the color. It\n should have the form ``('band1', 'band2', bins)`` where\n ``bins`` is ndarray of bin edges and ``'band1'`` and\n ``'band2'`` are the names of the FSPS filters that form color\n 'band1-band2'.\n \n :param mag:\n A tuple of absolute magnitude bins of the form ``('band',bins)``\n where bins is an ndarray of bin edges and `band' is the filter.\n\n :returns cmds:\n A 3-d numpy array of shape (nage, nc, nm) giving the binned\n color magnitude diagrams for each age.\n \"\"\"\n agecol = 'age'\n ages = np.unique(isoc[agecol])\n if len(ages) == 0:\n return np.zeros([1, len(color[-1])-1, len(mag[-1])-1]), []\n cmds = []\n for age in ages:\n thisage = isoc[agecol] == age\n cmds.append(isocdata_to_cmd(isoc[thisage], color, mag))\n cmds = np.array(cmds)\n oo = np.argsort(ages)\n \n return cmds[oo, :,:], ages[oo]\n\ndef rebin(a, shape):\n \"\"\"Rebin array to new shape. New shape must be integer fractions\n of the old shape\n \"\"\"\n sh = shape[0],a.shape[0]//shape[0],shape[1],a.shape[1]//shape[1]\n return a.reshape(sh).mean(-1).mean(1)\n\ndef sps_expected(isoc):\n \"\"\"\n :param isoc:\n An isochrone as a numpy structured array, as returned by\n fsps.StellarPopulation.isochrones(). It must have a\n ``log(weight)`` field.\n \n :returns ssp_ages:\n The ages of the SSPs, log of yrs\n\n :returns ssp_nexpected:\n The sum of the imf_weights for isochrone points with this age.\n \"\"\"\n logages = isoc['age']\n ssp_ages = np.unique(logages)\n total_weights = [(10**(isoc[logages == thisage]['log(weight)'])).sum() for\n thisage in ssp_ages]\n return ssp_ages, np.array(total_weights)\n\ndef agb_select_function(isoc_dat, composition=0, **extras):\n \"\"\"\n Here's a function that selects certain rows from the full\n isochrone data and returns them. 
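# A tiny self-contained example of the weighted numpy.histogram2d call used
# by isocdata_to_cmd() above, with made-up data, to show the shapes involved:
import numpy as np

c = np.random.uniform(-0.5, 2.0, 1000)     # colours (band1 - band2)
m = np.random.uniform(-4.0, 4.0, 1000)     # absolute magnitudes
w = 10 ** np.random.uniform(-6, -3, 1000)  # linearised IMF weights
cbins = np.linspace(-0.5, 2.0, 26)
mbins = np.linspace(-4.0, 4.0, 41)
cmd, _, _ = np.histogram2d(c, m, bins=[cbins, mbins], weights=w)
# cmd.shape == (25, 40): one row per colour bin, one column per magnitude bin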
The selection criteria can\n involve any of the columns given by the isochrone data, including\n magnitudes (or colors) as well as things like logg, logL, etc.\n \"\"\"\n #select only objects cooler than 4000K and in tp-agb phase\n select = ( #(isoc_dat['logt'] < np.log10(4000.0)) &\n (isoc_dat['phase'] == 5.0)\n )\n if composition < 0:\n select = select & (isoc_dat['composition'] < -composition)\n elif composition > 0:\n select = select & (isoc_dat['composition'] > composition)\n print(select.sum())\n return isoc_dat[select]\n\n\ndef agb_select_function_villaume(isoc_dat, **extras):\n \"\"\"\n Here's a function that selects certain rows from the full\n isochrone data and returns them. The selection criteria can\n involve any of the columns given by the isochrone data, including\n magnitudes (or colors) as well as things like logg, logL, etc.\n \"\"\"\n #select only objects cooler than 4000K and in tp-agb phase\n select = ( (isoc_dat['logt'] < np.log10(4000.0)) &\n (isoc_dat['phase'] != 6.0) &\n (isoc_dat['age'] > 6)\n )\n \n print(select.sum())\n return isoc_dat[select]\n\ndef agb_select_function_cmd(isoc_dat, **kwargs):\n \n c, o, boyer, xagb = boyer_cmd_classes(isoc_dat, **kwargs)\n return isoc_dat[boyer | xagb]\n \ndef boyer_cmd_classes(isoc_dat, cloud='lmc', is_data_cat=False, **extras):\n \"\"\"Boyer cmd cuts.\n \"\"\"\n if cloud.lower() == 'lmc':\n cdat = {'trgb_k': 11.94, 'trgb_i1':11.9, 'dm':18.49, 'met':-0.3}\n delta_dm = 0.4\n elif cloud.lower() =='smc':\n cdat = {'trgb_k': 12.7, 'trgb_i1':12.6, 'dm':18.89, 'met':-0.7}\n delta_dm = 0.0\n\n\n j = isoc_dat['2mass_j'] + cdat['dm'] * int( not is_data_cat)\n k = isoc_dat['2mass_ks'] + cdat['dm'] * int( not is_data_cat)\n i1 = isoc_dat['irac_1'] + cdat['dm'] * int( not is_data_cat)\n i4 = isoc_dat['irac_4'] + cdat['dm'] * int( not is_data_cat)\n \n # Cioni\n k0, k1, k2 = cioni_klines(j-k, cloud=cloud, **cdat)\n cstar = (k < k0) & (k > k2)\n ostar = (k < k0) & (k > k1) & ~cstar\n cioni = ostar | cstar\n \n # Boyer trgb cut\n boyer = (cioni &\n ((k < cdat['trgb_k']) | (i1 < cdat['trgb_i1']))\n )\n # Boyer xagb cut\n xagb = ((i1 < cdat['trgb_i1']) &\n (((j-i1) > 3.1) | ((i1-i4) > 0.8)) &\n ((i4 + delta_dm) < (12.0 - 0.43 * (j-i4))) &\n ((i4 + delta_dm) < (11.5 - 1.33 * (i1-i4)))\n )\n return cstar, ostar, boyer, xagb\n \ndef cioni_klines(color, cloud=None, **extras):\n \"\"\"The cioni classification criteria, as related by M. Boyer.\n `color` should be j-ks. ``met`` is -0.3 for the LMC, and -0.7 for\n the SMC. ``dm`` for the SMC is 18.89\n \"\"\"\n # We add the differential distance modulus to K0 but not to K1 or K2\n #\n if cloud == 'lmc':\n met, dm = -0.3, 18.49\n elif cloud == 'smc':\n met, dm = -0.7, 18.89\n k0 = -0.48 * color + 13.022 + 0.056 * met + (dm - 18.49)\n k1 = -13.333 * color + 25.293 + 1.568 * met \n k2 = -13.333 * color + 29.026 + 1.568 * met\n return k0, k1, k2\n\ndef agb_select_function_cmd_old(isoc_dat, cloud='lmc', **extras):\n \"\"\"Select AGBs using CMD cuts.\n \"\"\"\n if cloud.lower() == 'lmc':\n return agb_select_function_cmd_lmc(isoc_dat, **extras)\n elif cloud.lower() == 'smc':\n return agb_select_function_cmd_smc(isoc_dat, **extras)\n else:\n raise ValueError('Invalid cloud designation')\n\ndef agb_select_function_cmd_lmc(isoc_dat, **extras):\n \"\"\"Trying to follow the Boyer et al. 
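# The select functions in this file all follow one pattern: build boolean
# masks from structured-array fields, combine them with & and |, then index
# the array with the result. A self-contained miniature:
import numpy as np

iso = np.zeros(5, dtype=[('phase', 'f4'), ('logt', 'f4')])
iso['phase'] = [5, 5, 6, 5, 4]
iso['logt'] = [3.5, 3.7, 3.5, 3.4, 3.5]
mask = (iso['phase'] == 5.0) & (iso['logt'] < np.log10(4000.0))
agb = iso[mask]  # keeps rows 0 and 3; log10(4000) ~ 3.602 excludes row 1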
2011 color cuts for AGB stars\n \"\"\"\n trgb = {'k': 11.94,'i1':11.9, 'dm':18.49}\n #difference between the SMC and LMC distance moduli\n delta_dm = 0.40\n \n j = isoc_dat['2mass_j'] + trgb['dm']\n k = isoc_dat['2mass_ks'] + trgb['dm']\n i1 = isoc_dat['irac_1'] + trgb['dm']\n i4 = isoc_dat['irac_4'] + trgb['dm']\n\n # Boyer X-AGB, accounting for distance differences\n xagb = ((i1 < trgb['i1']) &\n (((j-i1) > 3.1) | ((i1-i4) > 0.8)) &\n ((i4 + delta_dm) < (12.0 - 0.43 * (j-i4))) &\n ((i4 + delta_dm) < (11.5 - 1.33 * (i1-i4)))\n )\n \n # Cioni 2006a (LMC) cuts\n cioni = ((k < (-0.48 * (j-k) + 13)) &\n (k > (-13.33 * (j-k) + 24.666))\n )\n # Boyer trgb cut\n boyer = (cioni &\n ((k < trgb['k']) | (i1 < trgb['i1']))\n )\n \n # Cioni 2006a (LMC) K1 line, exluding xagbs\n cstars = (boyer & ~xagb &\n (k < (-13.333 * (j-k) + 28.4))\n )\n ostars = (boyer & ~xagb &\n (k > (-13.333 * (j-k) + 28.4))\n )\n\n select = boyer | xagb\n \n return isoc_dat[select] \n \ndef agb_select_function_cmd_smc(isoc_dat, **extras):\n \"\"\"Trying to follow the Boyer et al. 2011 color cuts for AGB stars\n For the SMC\n \"\"\"\n trgb = {'k': 12.7, 'i1':12.6, 'dm':18.89}\n #difference between the SMC and LMC distance moduli\n delta_dm = -0.40\n #metallicity effect on j-k color\n # 0.056 * Z (Z_LMC = -0.3, Z_SMC = -0.7)\n delta_jk = -0.05\n \n j = isoc_dat['2mass_j'] + trgb['dm']\n k = isoc_dat['2mass_ks'] + trgb['dm']\n i1 = isoc_dat['irac_1'] + trgb['dm']\n i4 = isoc_dat['irac_4'] + trgb['dm']\n \n xagb = ((i1 < trgb['i1']) &\n (((j-i1) > 3.1) | ((i1-i4) > 0.8)) &\n (i4 < (12.0 - 0.43 * (j-i4))) &\n ((i4 < (11.5 - 1.33 * (i1-i4))) | (((i1-i4) > 3) & (i4 < 7.51)))\n )\n #\n \n # Cioni 2006a (LMC) cuts adjusted for distance and metallicity\n \n cioni = (((k + delta_dm) < (-0.48 * (j - k + delta_jk) + 13)) &\n ((k + delta_dm) > (-13.33 * (j - k + delta_jk) + 24.666))\n )\n #these cuts already adjusted for distance through trgb differences\n boyer = (cioni &\n ((k < trgb['k']) | (i1 < trgb['i1']))\n )\n # Cioni 2006a (LMC) K1 line adjusted for distance and metallicity\n cstars = (boyer & ~xagb &\n ((k+ delta_dm) < (-13.333 * (j - k + delta_jk) + 28.4))\n )\n ostars = (boyer & ~xagb &\n ((k+ delta_dm) > (-13.333 * (j - k +delta_jk) + 28.4))\n )\n\n select = boyer | xagb\n \n return isoc_dat[select] \n\n\ndef make_freq_prediction(esfh, zmet, sps=None, cloud=None,\n select_function=agb_select_function,\n **kwargs):\n \"\"\" Make a frequency prediction for the number of objects as a function of time\n \"\"\"\n \n if sps is None:\n import fsps\n sps = fsps.StellarPopulation(compute_vega_mags=True)\n sps.params['sfh'] = 0\n sps.params['imf_type'] = 0\n sps.params['tpagb_norm_type'] = 2 #VCJ\n sps.params['add_agb_dust_model'] = True\n sps.params['agb_dust'] = 1.0\n\n for k, v in kwargs.iteritems():\n try:\n sps.params[k] = v\n except:\n pass\n \n \n #esfh = regions['AA']['sfhs'][0]\n sps.params['zmet'] = np.abs(zmet - sps.zlegend).argmin() + 1\n zactual = sps.zlegend[sps.params['zmet'] - 1]\n print(r'Using $Z={0}Z_\\odot$'.format(zactual/0.019))\n isoc = sps.isochrones()\n agbisoc = select_function(isoc, cloud=cloud, **kwargs)\n ssp_ages, ssp_nexpected = sps_expected(agbisoc, esfh)\n \n dt = np.concatenate([[10**ssp_ages[0]], 10**ssp_ages[1:] - 10**ssp_ages[:-1]])\n nexpected = np.zeros(len(esfh))\n\n asfh = esfh.copy()\n asfh['t1'] = 10**asfh['t1']\n asfh['t2'] = 10**asfh['t2']\n for i in range(len(asfh)):\n if asfh['t2'][i] < 10**ssp_ages.min():\n #print(i)\n continue\n asfh['sfr'] = 0\n asfh['sfr'][i] = 1.0/(asfh['t2'][i] - 
asfh['t1'][i])\n lt, sfr, fact = sputils.burst_sfh(f_burst=0.0, sfh = asfh, bin_res=20)\n aw = sputils.sfh_weights(lt, sfr, 10**ssp_ages)\n nexpected[i] = (aw[0,:] * ssp_nexpected).sum()\n \n# for i, (start, stop) in enumerate(zip(esfh['t1'], esfh['t2'])):\n# this = (ssp_ages <= stop) & (ssp_ages > start)\n# if this.sum() == 0:\n# continue\n# wght = dt.copy()\n# #adjust end weights\n# mi, ma = ssp_ages[this].argmin(), ssp_ages[this].argmax()\n# wght[this][mi] = 10**ssp_ages[this][mi] - 10**start\n# wght[this][ma] += 10**stop - 10**ssp_ages[this][ma]\n #need to do a weighted sum, with weights given by dt\n# nexpected[i] = (ssp_nexpected[this] * wght[this]).sum()/wght[this].sum()\n return nexpected, zactual\n\n \n\nif __name__ == \"__main__\":\n import fsps\n sps = fsps.StellarPopulation(compute_vega_mags=True)\n sps.params['sfh'] = 0\n sps.params['imf_type'] = 0\n sps.params['tpagb_norm_type'] = 2 #VCJ\n sps.params['add_agb_dust_model'] = True\n sps.params['agb_dust'] = 1.0\n\n\n cloud = []\n agb_norm_type = []\n selfn = []\n","repo_name":"bd-j/magellanic","sub_path":"magellanic/sfhs/cmdutils.py","file_name":"cmdutils.py","file_ext":"py","file_size_in_byte":12364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"31691154649","text":"# Write your solution here\nfrom random import sample\n\ndef roll(die: str):\n die_a = [3, 3, 3, 3, 3, 6]\n die_b = [2, 2, 2, 5, 5, 5]\n die_c = [1, 4, 4, 4, 4, 4]\n \n if die == \"A\":\n return sample(die_a, 1)[0]\n elif die == \"B\":\n return sample(die_b, 1)[0]\n elif die == \"C\":\n return sample(die_c, 1)[0]\n \ndef play(die1: str, die2: str, times: int):\n die_a = [3, 3, 3, 3, 3, 6]\n die_b = [2, 2, 2, 5, 5, 5]\n die_c = [1, 4, 4, 4, 4, 4]\n\n p1_wins = 0\n p2_wins = 0\n ties = 0\n\n for i in range(0, times):\n p1_turn = 0\n p2_turn = 0\n\n if die1 == \"A\":\n p1_turn = sample(die_a, 1)[0]\n elif die1 == \"B\":\n p1_turn = sample(die_b, 1)[0]\n elif die1 == \"C\":\n p1_turn = sample(die_c, 1)[0]\n \n if die2 == \"A\":\n p2_turn = sample(die_a, 1)[0]\n elif die2 == \"B\":\n p2_turn = sample(die_b, 1)[0]\n elif die2 == \"C\":\n p2_turn = sample(die_c, 1)[0]\n\n if p1_turn > p2_turn:\n p1_wins += 1\n elif p2_turn > p1_turn:\n p2_wins += 1\n else:\n ties += 1\n \n return(p1_wins, p2_wins, ties)\n\nif __name__ == \"__main__\":\n\n for i in range(20):\n print(roll(\"A\"), \" \", end=\"\")\n print()\n\n for i in range(20):\n print(roll(\"B\"), \" \", end=\"\")\n print()\n \n for i in range(20):\n print(roll(\"C\"), \" \", end=\"\")\n print()\n\n print()\n \n result = play(\"A\", \"C\", 1000)\n print(result)\n\n result = play(\"B\", \"B\", 1000)\n print(result)","repo_name":"RumaanRazzaq/Learning-Python","sub_path":"part07/part07-07_dice_roller/src/dice_roller.py","file_name":"dice_roller.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71408983810","text":"class Solution:\n def solve(self, matrix):\n def swap(a, b):\n temp = matrix[a[0]][a[1]]\n matrix[a[0]][a[1]] = matrix[b[0]][b[1]]\n matrix[b[0]][b[1]] = temp\n \n if len(matrix) == 1:\n return matrix\n \n n = len(matrix)\n # rotate one ring at a time; ring i spans columns i .. n-1-i\n for i in range(n // 2):\n for j in range(i, n - 1 - i):\n swap((i,j), (j,n-1-i))\n swap((j,n-1-i),(n-1-i,n-1-j))\n swap((n-1-i,n-1-j), (n-1-j,i))\n return matrix\n\nmatrix = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n]\nfor row in matrix:\n print(row)\n\nprint('---------')\n\ns = Solution()\ns.solve(matrix)\n\nfor row in 
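# In the dice roller above, sample(die, 1)[0] draws a single face;
# random.choice does the same in one call, and a name-to-faces mapping
# removes the if/elif chains. A sketch:
import random

DICE = {'A': [3, 3, 3, 3, 3, 6],
        'B': [2, 2, 2, 5, 5, 5],
        'C': [1, 4, 4, 4, 4, 4]}

def roll_die(name):
    return random.choice(DICE[name])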
matrix:\n print(row)","repo_name":"lgdelacruz92/LeetCode-Challenges","sub_path":"rotate-matrix/rotate-matrix.py","file_name":"rotate-matrix.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"72825496764","text":"import pytest\nimport brownie\nfrom brownie import Wei, accounts, Contract, config\n\n\n@pytest.mark.require_network(\"mainnet-fork\")\ndef test_clone(\n chain,\n gov,\n token,\n strategist,\n rewards,\n keeper,\n strategy,\n Strategy,\n vault,\n token_vault,\n token_vault_registry,\n liquidity_mining,\n comfi,\n user,\n amount,\n):\n # Shouldn't be able to call initialize again\n with brownie.reverts():\n strategy.initialize(\n vault,\n strategist,\n rewards,\n keeper,\n token_vault,\n token_vault_registry,\n liquidity_mining,\n comfi,\n {\"from\": gov},\n )\n\n # Clone the strategy\n tx = strategy.cloneStrategy(\n vault,\n strategist,\n rewards,\n keeper,\n token_vault,\n token_vault_registry,\n liquidity_mining,\n comfi,\n {\"from\": gov},\n )\n new_strategy = Strategy.at(tx.return_value)\n\n # Shouldn't be able to call initialize again\n with brownie.reverts():\n new_strategy.initialize(\n vault,\n strategist,\n rewards,\n keeper,\n token_vault,\n token_vault_registry,\n liquidity_mining,\n comfi,\n {\"from\": gov},\n )\n\n vault.revokeStrategy(strategy, {\"from\": gov})\n vault.addStrategy(new_strategy, 10_000, 0, 1_000, {\"from\": gov})\n\n user_start_balance = token.balanceOf(user)\n before_pps = vault.pricePerShare()\n token.approve(vault.address, amount, {\"from\": user})\n vault.deposit({\"from\": user})\n\n new_strategy.harvest({\"from\": gov})\n\n chain.sleep(3600)\n chain.mine(100)\n\n # Get profits and withdraw\n new_strategy.harvest({\"from\": gov})\n chain.sleep(3600 * 6)\n chain.mine(1)\n\n vault.withdraw({\"from\": user})\n user_end_balance = token.balanceOf(user)\n\n assert vault.pricePerShare() > before_pps\n assert user_end_balance > user_start_balance\n","repo_name":"fp-crypto/complifi-strategy","sub_path":"tests/test_clone.py","file_name":"test_clone.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"505742566","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n#==============================================================================\n# model paths classes\n#==============================================================================\nimport os\nimport glob\nimport shutil\nimport sys\n#-----------------------------------------------------------------------\nclass model_paths(object):\n\n def __init__(self):\n callPath=os.path.dirname(sys.argv[0])\n if not (callPath == \".\" or callPath == \"\"):\n self.thisPath = os.getcwd()+'/'+os.path.dirname(sys.argv[0])\n else:\n self.thisPath = os.getcwd()\n #print(callPath, self.thisPath)\n splitPath = os.path.split(self.thisPath)\n splitPath = os.path.split(splitPath[0])\n self.basePath = splitPath[0]\n self.runPath = self.basePath+\"/run\"\n self.experimentsListPath = self.thisPath+\"/experiment_lists\"\n\n def get_experimentsNames_inPaths(self, pathsInRun):\n os.chdir(self.runPath)\n experimentsNames = []\n for path in pathsInRun:\n experiment = glob.glob(path)\n if not experiment:\n print(\"No experiment \"+path+\" found. 
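# The three-swap cycle in the rotate-matrix solution above is one approach;
# the common alternative rotates in place by transposing and then reversing
# the row order, giving the same counterclockwise result as the 3x3 demo:
def rotate_ccw(matrix):
    n = len(matrix)
    for i in range(n):
        for j in range(i + 1, n):
            matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]  # transpose
    matrix.reverse()  # reversing row order completes the 90-degree CCW turn
    return matrix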
Stop\")\n quit(1)\n experimentsNames.extend(experiment)\n os.chdir(self.thisPath)\n return experimentsNames\n\n def get_runpath(self):\n return self.runPath\n\n def thisExperimentExists(self, experimentName):\n return os.path.isfile(self.runPath+\"/\"+experimentName)\n\n def get_thisListPath(self, listName):\n return self.experimentsListPath+\"/\"+listName\n\n def thisListExists(self, listName):\n return os.path.isfile(self.get_thisListPath(listName))\n\n def deleteThisList(self, listName):\n if not self.thisListExists(listName):\n print(\"The list \"+listName+\" does not exist.\")\n quit(1)\n os.remove(self.get_thisListPath(listName))\n \n def copyList(self, fromlist, tolist):\n if not self.thisListExists(fromlist):\n print(\"The list \"+fromlist+\" does not exist.\")\n quit(1)\n if self.thisListExists(tolist):\n print(\"The list \"+tolist+\" exists. Please remove it first.\")\n quit(1)\n shutil.copy(self.get_thisListPath(fromlist), self.get_thisListPath(tolist))\n if not self.thisListExists(tolist):\n print(\"Copy failed\")\n quit(1)\n \n\n def print_paths(self):\n print(\"Base path:\"+self.basePath)\n print(\"Run path:\"+self.runPath)\n print(\"This path:\"+self.thisPath)\n\n def getPathAndName(self, PathName):\n dirName = os.path.dirname(PathName)\n fileName = os.path.basename(PathName)\n return dirName, fileName\n \npaths=model_paths()\n\n","repo_name":"muszyna25/ppe_icon_model","sub_path":"scripts/buildbot_scripts/model_paths.py","file_name":"model_paths.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"41711116838","text":"import os\nimport ast\nimport json\nimport torch\nimport datetime\nimport argparse\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom tqdm import tqdm\nfrom model import SegNet,UNet,DeepLab_v3_plus,DeepLab_v1\nfrom model import MEPDNet\nfrom model import R2U_Net, AttU_Net, R2AttU_Net, SCSEUnet, CE_Net_, NestedUNet\n\nfrom utils import get_range_limited_float_type,train, test, use\nfrom utils import timewrapper,setup_logger,ToLabel\nfrom torchvision.transforms import Compose,Normalize,ToTensor,ToPILImage\n\ndef get_config():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', required=True)\n parser.add_argument('--mode',type=str,choices=['train', 'test', 'use'],required=True)\n parser.add_argument('--config-path',type=str,default='config/cfg.json')\n parser.add_argument('--gpu-ids',type=int,nargs='+',default=0,dest='gpu_ids')\n parser.add_argument('--state',type=int,default=1,dest='state')\n parser.add_argument('-e', '--epochs', type=int, default=5,\n help='Number of epochs', dest='epochs')\n parser.add_argument('-b', '--batch-size', type=int, default=1,\n help='Batch size', dest='batch_size')\n parser.add_argument('-l', '--learning-rate', type=float, default=0.1,\n help='Learning rate', dest='lr')\n parser.add_argument('-p', '--port', type=int,default=10001,\n help='Visualization port', dest='port')\n parser.add_argument('-w', '--worker-num', type=int, default=1,\n help='Dataloader worker number', dest='num_workers')\n parser.add_argument('-c', '--class-num', type=int, default=2,\n help='class number', dest='class_num')\n parser.add_argument('-v', '--valid-percent', type=get_range_limited_float_type(0,100), default=10.0,\n help='Percent of the data that is used as validation (0-100)', dest='valid_percent')\n parser.add_argument('-s', '--sequence', default=False, type=ast.literal_eval, choices=[True, False], 
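# The --sequence flag defined at this point parses booleans via
# ast.literal_eval, which accepts only the exact strings 'True' and 'False'.
# A more forgiving converter (a common alternative, not from this repo):
import argparse

def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError(f'boolean value expected, got {v!r}')

# usage: parser.add_argument('-s', '--sequence', type=str2bool, default=False)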
help=\"sequence model\", dest='seq')\n\n args = parser.parse_args()\n\n assert os.path.exists(args.config_path),'config json not exists'\n with open(args.config_path,'r') as f:\n config = json.load(f)\n\n for arg in vars(args):\n config[arg]=getattr(args,arg)\n \n if isinstance(config['gpu_ids'],int):\n config['gpu_ids'] = [config['gpu_ids']]\n config['gpu_ids'] = list(set(config['gpu_ids']))\n config['device'] = 'cuda' if torch.cuda.is_available() else 'cpu'\n if config['device'] == 'cuda':\n gpu_num = torch.cuda.device_count()\n assert len(config['gpu_ids'])!=0,'unexpected gpu number'\n for gpu_id in config['gpu_ids']:\n assert gpu_id>=0 and gpu_id